merge mozilla-inbound to mozilla-central a=merge

Carsten "Tomcat" Book 2015-09-04 16:05:11 +02:00
Parent 810033e52a 3145bc32e7
Commit 57bd4d9f0d
144 changed files with 2228 additions and 2174 deletions

View file

@ -559,5 +559,5 @@ DocManager::RemoteDocAdded(DocAccessibleParent* aDoc)
MOZ_ASSERT(!sRemoteDocuments->Contains(aDoc),
"How did we already have the doc!");
sRemoteDocuments->AppendElement(aDoc);
ProxyCreated(aDoc, 0);
ProxyCreated(aDoc, Interfaces::DOCUMENT | Interfaces::HYPERTEXT);
}

View file

@ -57,7 +57,8 @@ SerializeTree(Accessible* aRoot, nsTArray<AccessibleData>& aTree)
// OuterDocAccessibles are special because we don't want to serialize the
// child doc here, we'll call PDocAccessibleConstructor in
// NotificationController.
if (childCount == 1 && aRoot->GetChildAt(0)->IsDoc())
MOZ_ASSERT(!aRoot->IsDoc(), "documents shouldn't be serialized");
if (aRoot->IsOuterDoc())
childCount = 0;
aTree.AppendElement(AccessibleData(id, role, childCount, interfaces));

View file

@ -112,8 +112,8 @@ endif
ifdef MOZ_SHARED_ICU
DEFINES += -DMOZ_SHARED_ICU
endif
ifdef MOZ_JEMALLOC3
DEFINES += -DMOZ_JEMALLOC3
ifdef MOZ_JEMALLOC4
DEFINES += -DMOZ_JEMALLOC4
endif
DEFINES += -DMOZ_ICU_DBG_SUFFIX=$(MOZ_ICU_DBG_SUFFIX)

View file

@ -116,7 +116,7 @@
@BINPATH@/@DLL_PREFIX@mozglue@DLL_SUFFIX@
#endif
#ifdef MOZ_REPLACE_MALLOC
#ifndef MOZ_JEMALLOC3
#ifndef MOZ_JEMALLOC4
@BINPATH@/@DLL_PREFIX@replace_jemalloc@DLL_SUFFIX@
#endif
#endif

View file

@ -1055,7 +1055,7 @@ nsContextMenu.prototype = {
BrowserViewSourceOfDocument({
browser: this.browser,
URL: gContextMenuContentData.docLocation,
outerWindowID: gContextMenuContentData.frameOuterWindowID,
outerWindowID: this.frameOuterWindowID,
});
},
@ -1369,7 +1369,7 @@ nsContextMenu.prototype = {
urlSecurityCheck(this.linkURL, this.principal);
this.saveHelper(this.linkURL, this.linkTextStr, null, true, this.ownerDoc,
gContextMenuContentData.documentURIObject,
gContextMenuContentData.frameOuterWindowID,
this.frameOuterWindowID,
this.linkDownload);
},
@ -1400,7 +1400,7 @@ nsContextMenu.prototype = {
urlSecurityCheck(this.mediaURL, this.principal);
var dialogTitle = this.onVideo ? "SaveVideoTitle" : "SaveAudioTitle";
this.saveHelper(this.mediaURL, null, dialogTitle, false, doc, referrerURI,
gContextMenuContentData.frameOuterWindowID, "");
this.frameOuterWindowID, "");
}
},

View file

@ -141,8 +141,8 @@ endif
ifdef MOZ_SHARED_ICU
DEFINES += -DMOZ_SHARED_ICU
endif
ifdef MOZ_JEMALLOC3
DEFINES += -DMOZ_JEMALLOC3
ifdef MOZ_JEMALLOC4
DEFINES += -DMOZ_JEMALLOC4
endif
DEFINES += -DMOZ_ICU_DBG_SUFFIX=$(MOZ_ICU_DBG_SUFFIX)
ifdef CLANG_CXX

View file

@ -124,7 +124,7 @@
#endif
#endif
#ifdef MOZ_REPLACE_MALLOC
#ifndef MOZ_JEMALLOC3
#ifndef MOZ_JEMALLOC4
@BINPATH@/@DLL_PREFIX@replace_jemalloc@DLL_SUFFIX@
#endif
#endif

View file

@ -94,7 +94,6 @@ dnl ==============================================================
_topsrcdir=`cd \`dirname $0\`; pwd`
_objdir=`pwd`
dnl TODO Don't exempt L10N builds once bug 842760 is resolved.
if test "$_topsrcdir" = "$_objdir" -a "${with_l10n_base+set}" != set; then
echo " ***"
@ -5439,6 +5438,23 @@ if test -n "$MOZ_VPX" -a -z "$MOZ_NATIVE_LIBVPX"; then
VPX_AS_CONVERSION='$(PERL) $(topsrcdir)/media/libvpx/build/make/ads2gas.pl'
VPX_ASM_SUFFIX="$ASM_SUFFIX"
VPX_ARM_ASM=1
dnl Building with -mfpu=neon requires either the "softfp" or the
dnl "hardfp" ABI. Depending on the compiler's default target, and the
dnl CFLAGS, the default ABI might be neither, in which case it is the
dnl "softfloat" ABI.
dnl The "softfloat" ABI is binary-compatible with the "softfp" ABI, so
dnl we can safely mix code built with both ABIs. So, if we detect
dnl that compiling uses the "softfloat" ABI, force the use of the
dnl "softfp" ABI instead.
dnl Confusingly, the __SOFTFP__ preprocessor variable indicates the
dnl "softfloat" ABI, not the "softfp" ABI.
dnl Note: VPX_ASFLAGS is also used in CFLAGS.
AC_TRY_COMPILE([],
[#ifndef __SOFTFP__
#error "compiler target supports -mfpu=neon, so we don't have to add extra flags"
#endif],
VPX_ASFLAGS="$VPX_ASFLAGS -mfloat-abi=softfp"
)
fi
;;
*:x86)
@ -7207,7 +7223,7 @@ dnl ========================================================
dnl = Jemalloc build setup
dnl ========================================================
if test -z "$MOZ_MEMORY"; then
if test -n "$MOZ_JEMALLOC3" -a -z "$MOZ_REPLACE_MALLOC"; then
if test -n "$MOZ_JEMALLOC4" -a -z "$MOZ_REPLACE_MALLOC"; then
MOZ_NATIVE_JEMALLOC=1
AC_CHECK_FUNCS(mallctl nallocx,,
[MOZ_NATIVE_JEMALLOC=
@ -7215,7 +7231,7 @@ if test -z "$MOZ_MEMORY"; then
if test -n "$MOZ_NATIVE_JEMALLOC"; then
MOZ_MEMORY=1
AC_DEFINE(MOZ_MEMORY)
AC_DEFINE(MOZ_JEMALLOC3)
AC_DEFINE(MOZ_JEMALLOC4)
AC_DEFINE(MOZ_NATIVE_JEMALLOC)
fi
fi
@ -7228,8 +7244,11 @@ if test -z "$MOZ_MEMORY"; then
esac
else
AC_DEFINE(MOZ_MEMORY)
if test -n "$MOZ_JEMALLOC3"; then
AC_DEFINE(MOZ_JEMALLOC3)
if test -n "$NIGHTLY_BUILD"; then
MOZ_JEMALLOC4=1
fi
if test -n "$MOZ_JEMALLOC4"; then
AC_DEFINE(MOZ_JEMALLOC4)
fi
if test "x$MOZ_DEBUG" = "x1"; then
AC_DEFINE(MOZ_MEMORY_DEBUG)
@ -7278,7 +7297,7 @@ else
esac
fi # MOZ_MEMORY
AC_SUBST(MOZ_MEMORY)
AC_SUBST(MOZ_JEMALLOC3)
AC_SUBST(MOZ_JEMALLOC4)
AC_SUBST(MOZ_NATIVE_JEMALLOC)
AC_SUBST(MOZ_CRT)
export MOZ_CRT
@ -9115,7 +9134,7 @@ fi
# Run jemalloc configure script
if test -z "$MOZ_NATIVE_JEMALLOC" -a "$MOZ_MEMORY" && test -n "$MOZ_JEMALLOC3" -o -n "$MOZ_REPLACE_MALLOC"; then
if test -z "$MOZ_NATIVE_JEMALLOC" -a "$MOZ_MEMORY" && test -n "$MOZ_JEMALLOC4" -o -n "$MOZ_REPLACE_MALLOC"; then
ac_configure_args="--build=$build --host=$target --enable-stats --with-jemalloc-prefix=je_ --disable-valgrind"
# We're using memalign for _aligned_malloc in memory/build/mozmemory_wrap.c
# on Windows, so just export memalign on all platforms.
@ -9129,7 +9148,7 @@ if test -z "$MOZ_NATIVE_JEMALLOC" -a "$MOZ_MEMORY" && test -n "$MOZ_JEMALLOC3" -
ac_configure_args="$ac_configure_args --enable-ivsalloc"
fi
fi
if test -n "$MOZ_JEMALLOC3"; then
if test -n "$MOZ_JEMALLOC4"; then
case "${OS_ARCH}" in
WINNT|Darwin)
# We want jemalloc functions to be kept hidden on both Mac and Windows
@ -9155,7 +9174,7 @@ if test -z "$MOZ_NATIVE_JEMALLOC" -a "$MOZ_MEMORY" && test -n "$MOZ_JEMALLOC3" -
MANGLE=$_MANGLE
;;
esac
elif test -z "$MOZ_JEMALLOC3"; then
elif test -z "$MOZ_JEMALLOC4"; then
MANGLE=$_MANGLE
JEMALLOC_WRAPPER=replace_
fi

View file

@ -12369,7 +12369,8 @@ nsGlobalWindow::SetTimeout(JSContext* aCx, Function& aFunction,
const Sequence<JS::Value>& aArguments,
ErrorResult& aError)
{
return SetTimeoutOrInterval(aFunction, aTimeout, aArguments, false, aError);
return SetTimeoutOrInterval(aCx, aFunction, aTimeout, aArguments, false,
aError);
}
int32_t
@ -12403,7 +12404,7 @@ nsGlobalWindow::SetInterval(JSContext* aCx, Function& aFunction,
{
int32_t timeout;
bool isInterval = IsInterval(aTimeout, timeout);
return SetTimeoutOrInterval(aFunction, timeout, aArguments, isInterval,
return SetTimeoutOrInterval(aCx, aFunction, timeout, aArguments, isInterval,
aError);
}
@ -12545,7 +12546,8 @@ nsGlobalWindow::SetTimeoutOrInterval(nsIScriptTimeoutHandler *aHandler,
}
int32_t
nsGlobalWindow::SetTimeoutOrInterval(Function& aFunction, int32_t aTimeout,
nsGlobalWindow::SetTimeoutOrInterval(JSContext *aCx, Function& aFunction,
int32_t aTimeout,
const Sequence<JS::Value>& aArguments,
bool aIsInterval, ErrorResult& aError)
{
@ -12555,12 +12557,12 @@ nsGlobalWindow::SetTimeoutOrInterval(Function& aFunction, int32_t aTimeout,
}
if (inner != this) {
return inner->SetTimeoutOrInterval(aFunction, aTimeout, aArguments,
return inner->SetTimeoutOrInterval(aCx, aFunction, aTimeout, aArguments,
aIsInterval, aError);
}
nsCOMPtr<nsIScriptTimeoutHandler> handler =
NS_CreateJSTimeoutHandler(this, aFunction, aArguments, aError);
NS_CreateJSTimeoutHandler(aCx, this, aFunction, aArguments, aError);
if (!handler) {
return 0;
}
@ -12642,8 +12644,8 @@ nsGlobalWindow::RunTimeoutHandler(nsTimeout* aTimeout,
NS_ASSERTION(script, "timeout has no script nor handler text!");
const char* filename = nullptr;
uint32_t lineNo = 0;
handler->GetLocation(&filename, &lineNo);
uint32_t lineNo = 0, dummyColumn = 0;
handler->GetLocation(&filename, &lineNo, &dummyColumn);
// New script entry point required, due to the "Create a script" sub-step of
// http://www.whatwg.org/specs/web-apps/current-work/#timer-initialisation-steps

View file

@ -130,7 +130,7 @@ class VRHMDInfo;
} // namespace mozilla
extern already_AddRefed<nsIScriptTimeoutHandler>
NS_CreateJSTimeoutHandler(nsGlobalWindow *aWindow,
NS_CreateJSTimeoutHandler(JSContext* aCx, nsGlobalWindow *aWindow,
mozilla::dom::Function& aFunction,
const mozilla::dom::Sequence<JS::Value>& aArguments,
mozilla::ErrorResult& aError);
@ -1394,7 +1394,8 @@ public:
nsresult SetTimeoutOrInterval(nsIScriptTimeoutHandler *aHandler,
int32_t interval,
bool aIsInterval, int32_t* aReturn) override;
int32_t SetTimeoutOrInterval(mozilla::dom::Function& aFunction,
int32_t SetTimeoutOrInterval(JSContext* aCx,
mozilla::dom::Function& aFunction,
int32_t aTimeout,
const mozilla::dom::Sequence<JS::Value>& aArguments,
bool aIsInterval, mozilla::ErrorResult& aError);

View file

@ -39,7 +39,8 @@ public:
// Get the location of the script.
// Note: The memory pointed to by aFileName is owned by the
// nsIScriptTimeoutHandler and should not be freed by the caller.
virtual void GetLocation(const char **aFileName, uint32_t *aLineNo) = 0;
virtual void GetLocation(const char **aFileName, uint32_t *aLineNo,
uint32_t *aColumn) = 0;
// If we have a Function, get the arguments for passing to it.
virtual const nsTArray<JS::Value>& GetArgs() = 0;
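
The interface change above means every consumer of GetLocation must now pass a third out-parameter for the column, as nsGlobalWindow::RunTimeoutHandler does earlier in this commit. A minimal sketch of an updated caller; ReportLocation and its printf logging are illustrative, not part of this patch:

#include <cstdint>
#include <cstdio>

// Hypothetical consumer of the new three-argument GetLocation().
void ReportLocation(nsIScriptTimeoutHandler* aHandler) {
  const char* filename = nullptr;  // owned by the handler; must not be freed
  uint32_t lineNo = 0, column = 0;
  aHandler->GetLocation(&filename, &lineNo, &column);
  printf("timeout handler created at %s:%u:%u\n",
         filename ? filename : "<unknown>", lineNo, column);
}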

View file

@ -35,7 +35,8 @@ public:
nsJSScriptTimeoutHandler();
// This will call SwapElements on aArguments with an empty array.
nsJSScriptTimeoutHandler(nsGlobalWindow *aWindow, Function& aFunction,
nsJSScriptTimeoutHandler(JSContext* aCx, nsGlobalWindow *aWindow,
Function& aFunction,
FallibleTArray<JS::Heap<JS::Value> >& aArguments,
ErrorResult& aError);
nsJSScriptTimeoutHandler(JSContext* aCx, nsGlobalWindow *aWindow,
@ -47,10 +48,12 @@ public:
{
return mFunction;
}
virtual void GetLocation(const char** aFileName, uint32_t* aLineNo) override
virtual void GetLocation(const char** aFileName, uint32_t* aLineNo,
uint32_t* aColumn) override
{
*aFileName = mFileName.get();
*aLineNo = mLineNo;
*aColumn = mColumn;
}
virtual const nsTArray<JS::Value>& GetArgs() override
@ -67,6 +70,7 @@ private:
// caller of setTimeout()
nsCString mFileName;
uint32_t mLineNo;
uint32_t mColumn;
nsTArray<JS::Heap<JS::Value> > mArgs;
// The expression to evaluate or function to call. If mFunction is non-null
@ -106,6 +110,8 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INTERNAL(nsJSScriptTimeoutHandler)
name.Append(tmp->mFileName);
name.Append(':');
name.AppendInt(tmp->mLineNo);
name.Append(':');
name.AppendInt(tmp->mColumn);
name.Append(']');
}
cb.DescribeRefCountedNode(tmp->mRefCnt.get(), name.get());
@ -183,17 +189,20 @@ CheckCSPForEval(JSContext* aCx, nsGlobalWindow* aWindow, ErrorResult& aError)
return allowsEval;
}
nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler() :
mLineNo(0)
nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler()
: mLineNo(0)
, mColumn(0)
{
}
nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler(nsGlobalWindow *aWindow,
nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler(JSContext* aCx,
nsGlobalWindow *aWindow,
Function& aFunction,
FallibleTArray<JS::Heap<JS::Value> >& aArguments,
ErrorResult& aError) :
mLineNo(0),
mFunction(&aFunction)
ErrorResult& aError)
: mLineNo(0)
, mColumn(0)
, mFunction(&aFunction)
{
if (!aWindow->GetContextInternal() || !aWindow->FastGetGlobalJSObject()) {
// This window was already closed, or never properly initialized,
@ -204,15 +213,19 @@ nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler(nsGlobalWindow *aWindow,
mozilla::HoldJSObjects(this);
mArgs.SwapElements(aArguments);
// Get the calling location.
nsJSUtils::GetCallingLocation(aCx, mFileName, &mLineNo, &mColumn);
}
nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler(JSContext* aCx,
nsGlobalWindow *aWindow,
const nsAString& aExpression,
bool* aAllowEval,
ErrorResult& aError) :
mLineNo(0),
mExpr(aExpression)
ErrorResult& aError)
: mLineNo(0)
, mColumn(0)
, mExpr(aExpression)
{
if (!aWindow->GetContextInternal() || !aWindow->FastGetGlobalJSObject()) {
// This window was already closed, or never properly initialized,
@ -227,7 +240,7 @@ nsJSScriptTimeoutHandler::nsJSScriptTimeoutHandler(JSContext* aCx,
}
// Get the calling location.
nsJSUtils::GetCallingLocation(aCx, mFileName, &mLineNo);
nsJSUtils::GetCallingLocation(aCx, mFileName, &mLineNo, &mColumn);
}
nsJSScriptTimeoutHandler::~nsJSScriptTimeoutHandler()
@ -253,7 +266,8 @@ nsJSScriptTimeoutHandler::GetHandlerText()
}
already_AddRefed<nsIScriptTimeoutHandler>
NS_CreateJSTimeoutHandler(nsGlobalWindow *aWindow, Function& aFunction,
NS_CreateJSTimeoutHandler(JSContext *aCx, nsGlobalWindow *aWindow,
Function& aFunction,
const Sequence<JS::Value>& aArguments,
ErrorResult& aError)
{
@ -264,7 +278,7 @@ NS_CreateJSTimeoutHandler(nsGlobalWindow *aWindow, Function& aFunction,
}
nsRefPtr<nsJSScriptTimeoutHandler> handler =
new nsJSScriptTimeoutHandler(aWindow, aFunction, args, aError);
new nsJSScriptTimeoutHandler(aCx, aWindow, aFunction, args, aError);
return aError.Failed() ? nullptr : handler.forget();
}

View file

@ -1,6 +1,6 @@
[DEFAULT]
# Good luck running these tests on anything but desktop Linux.
skip-if = toolkit != "gtk2" || ((buildapp =='mulet' || buildapp == 'b2g') && (toolkit != 'gonk' || debug)) || e10s
run-if = os == 'linux' && buildapp == 'browser' && !e10s
# Note: ../browserElementTestHelpers.js makes all tests in this directory OOP,
# because testing the process-priority manager without OOP frames does not make

View file

@ -216,7 +216,7 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
mDropVideoUntilNextDiscontinuity(false),
mDecodeToSeekTarget(false),
mCurrentTimeBeforeSeek(0),
mCorruptFrames(30),
mCorruptFrames(60),
mDecodingFirstFrame(true),
mSentLoadedMetadataEvent(false),
mSentFirstFrameLoadedEvent(false),
@ -2474,8 +2474,8 @@ bool MediaDecoderStateMachine::CheckFrameValidity(VideoData* aData)
// only supports integer types.
mCorruptFrames.insert(10);
if (mReader->VideoIsHardwareAccelerated() &&
frameStats.GetPresentedFrames() > 30 &&
mCorruptFrames.mean() >= 1 /* 10% */) {
frameStats.GetPresentedFrames() > 60 &&
mCorruptFrames.mean() >= 2 /* 20% */) {
nsCOMPtr<nsIRunnable> task =
NS_NewRunnableMethod(mReader, &MediaDecoderReader::DisableHardwareAcceleration);
DecodeTaskQueue()->Dispatch(task.forget());
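
For context on the numbers above: each corrupt frame records a sample of 10 in mCorruptFrames (the "only supports integer types" comment explains the scaling), and the constructor argument, presumably the window size, grows from 30 to 60. Assuming good frames contribute 0, a mean of 1 corresponds to roughly 10% corrupt frames and a mean of 2 to roughly 20%. A standalone sketch of that heuristic, using a plain deque in place of the actual rolling-mean type:

#include <cstddef>
#include <deque>

class RollingCorruption {
 public:
  explicit RollingCorruption(size_t aWindow) : mWindow(aWindow) {}

  // Record 10 for a corrupt frame, 0 for a good one (assumption: the real
  // code scales by 10 because the rolling mean only supports integers).
  void Insert(int aSample) {
    mSamples.push_back(aSample);
    if (mSamples.size() > mWindow)
      mSamples.pop_front();
  }

  double Mean() const {
    if (mSamples.empty())
      return 0.0;
    long sum = 0;
    for (int s : mSamples)
      sum += s;
    return static_cast<double>(sum) / mSamples.size();
  }

 private:
  size_t mWindow;
  std::deque<int> mSamples;
};

// With a 60-sample window, Mean() >= 2 means at least ~12 of the last 60
// decoded frames were corrupt before hardware acceleration is disabled.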

View file

@ -285,7 +285,7 @@ nsresult nsAutoConfig::downloadAutoConfig()
rv = NS_NewChannel(getter_AddRefs(channel),
url,
nsContentUtils::GetSystemPrincipal(),
nsILoadInfo::SEC_NORMAL,
nsILoadInfo::SEC_ALLOW_CROSS_ORIGIN_DATA_IS_NULL,
nsIContentPolicy::TYPE_OTHER,
nullptr, // loadGroup
nullptr, // aCallbacks
@ -295,7 +295,7 @@ nsresult nsAutoConfig::downloadAutoConfig()
if (NS_FAILED(rv))
return rv;
rv = channel->AsyncOpen(this, nullptr);
rv = channel->AsyncOpen2(this);
if (NS_FAILED(rv)) {
readOfflineFile();
return rv;

View file

@ -647,7 +647,17 @@ AsyncCompositionManager::ApplyAsyncContentTransformToTree(Layer *aLayer)
// bug 1036967 removed the (dead) call.
#if defined(MOZ_ANDROID_APZ)
if (metrics.IsRootContent()) {
bool rootContentLayer = metrics.IsRootContent();
#ifdef MOZ_B2GDROID
// B2GDroid is a special snowflake since it doesn't seem to have any root
// content document. However we still need to send a setFirstPaintViewport
// message, so we use the root of the layer tree as the root content layer
// instead. For the most part this should work fine; the Java code will just
// think the root layer is the "main" content, which in a manner of speaking,
// it is.
rootContentLayer = (aLayer->GetParent() == nullptr);
#endif // MOZ_B2GDROID
if (rootContentLayer) {
if (mIsFirstPaint) {
CSSToLayerScale geckoZoom = metrics.LayersPixelsPerCSSPixel().ToScaleFactor();
LayerIntPoint scrollOffsetLayerPixels = RoundedToInt(metrics.GetScrollOffset() * geckoZoom);
@ -659,7 +669,7 @@ AsyncCompositionManager::ApplyAsyncContentTransformToTree(Layer *aLayer)
mIsFirstPaint = false;
mLayersUpdated = false;
}
#endif
#endif // MOZ_ANDROID_APZ
// Transform the current local clip by this APZC's async transform. If we're
// using containerful scrolling, then the clip is not part of the scrolled

View file

@ -55,6 +55,13 @@ DriverCrashGuard::InitializeIfNeeded()
void
DriverCrashGuard::Initialize()
{
#ifdef NIGHTLY_BUILD
// We only use the crash guard on non-nightly channels, since the nightly
// channel is for development and having graphics features perma-disabled
// is rather annoying.
return;
#endif
// Using DriverCrashGuard off the main thread currently does not work. Under
// e10s it could conceivably work by dispatching the IPC calls via the main
// thread. In the parent process this would be harder. For now, we simply
@ -63,6 +70,12 @@ DriverCrashGuard::Initialize()
return;
}
// Check to see if all guards have been disabled through the environment.
static bool sAllGuardsDisabled = !!PR_GetEnv("MOZ_DISABLE_CRASH_GUARD");
if (sAllGuardsDisabled) {
return;
}
mGfxInfo = services::GetGfxInfo();
if (XRE_IsContentProcess()) {
@ -469,6 +482,18 @@ GLContextCrashGuard::GLContextCrashGuard(dom::ContentParent* aContentParent)
{
}
void
GLContextCrashGuard::Initialize()
{
if (XRE_IsContentProcess()) {
// Disable the GL crash guard in content processes, since we're not going
// to lose the entire browser and we don't want to hinder WebGL availability.
return;
}
DriverCrashGuard::Initialize();
}
bool
GLContextCrashGuard::UpdateEnvironment()
{
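
Two behavioral notes from this hunk: Initialize() now returns early on Nightly, leaving all guards inactive there, and setting MOZ_DISABLE_CRASH_GUARD in the environment disables every guard (checked once per process via the function-local static). The broader DriverCrashGuard pattern is, from context rather than this hunk, roughly: persist a flag before attempting a risky driver operation and clear it on success, so a flag still set at the next startup means the previous run crashed inside the guarded section. A generic sketch of that idea, with a file-backed flag standing in for the real persisted state and all names hypothetical:

#include <cstdio>

// Stand-in for the guard's persisted state; the real class does not use a
// file, this only models "survives a crash of the process".
struct PersistentFlag {
  const char* path;
  bool Get() const {
    FILE* f = fopen(path, "r");
    if (f)
      fclose(f);
    return f != nullptr;
  }
  void Set(bool aOn) {
    if (aOn) {
      if (FILE* f = fopen(path, "w"))
        fclose(f);
    } else {
      remove(path);
    }
  }
};

class ScopedCrashGuard {
 public:
  explicit ScopedCrashGuard(PersistentFlag& aFlag) : mFlag(aFlag) {
    // A flag surviving from a previous run means that run crashed inside
    // the guarded section, so the caller should disable the feature.
    mCrashedLastRun = mFlag.Get();
    mFlag.Set(true);
  }
  ~ScopedCrashGuard() { mFlag.Set(false); }  // reached only on clean exit
  bool CrashedLastRun() const { return mCrashedLastRun; }

 private:
  PersistentFlag& mFlag;
  bool mCrashedLastRun;
};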

View file

@ -147,6 +147,7 @@ class GLContextCrashGuard final : public DriverCrashGuard
{
public:
explicit GLContextCrashGuard(dom::ContentParent* aContentParent = nullptr);
void Initialize() override;
protected:
bool UpdateEnvironment() override;

View file

@ -790,7 +790,7 @@ IPDL union type."""
if self.recursive:
return self.ptrToType()
else:
return TypeArray(Type('char'), ExprSizeof(self.internalType()))
return Type('mozilla::AlignedStorage2', T=self.internalType())
def unionValue(self):
# NB: knows that Union's storage C union is named |mValue|
@ -852,14 +852,14 @@ IPDL union type."""
if self.recursive:
return v
else:
return ExprCast(ExprAddrOf(v), self.ptrToType(), reinterpret=1)
return ExprCall(ExprSelect(v, '.', 'addr'))
def constptrToSelfExpr(self):
"""|*constptrToSelfExpr()| has type |self.constType()|"""
v = self.unionValue()
if self.recursive:
return v
return ExprCast(ExprAddrOf(v), self.constPtrToType(), reinterpret=1)
return ExprCall(ExprSelect(v, '.', 'addr'))
def ptrToInternalType(self):
t = self.ptrToType()
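
The codegen change above swaps the raw char[sizeof(T)] backing store for non-recursive union members to mozilla::AlignedStorage2, whose addr() yields a correctly typed and correctly aligned T*, replacing the old reinterpret_cast of the char buffer. A minimal C++ sketch of the pattern the generated code now follows, with AlignedSlot standing in for the MFBT helper:

#include <new>
#include <type_traits>

// Stand-in for mozilla::AlignedStorage2<T>: uninitialized storage with the
// right size and alignment for T, exposed through a typed addr() accessor.
template <typename T>
class AlignedSlot {
  typename std::aligned_storage<sizeof(T), alignof(T)>::type mStorage;

 public:
  T* addr() { return reinterpret_cast<T*>(&mStorage); }
  const T* addr() const { return reinterpret_cast<const T*>(&mStorage); }
};

struct Payload { int a; double b; };

int main() {
  AlignedSlot<Payload> slot;
  new (slot.addr()) Payload{1, 2.0};  // ptrToSelfExpr() -> slot.addr()
  slot.addr()->a += 1;
  slot.addr()->~Payload();  // explicit destroy, as the generated union does
}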

View file

@ -653,6 +653,8 @@ class HashMapEntry
template <class> friend class detail::HashTableEntry;
template <class, class, class, class> friend class HashMap;
Key & mutableKey() { return key_; }
public:
template<typename KeyInput, typename ValueInput>
HashMapEntry(KeyInput&& k, ValueInput&& v)
@ -668,10 +670,9 @@ class HashMapEntry
typedef Key KeyType;
typedef Value ValueType;
const Key& key() const { return key_; }
Key& mutableKey() { return key_; }
const Value& value() const { return value_; }
Value& value() { return value_; }
const Key & key() const { return key_; }
const Value & value() const { return value_; }
Value & value() { return value_; }
private:
HashMapEntry(const HashMapEntry&) = delete;
@ -740,7 +741,6 @@ class HashTableEntry
}
T& get() { MOZ_ASSERT(isLive()); return *mem.addr(); }
NonConstT& getMutable() { MOZ_ASSERT(isLive()); return *mem.addr(); }
bool isFree() const { return keyHash == sFreeKey; }
void clearLive() { MOZ_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); }
@ -980,16 +980,6 @@ class HashTable : private AllocPolicy
#endif
}
NonConstT& mutableFront() {
MOZ_ASSERT(!this->empty());
#ifdef JS_DEBUG
MOZ_ASSERT(this->validEntry);
MOZ_ASSERT(this->generation == this->Range::table_->generation());
MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
#endif
return this->cur->getMutable();
}
// Removes the |front()| element and re-inserts it into the table with
// a new key at the new Lookup position. |front()| is invalid after
// this operation until the next call to |popFront()|.
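
This hunk narrows the mutation surface: HashMapEntry::mutableKey() becomes friend-only and Range::mutableFront() disappears, so the only supported way to change a key is the rekey operation documented just above, which removes the front entry and re-inserts it at its new position. The invariant being protected is that mutating a key in place leaves the entry filed under a stale hash. A hedged illustration using a standard container, where keys are const for exactly this reason:

#include <string>
#include <unordered_map>

// Changing a key must be erase-then-reinsert so the entry moves to the
// bucket matching its new hash; editing the key in place would corrupt
// lookups. (Illustrative helper, not SpiderMonkey API.)
void Rekey(std::unordered_map<std::string, int>& aMap,
           const std::string& aOldKey, const std::string& aNewKey) {
  auto it = aMap.find(aOldKey);
  if (it == aMap.end())
    return;
  int value = it->second;       // keep the payload
  aMap.erase(it);               // remove at the old hash position
  aMap.emplace(aNewKey, value); // re-insert at the new position
}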

View file

@ -54,8 +54,7 @@ const size_t ChunkMarkBitmapOffset = 1032352;
const size_t ChunkMarkBitmapBits = 129024;
#endif
const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
const size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);
const size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;
const size_t ChunkLocationOffset = ChunkSize - 2 * sizeof(void*) - sizeof(uint64_t);
const size_t ArenaZoneOffset = 0;
/*

View file

@ -584,8 +584,7 @@ struct ZoneStats
macro(Other, GCHeapUsed, objectGroupsGCHeap) \
macro(Other, MallocHeap, objectGroupsMallocHeap) \
macro(Other, MallocHeap, typePool) \
macro(Other, MallocHeap, baselineStubsOptimized) \
macro(Other, MallocHeap, uniqueIdMap)
macro(Other, MallocHeap, baselineStubsOptimized)
ZoneStats()
: FOR_EACH_SIZE(ZERO_SIZE)

View file

@ -756,7 +756,9 @@ class NodeBuilder
bool generatorExpression(HandleValue body, NodeVector& blocks, HandleValue filter,
bool isLegacy, TokenPos* pos, MutableHandleValue dst);
bool newTargetExpression(TokenPos* pos, MutableHandleValue dst);
bool metaProperty(HandleValue meta, HandleValue property, TokenPos* pos, MutableHandleValue dst);
bool super(TokenPos* pos, MutableHandleValue dst);
/*
* declarations
@ -1819,20 +1821,33 @@ NodeBuilder::classDefinition(bool expr, HandleValue name, HandleValue heritage,
return callback(cb, name, heritage, block, pos, dst);
return newNode(type, pos,
"name", name,
"heritage", heritage,
"id", name,
"superClass", heritage,
"body", block,
dst);
}
bool
NodeBuilder::newTargetExpression(TokenPos* pos, MutableHandleValue dst)
NodeBuilder::metaProperty(HandleValue meta, HandleValue property, TokenPos* pos, MutableHandleValue dst)
{
RootedValue cb(cx, callbacks[AST_NEWTARGET_EXPR]);
RootedValue cb(cx, callbacks[AST_METAPROPERTY]);
if (!cb.isNull())
return callback(cb, meta, property, pos, dst);
return newNode(AST_METAPROPERTY, pos,
"meta", meta,
"property", property,
dst);
}
bool
NodeBuilder::super(TokenPos* pos, MutableHandleValue dst)
{
RootedValue cb(cx, callbacks[AST_SUPER]);
if (!cb.isNull())
return callback(cb, pos, dst);
return newNode(AST_NEWTARGET_EXPR, pos, dst);
return newNode(AST_SUPER, pos, dst);
}
namespace {
@ -3066,9 +3081,7 @@ ASTSerializer::expression(ParseNode* pn, MutableHandleValue dst)
case PNK_DELETENAME:
case PNK_DELETEPROP:
case PNK_DELETESUPERPROP:
case PNK_DELETEELEM:
case PNK_DELETESUPERELEM:
case PNK_DELETEEXPR:
case PNK_TYPEOFNAME:
case PNK_TYPEOFEXPR:
@ -3129,21 +3142,20 @@ ASTSerializer::expression(ParseNode* pn, MutableHandleValue dst)
{
MOZ_ASSERT(pn->pn_pos.encloses(pn->pn_expr->pn_pos));
RootedValue expr(cx), id(cx);
RootedValue expr(cx);
RootedValue propname(cx);
RootedAtom pnAtom(cx, pn->pn_atom);
return expression(pn->pn_expr, &expr) &&
identifier(pnAtom, nullptr, &id) &&
builder.memberExpression(false, expr, id, &pn->pn_pos, dst);
}
case PNK_SUPERPROP:
{
RootedValue superBase(cx), id(cx);
RootedAtom superAtom(cx, cx->names().super);
RootedAtom pnAtom(cx, pn->pn_atom);
return identifier(superAtom, nullptr, &superBase) &&
identifier(pnAtom, nullptr, &id) &&
builder.memberExpression(false, superBase, id, &pn->pn_pos, dst);
if (pn->as<PropertyAccess>().isSuper()) {
if (!builder.super(&pn->pn_expr->pn_pos, &expr))
return false;
} else {
if (!expression(pn->pn_expr, &expr))
return false;
}
return identifier(pnAtom, nullptr, &propname) &&
builder.memberExpression(false, expr, propname, &pn->pn_pos, dst);
}
case PNK_ELEM:
@ -3152,22 +3164,19 @@ ASTSerializer::expression(ParseNode* pn, MutableHandleValue dst)
MOZ_ASSERT(pn->pn_pos.encloses(pn->pn_right->pn_pos));
RootedValue left(cx), right(cx);
return expression(pn->pn_left, &left) &&
expression(pn->pn_right, &right) &&
if (pn->as<PropertyByValue>().isSuper()) {
if (!builder.super(&pn->pn_left->pn_pos, &left))
return false;
} else {
if (!expression(pn->pn_left, &left))
return false;
}
return expression(pn->pn_right, &right) &&
builder.memberExpression(true, left, right, &pn->pn_pos, dst);
}
case PNK_SUPERELEM:
{
MOZ_ASSERT(pn->pn_pos.encloses(pn->pn_kid->pn_pos));
RootedValue superBase(cx), expr(cx);
RootedAtom superAtom(cx, cx->names().super);
return identifier(superAtom, nullptr, &superBase) &&
expression(pn->pn_kid, &expr) &&
builder.memberExpression(true, superBase, expr, &pn->pn_pos, dst);
}
case PNK_CALLSITEOBJ:
{
NodeVector raw(cx);
@ -3312,7 +3321,22 @@ ASTSerializer::expression(ParseNode* pn, MutableHandleValue dst)
return classDefinition(pn, true, dst);
case PNK_NEWTARGET:
return builder.newTargetExpression(&pn->pn_pos, dst);
{
MOZ_ASSERT(pn->pn_left->isKind(PNK_POSHOLDER));
MOZ_ASSERT(pn->pn_pos.encloses(pn->pn_left->pn_pos));
MOZ_ASSERT(pn->pn_right->isKind(PNK_POSHOLDER));
MOZ_ASSERT(pn->pn_pos.encloses(pn->pn_right->pn_pos));
RootedValue newIdent(cx);
RootedValue targetIdent(cx);
RootedAtom newStr(cx, cx->names().new_);
RootedAtom targetStr(cx, cx->names().target);
return identifier(newStr, &pn->pn_left->pn_pos, &newIdent) &&
identifier(targetStr, &pn->pn_right->pn_pos, &targetIdent) &&
builder.metaProperty(newIdent, targetIdent, &pn->pn_pos, dst);
}
default:
LOCAL_NOT_REACHED("unexpected expression type");

View file

@ -1,38 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_LockGuard_h
#define js_LockGuard_h
#include "mozilla/GuardObjects.h"
namespace js {
// An implementation of C++11's std::lock_guard, enhanced with a guard object
// to help with correct usage.
template <typename LockType>
class LockGuard
{
LockType& lockRef_;
MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
public:
explicit LockGuard(LockType& lock
MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
: lockRef_(lock)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
lockRef_.lock();
}
~LockGuard() {
lockRef_.unlock();
}
};
} // namespace js
#endif // js_LockGuard_h

View file

@ -1,40 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef js_SpinLock_h
#define js_SpinLock_h
#include "mozilla/Atomics.h"
#include "ds/LockGuard.h"
namespace js {
// A trivial spin-lock implementation. Extremely fast when rarely-contended.
class SpinLock
{
mozilla::Atomic<bool, mozilla::ReleaseAcquire> locked_;
public:
SpinLock() : locked_(false) {}
void lock() {
do {
while (locked_)
; // Spin until the lock seems free.
} while (!locked_.compareExchange(false, true)); // Atomically take the lock.
}
void unlock() {
locked_ = false;
}
};
using AutoSpinLock = LockGuard<SpinLock>;
} // namespace js
#endif // js_SpinLock_h
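
The deleted SpinLock was a test-and-test-and-set lock: it spins on a plain read until the lock looks free, then attempts the compareExchange, which keeps the cache line in a shared state while contended. For reference, a standard-library equivalent of the same primitive (an illustration, not part of this patch):

#include <atomic>

class SpinLock {
  std::atomic_flag mLocked = ATOMIC_FLAG_INIT;

 public:
  void lock() {
    // test_and_set spins directly; the removed js::SpinLock additionally
    // read-spun before its compareExchange to reduce cache-line traffic.
    while (mLocked.test_and_set(std::memory_order_acquire)) {
      // busy-wait until unlock() clears the flag
    }
  }
  void unlock() { mLocked.clear(std::memory_order_release); }
};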

View file

@ -1915,7 +1915,6 @@ BytecodeEmitter::checkSideEffects(ParseNode* pn, bool* answer)
switch (pn->getKind()) {
// Trivial cases with no side effects.
case PNK_NEWTARGET:
case PNK_NOP:
case PNK_STRING:
case PNK_TEMPLATE_STRING:
@ -1932,6 +1931,14 @@ BytecodeEmitter::checkSideEffects(ParseNode* pn, bool* answer)
*answer = false;
return true;
// Trivial binary nodes with more token pos holders.
case PNK_NEWTARGET:
MOZ_ASSERT(pn->isArity(PN_BINARY));
MOZ_ASSERT(pn->pn_left->isKind(PNK_POSHOLDER));
MOZ_ASSERT(pn->pn_right->isKind(PNK_POSHOLDER));
*answer = false;
return true;
case PNK_BREAK:
case PNK_CONTINUE:
case PNK_DEBUGGER:
@ -1940,12 +1947,6 @@ BytecodeEmitter::checkSideEffects(ParseNode* pn, bool* answer)
return true;
// Watch out for getters!
case PNK_SUPERPROP:
MOZ_ASSERT(pn->isArity(PN_NULLARY));
*answer = true;
return true;
// Again, getters.
case PNK_DOT:
MOZ_ASSERT(pn->isArity(PN_NAME));
*answer = true;
@ -2011,9 +2012,7 @@ BytecodeEmitter::checkSideEffects(ParseNode* pn, bool* answer)
// Deletion generally has side effects, even if isolated cases have none.
case PNK_DELETENAME:
case PNK_DELETEPROP:
case PNK_DELETESUPERPROP:
case PNK_DELETEELEM:
case PNK_DELETESUPERELEM:
MOZ_ASSERT(pn->isArity(PN_UNARY));
*answer = true;
return true;
@ -2121,12 +2120,6 @@ BytecodeEmitter::checkSideEffects(ParseNode* pn, bool* answer)
*answer = true;
return true;
// Again, getters.
case PNK_SUPERELEM:
MOZ_ASSERT(pn->isArity(PN_UNARY));
*answer = true;
return true;
// These affect visible names in this code, or in other code.
case PNK_IMPORT:
case PNK_EXPORT_FROM:
@ -2320,6 +2313,7 @@ BytecodeEmitter::checkSideEffects(ParseNode* pn, bool* answer)
case PNK_EXPORT_SPEC_LIST: // by PNK_EXPORT
case PNK_EXPORT_SPEC: // by PNK_EXPORT
case PNK_CALLSITEOBJ: // by PNK_TAGGED_TEMPLATE
case PNK_POSHOLDER: // by PNK_NEWTARGET
MOZ_CRASH("handled by parent nodes");
case PNK_LIMIT: // invalid sentinel value
@ -2552,6 +2546,9 @@ BytecodeEmitter::emitPropLHS(ParseNode* pn)
MOZ_ASSERT(pn->isKind(PNK_DOT));
ParseNode* pn2 = pn->maybeExpr();
// Don't want super sneaking in here.
MOZ_ASSERT(!pn2->isKind(PNK_POSHOLDER));
/*
* If the object operand is also a dotted property reference, reverse the
* list linked via pn_expr temporarily so we can iterate over it from the
@ -2568,7 +2565,7 @@ BytecodeEmitter::emitPropLHS(ParseNode* pn)
MOZ_ASSERT(!pndot->isUsed());
pndown = pndot->pn_expr;
pndot->pn_expr = pnup;
if (!pndown->isKind(PNK_DOT))
if (!pndown->isKind(PNK_DOT) || pndown->as<PropertyAccess>().isSuper())
break;
pnup = pndot;
pndot = pndown;
@ -2648,13 +2645,21 @@ BytecodeEmitter::emitPropIncDec(ParseNode* pn)
MOZ_ASSERT(pn->pn_kid->isKind(PNK_DOT));
bool post;
bool isSuper = pn->pn_kid->as<PropertyAccess>().isSuper();
JSOp binop = GetIncDecInfo(pn->getKind(), &post);
if (!emitPropLHS(pn->pn_kid)) // OBJ
return false;
if (!emit1(JSOP_DUP)) // OBJ OBJ
return false;
if (!emitAtomOp(pn->pn_kid, JSOP_GETPROP)) // OBJ V
if (isSuper) {
if (!emitSuperPropLHS()) // THIS OBJ
return false;
if (!emit1(JSOP_DUP2)) // THIS OBJ THIS OBJ
return false;
} else {
if (!emitPropLHS(pn->pn_kid)) // OBJ
return false;
if (!emit1(JSOP_DUP)) // OBJ OBJ
return false;
}
if (!emitAtomOp(pn->pn_kid, isSuper? JSOP_GETPROP_SUPER : JSOP_GETPROP)) // OBJ V
return false;
if (!emit1(JSOP_POS)) // OBJ N
return false;
@ -2666,13 +2671,20 @@ BytecodeEmitter::emitPropIncDec(ParseNode* pn)
return false;
if (post) {
if (!emit2(JSOP_PICK, 2)) // N? N+1 OBJ
if (!emit2(JSOP_PICK, 2 + isSuper)) // N? N+1 OBJ
return false;
if (!emit1(JSOP_SWAP)) // N? OBJ N+1
if (!emit1(JSOP_SWAP)) // N? OBJ N+1
return false;
if (isSuper) {
if (!emit2(JSOP_PICK, 3)) // N THIS N+1 OBJ
return false;
if (!emit1(JSOP_SWAP)) // N THIS OBJ N+1
return false;
}
}
JSOp setOp = sc->strict() ? JSOP_STRICTSETPROP : JSOP_SETPROP;
JSOp setOp = isSuper ? sc->strict() ? JSOP_STRICTSETPROP_SUPER : JSOP_SETPROP_SUPER
: sc->strict() ? JSOP_STRICTSETPROP : JSOP_SETPROP;
if (!emitAtomOp(pn->pn_kid, setOp)) // N? N+1
return false;
if (post && !emit1(JSOP_POP)) // RESULT
@ -2681,50 +2693,6 @@ BytecodeEmitter::emitPropIncDec(ParseNode* pn)
return true;
}
bool
BytecodeEmitter::emitSuperPropIncDec(ParseNode* pn)
{
MOZ_ASSERT(pn->pn_kid->isKind(PNK_SUPERPROP));
bool post;
JSOp binop = GetIncDecInfo(pn->getKind(), &post);
if (!emitSuperPropLHS()) // THIS OBJ
return false;
if (!emit1(JSOP_DUP2)) // THIS OBJ THIS OBJ
return false;
if (!emitAtomOp(pn->pn_kid, JSOP_GETPROP_SUPER)) // THIS OBJ V
return false;
if (!emit1(JSOP_POS)) // THIS OBJ N
return false;
if (post && !emit1(JSOP_DUP)) // THIS OBJ N? N
return false;
if (!emit1(JSOP_ONE)) // THIS OBJ N? N 1
return false;
if (!emit1(binop)) // THIS OBJ N? N+1
return false;
if (post) {
if (!emit2(JSOP_PICK, 3)) // OBJ N N+1 THIS
return false;
if (!emit1(JSOP_SWAP)) // OBJ N THIS N+1
return false;
if (!emit2(JSOP_PICK, 3)) // N THIS N+1 OBJ
return false;
if (!emit1(JSOP_SWAP)) // N THIS OBJ N+1
return false;
}
JSOp setOp = sc->strict() ? JSOP_STRICTSETPROP_SUPER : JSOP_SETPROP_SUPER;
if (!emitAtomOp(pn->pn_kid, setOp)) // N? N+1
return false;
if (post && !emit1(JSOP_POP)) // RESULT
return false;
return true;
}
bool
BytecodeEmitter::emitNameIncDec(ParseNode* pn)
{
@ -2786,14 +2754,14 @@ BytecodeEmitter::emitElemOperands(ParseNode* pn, JSOp op)
bool
BytecodeEmitter::emitSuperElemOperands(ParseNode* pn, SuperElemOptions opts)
{
MOZ_ASSERT(pn->isKind(PNK_SUPERELEM));
MOZ_ASSERT(pn->isKind(PNK_ELEM) && pn->as<PropertyByValue>().isSuper());
// The ordering here is somewhat screwy. We need to evaluate the propval
// first, by spec. Do a little dance to not emit more than one JSOP_THIS.
// Since JSOP_THIS might throw in derived class constructors, we cannot
// just push it earlier as the receiver. We have to swap it down instead.
if (!emitTree(pn->pn_kid))
if (!emitTree(pn->pn_right))
return false;
// We need to convert the key to an object id first, so that we do not do
@ -2863,99 +2831,77 @@ BytecodeEmitter::emitElemIncDec(ParseNode* pn)
{
MOZ_ASSERT(pn->pn_kid->isKind(PNK_ELEM));
if (!emitElemOperands(pn->pn_kid, JSOP_GETELEM))
return false;
bool isSuper = pn->pn_kid->as<PropertyByValue>().isSuper();
if (isSuper) {
if (!emitSuperElemOperands(pn->pn_kid, SuperElem_IncDec))
return false;
} else {
if (!emitElemOperands(pn->pn_kid, JSOP_GETELEM))
return false;
}
bool post;
JSOp binop = GetIncDecInfo(pn->getKind(), &post);
/*
* We need to convert the key to an object id first, so that we do not do
* it inside both the GETELEM and the SETELEM.
*/
// OBJ KEY*
if (!emit1(JSOP_TOID)) // OBJ KEY
JSOp getOp;
if (isSuper) {
// There's no such thing as JSOP_DUP3, so we have to be creative.
// Note that pushing things again is no fewer JSOps.
if (!emitDupAt(2)) // KEY THIS OBJ KEY
return false;
if (!emitDupAt(2)) // KEY THIS OBJ KEY THIS
return false;
if (!emitDupAt(2)) // KEY THIS OBJ KEY THIS OBJ
return false;
getOp = JSOP_GETELEM_SUPER;
} else {
// We need to convert the key to an object id first, so that we do not do
// it inside both the GETELEM and the SETELEM. In the super case, this is
// done by emitSuperElemOperands.
// OBJ KEY*
if (!emit1(JSOP_TOID)) // OBJ KEY
return false;
if (!emit1(JSOP_DUP2)) // OBJ KEY OBJ KEY
return false;
getOp = JSOP_GETELEM;
}
if (!emitElemOpBase(getOp)) // OBJ KEY V
return false;
if (!emit1(JSOP_DUP2)) // OBJ KEY OBJ KEY
if (!emit1(JSOP_POS)) // OBJ KEY N
return false;
if (!emitElemOpBase(JSOP_GETELEM)) // OBJ KEY V
if (post && !emit1(JSOP_DUP)) // OBJ KEY N? N
return false;
if (!emit1(JSOP_POS)) // OBJ KEY N
if (!emit1(JSOP_ONE)) // OBJ KEY N? N 1
return false;
if (post && !emit1(JSOP_DUP)) // OBJ KEY N? N
return false;
if (!emit1(JSOP_ONE)) // OBJ KEY N? N 1
return false;
if (!emit1(binop)) // OBJ KEY N? N+1
if (!emit1(binop)) // OBJ KEY N? N+1
return false;
if (post) {
if (!emit2(JSOP_PICK, 3)) // KEY N N+1 OBJ
if (isSuper) {
// We have one more value to rotate around, because of |this|
// on the stack
if (!emit2(JSOP_PICK, 4))
return false;
}
if (!emit2(JSOP_PICK, 3 + isSuper)) // KEY N N+1 OBJ
return false;
if (!emit2(JSOP_PICK, 3)) // N N+1 OBJ KEY
if (!emit2(JSOP_PICK, 3 + isSuper)) // N N+1 OBJ KEY
return false;
if (!emit2(JSOP_PICK, 2)) // N OBJ KEY N+1
if (!emit2(JSOP_PICK, 2 + isSuper)) // N OBJ KEY N+1
return false;
}
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM;
if (!emitElemOpBase(setOp)) // N? N+1
JSOp setOp = isSuper ? (sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER)
: (sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM);
if (!emitElemOpBase(setOp)) // N? N+1
return false;
if (post && !emit1(JSOP_POP)) // RESULT
if (post && !emit1(JSOP_POP)) // RESULT
return false;
return true;
}
bool
BytecodeEmitter::emitSuperElemIncDec(ParseNode* pn)
{
MOZ_ASSERT(pn->pn_kid->isKind(PNK_SUPERELEM));
if (!emitSuperElemOperands(pn->pn_kid, SuperElem_IncDec))
return false;
bool post;
JSOp binop = GetIncDecInfo(pn->getKind(), &post);
// There's no such thing as JSOP_DUP3, so we have to be creative.
// Note that pushing things again is no fewer JSOps.
if (!emitDupAt(2)) // KEY THIS OBJ KEY
return false;
if (!emitDupAt(2)) // KEY THIS OBJ KEY THIS
return false;
if (!emitDupAt(2)) // KEY THIS OBJ KEY THIS OBJ
return false;
if (!emitElemOpBase(JSOP_GETELEM_SUPER)) // KEY THIS OBJ V
return false;
if (!emit1(JSOP_POS)) // KEY THIS OBJ N
return false;
if (post && !emit1(JSOP_DUP)) // KEY THIS OBJ N? N
return false;
if (!emit1(JSOP_ONE)) // KEY THIS OBJ N? N 1
return false;
if (!emit1(binop)) // KEY THIS OBJ N? N+1
return false;
if (post) {
if (!emit2(JSOP_PICK, 4)) // THIS OBJ N N+1 KEY
return false;
if (!emit2(JSOP_PICK, 4)) // OBJ N N+1 KEY THIS
return false;
if (!emit2(JSOP_PICK, 4)) // N N+1 KEY THIS OBJ
return false;
if (!emit2(JSOP_PICK, 3)) // N KEY THIS OBJ N+1
return false;
}
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER;
if (!emitElemOpBase(setOp)) // N? N+1
return false;
if (post && !emit1(JSOP_POP)) // RESULT
return false;
return true;
}
bool
BytecodeEmitter::emitNumberOp(double dval)
{
@ -3783,24 +3729,20 @@ BytecodeEmitter::emitDestructuringLHS(ParseNode* target, VarEmitOption emitOptio
// In `[a.x] = [b]`, per spec, `b` is evaluated before `a`. Then we
// need a property set -- but the operands are on the stack in the
// wrong order for JSOP_SETPROP, so we have to add a JSOP_SWAP.
if (!emitTree(target->pn_expr))
return false;
if (!emit1(JSOP_SWAP))
return false;
JSOp setOp = sc->strict() ? JSOP_STRICTSETPROP : JSOP_SETPROP;
if (!emitAtomOp(target, setOp))
return false;
break;
}
case PNK_SUPERPROP:
{
// See comment above at PNK_DOT. Pick up the pushed value, to fix ordering.
if (!emitSuperPropLHS())
return false;
if (!emit2(JSOP_PICK, 2))
return false;
JSOp setOp = sc->strict() ? JSOP_STRICTSETPROP_SUPER : JSOP_SETPROP_SUPER;
JSOp setOp;
if (target->as<PropertyAccess>().isSuper()) {
if (!emitSuperPropLHS())
return false;
if (!emit2(JSOP_PICK, 2))
return false;
setOp = sc->strict() ? JSOP_STRICTSETPROP_SUPER : JSOP_SETPROP_SUPER;
} else {
if (!emitTree(target->pn_expr))
return false;
if (!emit1(JSOP_SWAP))
return false;
setOp = sc->strict() ? JSOP_STRICTSETPROP : JSOP_SETPROP;
}
if (!emitAtomOp(target, setOp))
return false;
break;
@ -3811,19 +3753,15 @@ BytecodeEmitter::emitDestructuringLHS(ParseNode* target, VarEmitOption emitOptio
// See the comment at `case PNK_DOT:` above. This case,
// `[a[x]] = [b]`, is handled much the same way. The JSOP_SWAP
// is emitted by emitElemOperands.
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM;
if (!emitElemOp(target, setOp))
return false;
break;
}
case PNK_SUPERELEM:
{
// See comment above in the PNK_ELEM case. Just as there, the
// reordering is handled by emitSuperElemOp.
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER;
if (!emitSuperElemOp(target, setOp))
return false;
if (target->as<PropertyByValue>().isSuper()) {
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER;
if (!emitSuperElemOp(target, setOp))
return false;
} else {
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM;
if (!emitElemOp(target, setOp))
return false;
}
break;
}
@ -4448,31 +4386,31 @@ BytecodeEmitter::emitAssignment(ParseNode* lhs, JSOp op, ParseNode* rhs)
}
break;
case PNK_DOT:
if (!emitTree(lhs->expr()))
return false;
offset++;
if (!makeAtomIndex(lhs->pn_atom, &atomIndex))
return false;
break;
case PNK_SUPERPROP:
if (!emitSuperPropLHS())
return false;
offset += 2;
if (lhs->as<PropertyAccess>().isSuper()) {
if (!emitSuperPropLHS())
return false;
offset += 2;
} else {
if (!emitTree(lhs->expr()))
return false;
offset += 1;
}
if (!makeAtomIndex(lhs->pn_atom, &atomIndex))
return false;
break;
case PNK_ELEM:
MOZ_ASSERT(lhs->isArity(PN_BINARY));
if (!emitTree(lhs->pn_left))
return false;
if (!emitTree(lhs->pn_right))
return false;
offset += 2;
break;
case PNK_SUPERELEM:
if (!emitSuperElemOperands(lhs))
return false;
offset += 3;
if (lhs->as<PropertyByValue>().isSuper()) {
if (!emitSuperElemOperands(lhs))
return false;
offset += 3;
} else {
if (!emitTree(lhs->pn_left))
return false;
if (!emitTree(lhs->pn_right))
return false;
offset += 2;
}
break;
case PNK_ARRAY:
case PNK_OBJECT:
@ -4530,35 +4468,40 @@ BytecodeEmitter::emitAssignment(ParseNode* lhs, JSOp op, ParseNode* rhs)
}
break;
case PNK_DOT: {
if (!emit1(JSOP_DUP))
return false;
bool isLength = (lhs->pn_atom == cx->names().length);
if (!emitIndex32(isLength ? JSOP_LENGTH : JSOP_GETPROP, atomIndex))
JSOp getOp;
if (lhs->as<PropertyAccess>().isSuper()) {
if (!emit1(JSOP_DUP2))
return false;
getOp = JSOP_GETPROP_SUPER;
} else {
if (!emit1(JSOP_DUP))
return false;
bool isLength = (lhs->pn_atom == cx->names().length);
getOp = isLength ? JSOP_LENGTH : JSOP_GETPROP;
}
if (!emitIndex32(getOp, atomIndex))
return false;
break;
}
case PNK_SUPERPROP:
if (!emit1(JSOP_DUP2))
return false;
if (!emitIndex32(JSOP_GETPROP_SUPER, atomIndex))
return false;
break;
case PNK_ELEM:
if (!emit1(JSOP_DUP2))
return false;
if (!emitElemOpBase(JSOP_GETELEM))
return false;
break;
case PNK_SUPERELEM:
if (!emitDupAt(2))
return false;
if (!emitDupAt(2))
return false;
if (!emitDupAt(2))
return false;
if (!emitElemOpBase(JSOP_GETELEM_SUPER))
case PNK_ELEM: {
JSOp elemOp;
if (lhs->as<PropertyByValue>().isSuper()) {
if (!emitDupAt(2))
return false;
if (!emitDupAt(2))
return false;
if (!emitDupAt(2))
return false;
elemOp = JSOP_GETELEM_SUPER;
} else {
if (!emit1(JSOP_DUP2))
return false;
elemOp = JSOP_GETELEM;
}
if (!emitElemOpBase(elemOp))
return false;
break;
}
case PNK_CALL:
/*
* We just emitted a JSOP_SETCALL (which will always throw) and
@ -4617,14 +4560,9 @@ BytecodeEmitter::emitAssignment(ParseNode* lhs, JSOp op, ParseNode* rhs)
break;
case PNK_DOT:
{
JSOp setOp = sc->strict() ? JSOP_STRICTSETPROP : JSOP_SETPROP;
if (!emitIndexOp(setOp, atomIndex))
return false;
break;
}
case PNK_SUPERPROP:
{
JSOp setOp = sc->strict() ? JSOP_STRICTSETPROP_SUPER : JSOP_SETPROP_SUPER;
JSOp setOp = lhs->as<PropertyAccess>().isSuper() ?
(sc->strict() ? JSOP_STRICTSETPROP_SUPER : JSOP_SETPROP_SUPER) :
(sc->strict() ? JSOP_STRICTSETPROP : JSOP_SETPROP);
if (!emitIndexOp(setOp, atomIndex))
return false;
break;
@ -4635,14 +4573,9 @@ BytecodeEmitter::emitAssignment(ParseNode* lhs, JSOp op, ParseNode* rhs)
break;
case PNK_ELEM:
{
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM;
if (!emit1(setOp))
return false;
break;
}
case PNK_SUPERELEM:
{
JSOp setOp = sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER;
JSOp setOp = lhs->as<PropertyByValue>().isSuper() ?
sc->strict() ? JSOP_STRICTSETELEM_SUPER : JSOP_SETELEM_SUPER :
sc->strict() ? JSOP_STRICTSETELEM : JSOP_SETELEM;
if (!emit1(setOp))
return false;
break;
@ -6518,26 +6451,20 @@ BytecodeEmitter::emitDeleteProperty(ParseNode* node)
ParseNode* propExpr = node->pn_kid;
MOZ_ASSERT(propExpr->isKind(PNK_DOT));
if (propExpr->as<PropertyAccess>().isSuper()) {
// Still have to calculate the base, even though we are going
// to throw unconditionally, as calculating the base could also
// throw.
if (!emit1(JSOP_SUPERBASE))
return false;
return emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER);
}
JSOp delOp = sc->strict() ? JSOP_STRICTDELPROP : JSOP_DELPROP;
return emitPropOp(propExpr, delOp);
}
bool
BytecodeEmitter::emitDeleteSuperProperty(ParseNode* node)
{
MOZ_ASSERT(node->isKind(PNK_DELETESUPERPROP));
MOZ_ASSERT(node->isArity(PN_UNARY));
MOZ_ASSERT(node->pn_kid->isKind(PNK_SUPERPROP));
// Still have to calculate the base, even though we are going
// to throw unconditionally, as calculating the base could also
// throw.
if (!emit1(JSOP_SUPERBASE))
return false;
return emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER);
}
bool
BytecodeEmitter::emitDeleteElement(ParseNode* node)
{
@ -6547,34 +6474,26 @@ BytecodeEmitter::emitDeleteElement(ParseNode* node)
ParseNode* elemExpr = node->pn_kid;
MOZ_ASSERT(elemExpr->isKind(PNK_ELEM));
if (elemExpr->as<PropertyByValue>().isSuper()) {
// Still have to calculate everything, even though we're gonna throw
// since it may have side effects
if (!emitTree(elemExpr->pn_right))
return false;
if (!emit1(JSOP_SUPERBASE))
return false;
if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER))
return false;
// Another wrinkle: Balance the stack from the emitter's point of view.
// Execution will not reach here, as the last bytecode threw.
return emit1(JSOP_POP);
}
JSOp delOp = sc->strict() ? JSOP_STRICTDELELEM : JSOP_DELELEM;
return emitElemOp(elemExpr, delOp);
}
bool
BytecodeEmitter::emitDeleteSuperElement(ParseNode* node)
{
MOZ_ASSERT(node->isKind(PNK_DELETESUPERELEM));
MOZ_ASSERT(node->isArity(PN_UNARY));
ParseNode* superElemExpr = node->pn_kid;
MOZ_ASSERT(superElemExpr->isKind(PNK_SUPERELEM));
// Still have to calculate everything, even though we're gonna throw
// since it may have side effects
MOZ_ASSERT(superElemExpr->isArity(PN_UNARY));
if (!emitTree(superElemExpr->pn_kid))
return false;
if (!emit1(JSOP_SUPERBASE))
return false;
if (!emitUint16Operand(JSOP_THROWMSG, JSMSG_CANT_DELETE_SUPER))
return false;
// Another wrinkle: Balance the stack from the emitter's point of view.
// Execution will not reach here, as the last bytecode threw.
return emit1(JSOP_POP);
}
bool
BytecodeEmitter::emitDeleteExpression(ParseNode* node)
{
@ -6734,24 +6653,26 @@ BytecodeEmitter::emitCallOrNew(ParseNode* pn)
return false;
break;
case PNK_DOT:
if (!emitPropOp(pn2, callop ? JSOP_CALLPROP : JSOP_GETPROP))
return false;
break;
case PNK_SUPERPROP:
if (!emitSuperPropOp(pn2, JSOP_GETPROP_SUPER, /* isCall = */ callop))
return false;
break;
case PNK_ELEM:
if (!emitElemOp(pn2, callop ? JSOP_CALLELEM : JSOP_GETELEM))
return false;
if (callop) {
if (!emit1(JSOP_SWAP))
if (pn2->as<PropertyAccess>().isSuper()) {
if (!emitSuperPropOp(pn2, JSOP_GETPROP_SUPER, /* isCall = */ callop))
return false;
} else {
if (!emitPropOp(pn2, callop ? JSOP_CALLPROP : JSOP_GETPROP))
return false;
}
break;
case PNK_SUPERELEM:
if (!emitSuperElemOp(pn2, JSOP_GETELEM_SUPER, /* isCall = */ callop))
return false;
case PNK_ELEM:
if (pn2->as<PropertyByValue>().isSuper()) {
if (!emitSuperElemOp(pn2, JSOP_GETELEM_SUPER, /* isCall = */ callop))
return false;
} else {
if (!emitElemOp(pn2, callop ? JSOP_CALLELEM : JSOP_GETELEM))
return false;
if (callop) {
if (!emit1(JSOP_SWAP))
return false;
}
}
break;
case PNK_FUNCTION:
/*
@ -6909,18 +6830,10 @@ BytecodeEmitter::emitIncOrDec(ParseNode* pn)
if (!emitPropIncDec(pn))
return false;
break;
case PNK_SUPERPROP:
if (!emitSuperPropIncDec(pn))
return false;
break;
case PNK_ELEM:
if (!emitElemIncDec(pn))
return false;
break;
case PNK_SUPERELEM:
if (!emitSuperElemIncDec(pn))
return false;
break;
case PNK_CALL:
MOZ_ASSERT(pn2->pn_xflags & PNX_SETCALL);
if (!emitTree(pn2))
@ -7884,38 +7797,32 @@ BytecodeEmitter::emitTree(ParseNode* pn)
ok = emitDeleteProperty(pn);
break;
case PNK_DELETESUPERPROP:
ok = emitDeleteSuperProperty(pn);
break;
case PNK_DELETEELEM:
ok = emitDeleteElement(pn);
break;
case PNK_DELETESUPERELEM:
ok = emitDeleteSuperElement(pn);
break;
case PNK_DELETEEXPR:
ok = emitDeleteExpression(pn);
break;
case PNK_DOT:
ok = emitPropOp(pn, JSOP_GETPROP);
break;
case PNK_SUPERPROP:
if (!emitSuperPropOp(pn, JSOP_GETPROP_SUPER))
return false;
if (pn->as<PropertyAccess>().isSuper()) {
if (!emitSuperPropOp(pn, JSOP_GETPROP_SUPER))
return false;
} else {
if (!emitPropOp(pn, JSOP_GETPROP))
return false;
}
break;
case PNK_ELEM:
ok = emitElemOp(pn, JSOP_GETELEM);
break;
case PNK_SUPERELEM:
if (!emitSuperElemOp(pn, JSOP_GETELEM_SUPER))
return false;
if (pn->as<PropertyByValue>().isSuper()) {
if (!emitSuperElemOp(pn, JSOP_GETELEM_SUPER))
return false;
} else {
if (!emitElemOp(pn, JSOP_GETELEM))
return false;
}
break;
case PNK_NEW:
@ -8088,6 +7995,9 @@ BytecodeEmitter::emitTree(ParseNode* pn)
return false;
break;
case PNK_POSHOLDER:
MOZ_ASSERT_UNREACHABLE("Should never try to emit PNK_POSHOLDER");
default:
MOZ_ASSERT(0);
}

View file

@ -554,9 +554,7 @@ struct BytecodeEmitter
bool emitDeleteName(ParseNode* pn);
bool emitDeleteProperty(ParseNode* pn);
bool emitDeleteSuperProperty(ParseNode* pn);
bool emitDeleteElement(ParseNode* pn);
bool emitDeleteSuperElement(ParseNode* pn);
bool emitDeleteExpression(ParseNode* pn);
// |op| must be JSOP_TYPEOF or JSOP_TYPEOFEXPR.
@ -611,11 +609,9 @@ struct BytecodeEmitter
bool emitClass(ParseNode* pn);
bool emitSuperPropLHS(bool isCall = false);
bool emitSuperPropOp(ParseNode* pn, JSOp op, bool isCall = false);
bool emitSuperPropIncDec(ParseNode* pn);
enum SuperElemOptions { SuperElem_Get, SuperElem_Set, SuperElem_Call, SuperElem_IncDec };
bool emitSuperElemOperands(ParseNode* pn, SuperElemOptions opts = SuperElem_Get);
bool emitSuperElemOp(ParseNode* pn, JSOp op, bool isCall = false);
bool emitSuperElemIncDec(ParseNode* pn);
};
} /* namespace frontend */

View file

@ -334,9 +334,7 @@ ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result)
case PNK_BITNOT:
case PNK_DELETENAME:
case PNK_DELETEPROP:
case PNK_DELETESUPERPROP:
case PNK_DELETEELEM:
case PNK_DELETESUPERELEM:
case PNK_DELETEEXPR:
case PNK_POS:
case PNK_NEG:
@ -414,9 +412,8 @@ ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result)
case PNK_CLASSMETHOD:
case PNK_CLASSMETHODLIST:
case PNK_CLASSNAMES:
case PNK_SUPERPROP:
case PNK_SUPERELEM:
case PNK_NEWTARGET:
case PNK_POSHOLDER:
MOZ_CRASH("ContainsHoistedDeclaration should have indicated false on "
"some parent node without recurring to test this node");
@ -632,9 +629,9 @@ static bool
FoldDeleteElement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_DELETEELEM) || node->isKind(PNK_DELETESUPERELEM));
MOZ_ASSERT(node->isKind(PNK_DELETEELEM));
MOZ_ASSERT(node->isArity(PN_UNARY));
MOZ_ASSERT(node->pn_kid->isKind(PNK_ELEM) || node->pn_kid->isKind(PNK_SUPERELEM));
MOZ_ASSERT(node->pn_kid->isKind(PNK_ELEM));
ParseNode*& expr = node->pn_kid;
if (!Fold(cx, &expr, parser, inGenexpLambda))
@ -646,11 +643,9 @@ FoldDeleteElement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler
//
// In principle this also applies to |super["foo"] -> super.foo|,
// but we don't constant-fold |super["foo"]| yet.
if (node->isKind(PNK_DELETEELEM)) {
MOZ_ASSERT(expr->isKind(PNK_ELEM) || expr->isKind(PNK_DOT));
if (expr->isKind(PNK_DOT))
node->setKind(PNK_DELETEPROP);
}
MOZ_ASSERT(expr->isKind(PNK_ELEM) || expr->isKind(PNK_DOT));
if (expr->isKind(PNK_DOT))
node->setKind(PNK_DELETEPROP);
return true;
}
@ -659,9 +654,9 @@ static bool
FoldDeleteProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_DELETEPROP) || node->isKind(PNK_DELETESUPERPROP));
MOZ_ASSERT(node->isKind(PNK_DELETEPROP));
MOZ_ASSERT(node->isArity(PN_UNARY));
MOZ_ASSERT(node->pn_kid->isKind(PNK_DOT) || node->pn_kid->isKind(PNK_SUPERPROP));
MOZ_ASSERT(node->pn_kid->isKind(PNK_DOT));
ParseNode*& expr = node->pn_kid;
#ifdef DEBUG
@ -1709,7 +1704,6 @@ Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bo
ParseNode* pn = *pnp;
switch (pn->getKind()) {
case PNK_NEWTARGET:
case PNK_NOP:
case PNK_REGEXP:
case PNK_STRING:
@ -1726,8 +1720,8 @@ Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bo
case PNK_GENERATOR:
case PNK_EXPORT_BATCH_SPEC:
case PNK_OBJECT_PROPERTY_NAME:
case PNK_SUPERPROP:
case PNK_FRESHENBLOCK:
case PNK_POSHOLDER:
MOZ_ASSERT(pn->isArity(PN_NULLARY));
return true;
@ -1750,11 +1744,9 @@ Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bo
return FoldDeleteExpr(cx, pn, parser, inGenexpLambda);
case PNK_DELETEELEM:
case PNK_DELETESUPERELEM:
return FoldDeleteElement(cx, pn, parser, inGenexpLambda);
case PNK_DELETEPROP:
case PNK_DELETESUPERPROP:
return FoldDeleteProperty(cx, pn, parser, inGenexpLambda);
case PNK_CONDITIONAL:
@ -1782,7 +1774,6 @@ Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bo
case PNK_MUTATEPROTO:
case PNK_COMPUTED_NAME:
case PNK_SPREAD:
case PNK_SUPERELEM:
case PNK_EXPORT:
case PNK_EXPORT_DEFAULT:
case PNK_VOID:
@ -1918,6 +1909,12 @@ Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bo
return Fold(cx, &pn->pn_left, parser, inGenexpLambda) &&
Fold(cx, &pn->pn_right, parser, inGenexpLambda);
case PNK_NEWTARGET:
MOZ_ASSERT(pn->isArity(PN_BINARY));
MOZ_ASSERT(pn->pn_left->isKind(PNK_POSHOLDER));
MOZ_ASSERT(pn->pn_right->isKind(PNK_POSHOLDER));
return true;
case PNK_CLASSNAMES:
MOZ_ASSERT(pn->isArity(PN_BINARY));
if (ParseNode*& outerBinding = pn->pn_left) {

View file

@ -73,9 +73,7 @@ class FullParseHandler
typedef Definition* DefinitionNode;
bool isPropertyAccess(ParseNode* node) {
if (node->isKind(PNK_DOT) || node->isKind(PNK_ELEM))
return true;
return node->isKind(PNK_SUPERPROP) || node->isKind(PNK_SUPERELEM);
return node->isKind(PNK_DOT) || node->isKind(PNK_ELEM);
}
bool isFunctionCall(ParseNode* node) {
@ -225,13 +223,9 @@ class FullParseHandler
if (expr->isKind(PNK_DOT))
return newUnary(PNK_DELETEPROP, JSOP_NOP, begin, expr);
if (expr->isKind(PNK_SUPERPROP))
return newUnary(PNK_DELETESUPERPROP, JSOP_NOP, begin, expr);
if (expr->isKind(PNK_ELEM))
return newUnary(PNK_DELETEELEM, JSOP_NOP, begin, expr);
if (expr->isKind(PNK_SUPERELEM))
return newUnary(PNK_DELETESUPERELEM, JSOP_NOP, begin, expr);
return newUnary(PNK_DELETEEXPR, JSOP_NOP, begin, expr);
}
@ -346,14 +340,20 @@ class FullParseHandler
ParseNode* newClassNames(ParseNode* outer, ParseNode* inner, const TokenPos& pos) {
return new_<ClassNames>(outer, inner, pos);
}
ParseNode* newSuperProperty(JSAtom* atom, const TokenPos& pos) {
return new_<SuperProperty>(atom, pos);
ParseNode* newNewTarget(ParseNode* newHolder, ParseNode* targetHolder) {
return new_<BinaryNode>(PNK_NEWTARGET, JSOP_NOP, newHolder, targetHolder);
}
ParseNode* newSuperElement(ParseNode* expr, const TokenPos& pos) {
return new_<SuperElement>(expr, pos);
ParseNode* newPosHolder(const TokenPos& pos) {
return new_<NullaryNode>(PNK_POSHOLDER, pos);
}
ParseNode* newNewTarget(const TokenPos& pos) {
return new_<NullaryNode>(PNK_NEWTARGET, pos);
ParseNode* newSuperBase(const TokenPos& pos, ExclusiveContext* cx) {
ParseNode* node = newPosHolder(pos);
#ifdef DEBUG
// Set the atom for assertion purposes
if (node)
node->pn_atom = cx->names().super;
#endif
return node;
}
bool addPrototypeMutation(ParseNode* literal, uint32_t begin, ParseNode* expr) {
@ -711,6 +711,11 @@ class FullParseHandler
(kind == PNK_SEMI && !node->pn_kid);
}
bool isSuperBase(ParseNode* node, ExclusiveContext* cx) {
MOZ_ASSERT_IF(node->isKind(PNK_POSHOLDER), node->pn_atom == cx->names().super);
return node->isKind(PNK_POSHOLDER);
}
inline bool finishInitializerAssignment(ParseNode* pn, ParseNode* init, JSOp op);
void setBeginPosition(ParseNode* pn, ParseNode* oth) {

View file

@ -375,9 +375,7 @@ class NameResolver
case PNK_DEBUGGER:
case PNK_EXPORT_BATCH_SPEC:
case PNK_FRESHENBLOCK:
case PNK_SUPERPROP:
case PNK_OBJECT_PROPERTY_NAME:
case PNK_NEWTARGET:
MOZ_ASSERT(cur->isArity(PN_NULLARY));
break;
@ -387,6 +385,12 @@ class NameResolver
MOZ_ASSERT(!cur->pn_kid->maybeExpr());
break;
case PNK_NEWTARGET:
MOZ_ASSERT(cur->isArity(PN_BINARY));
MOZ_ASSERT(cur->pn_left->isKind(PNK_POSHOLDER));
MOZ_ASSERT(cur->pn_right->isKind(PNK_POSHOLDER));
break;
// Nodes with a single non-null child requiring name resolution.
case PNK_TYPEOFEXPR:
case PNK_VOID:
@ -395,9 +399,7 @@ class NameResolver
case PNK_THROW:
case PNK_DELETENAME:
case PNK_DELETEPROP:
case PNK_DELETESUPERPROP:
case PNK_DELETEELEM:
case PNK_DELETESUPERELEM:
case PNK_DELETEEXPR:
case PNK_NEG:
case PNK_POS:
@ -409,7 +411,6 @@ class NameResolver
case PNK_ARRAYPUSH:
case PNK_SPREAD:
case PNK_MUTATEPROTO:
case PNK_SUPERELEM:
case PNK_EXPORT:
case PNK_EXPORT_DEFAULT:
MOZ_ASSERT(cur->isArity(PN_UNARY));
@ -440,7 +441,6 @@ class NameResolver
case PNK_DIVASSIGN:
case PNK_MODASSIGN:
case PNK_POWASSIGN:
case PNK_ELEM:
case PNK_COLON:
case PNK_CASE:
case PNK_SHORTHAND:
@ -457,6 +457,14 @@ class NameResolver
return false;
break;
case PNK_ELEM:
MOZ_ASSERT(cur->isArity(PN_BINARY));
if (!cur->as<PropertyByValue>().isSuper() && !resolve(cur->pn_left, prefix))
return false;
if (!resolve(cur->pn_right, prefix))
return false;
break;
case PNK_WITH:
MOZ_ASSERT(cur->isArity(PN_BINARY_OBJ));
if (!resolve(cur->pn_left, prefix))
@ -752,9 +760,18 @@ class NameResolver
break;
}
case PNK_LABEL:
case PNK_DOT:
MOZ_ASSERT(cur->isArity(PN_NAME));
// Super prop nodes do not have a meaningful LHS
if (cur->as<PropertyAccess>().isSuper())
break;
if (!resolve(cur->expr(), prefix))
return false;
break;
case PNK_LABEL:
MOZ_ASSERT(cur->isArity(PN_NAME));
if (!resolve(cur->expr(), prefix))
return false;
break;
@ -779,6 +796,7 @@ class NameResolver
case PNK_EXPORT_SPEC: // by PNK_EXPORT_SPEC_LIST
case PNK_CALLSITEOBJ: // by PNK_TAGGED_TEMPLATE
case PNK_CLASSNAMES: // by PNK_CLASS
case PNK_POSHOLDER: // by PNK_NEWTARGET, PNK_DOT
MOZ_CRASH("should have been handled by a parent node");
case PNK_LIMIT: // invalid sentinel value

Просмотреть файл

@ -214,8 +214,7 @@ PushNodeChildren(ParseNode* pn, NodeStack* stack)
case PNK_EXPORT_BATCH_SPEC:
case PNK_OBJECT_PROPERTY_NAME:
case PNK_FRESHENBLOCK:
case PNK_SUPERPROP:
case PNK_NEWTARGET:
case PNK_POSHOLDER:
MOZ_ASSERT(pn->isArity(PN_NULLARY));
MOZ_ASSERT(!pn->isUsed(), "handle non-trivial cases separately");
MOZ_ASSERT(!pn->isDefn(), "handle non-trivial cases separately");
@ -230,9 +229,7 @@ PushNodeChildren(ParseNode* pn, NodeStack* stack)
case PNK_THROW:
case PNK_DELETENAME:
case PNK_DELETEPROP:
case PNK_DELETESUPERPROP:
case PNK_DELETEELEM:
case PNK_DELETESUPERELEM:
case PNK_DELETEEXPR:
case PNK_POS:
case PNK_NEG:
@ -246,7 +243,6 @@ PushNodeChildren(ParseNode* pn, NodeStack* stack)
case PNK_MUTATEPROTO:
case PNK_EXPORT:
case PNK_EXPORT_DEFAULT:
case PNK_SUPERELEM:
return PushUnaryNodeChild(pn, stack);
// Nodes with a single nullable child.
@ -285,6 +281,7 @@ PushNodeChildren(ParseNode* pn, NodeStack* stack)
case PNK_SWITCH:
case PNK_LETBLOCK:
case PNK_CLASSMETHOD:
case PNK_NEWTARGET:
case PNK_FOR: {
MOZ_ASSERT(pn->isArity(PN_BINARY));
stack->push(pn->pn_left);
@ -1106,7 +1103,10 @@ NameNode::dump(int indent)
if (isKind(PNK_DOT)) {
fputc(' ', stderr);
DumpParseTree(expr(), indent + 2);
if (as<PropertyAccess>().isSuper())
fprintf(stderr, "super");
else
DumpParseTree(expr(), indent + 2);
fputc(')', stderr);
}
return;

Просмотреть файл

@ -138,9 +138,7 @@ class PackedScopeCoordinate
/* Delete operations. These must be sequential. */ \
F(DELETENAME) \
F(DELETEPROP) \
F(DELETESUPERPROP) \
F(DELETEELEM) \
F(DELETESUPERELEM) \
F(DELETEEXPR) \
F(TRY) \
F(CATCH) \
@ -176,9 +174,8 @@ class PackedScopeCoordinate
F(CLASSMETHOD) \
F(CLASSMETHODLIST) \
F(CLASSNAMES) \
F(SUPERPROP) \
F(SUPERELEM) \
F(NEWTARGET) \
F(POSHOLDER) \
\
/* Unary operators. */ \
F(TYPEOFNAME) \
@ -432,7 +429,6 @@ IsDeleteKind(ParseNodeKind kind)
* ctor is a MEMBER expr
* PNK_DELETENAME unary pn_kid: PNK_NAME expr
* PNK_DELETEPROP unary pn_kid: PNK_DOT expr
* PNK_DELETESUPERPROP unary pn_kid: PNK_SUPERPROP expr
* PNK_DELETEELEM unary pn_kid: PNK_ELEM expr
* PNK_DELETESUPERELEM unary pn_kid: PNK_SUPERELEM expr
* PNK_DELETEEXPR unary pn_kid: MEMBER expr that's evaluated, then the
@ -1323,6 +1319,11 @@ class PropertyAccess : public ParseNode
PropertyName& name() const {
return *pn_u.name.atom->asPropertyName();
}
bool isSuper() const {
// PNK_POSHOLDER cannot result from any expression syntax.
return expression().isKind(PNK_POSHOLDER);
}
};
class PropertyByValue : public ParseNode
@ -1334,6 +1335,17 @@ class PropertyByValue : public ParseNode
pn_u.binary.left = lhs;
pn_u.binary.right = propExpr;
}
static bool test(const ParseNode& node) {
bool match = node.isKind(PNK_ELEM);
MOZ_ASSERT_IF(match, node.isArity(PN_BINARY));
return match;
}
bool isSuper() const {
// Like PropertyAccess above, PNK_POSHOLDER is "good enough".
return pn_left->isKind(PNK_POSHOLDER);
}
};
/*
@ -1446,38 +1458,6 @@ struct ClassNode : public TernaryNode {
}
};
struct SuperProperty : public NullaryNode {
SuperProperty(JSAtom* atom, const TokenPos& pos)
: NullaryNode(PNK_SUPERPROP, JSOP_NOP, pos, atom)
{ }
static bool test(const ParseNode& node) {
bool match = node.isKind(PNK_SUPERPROP);
MOZ_ASSERT_IF(match, node.isArity(PN_NULLARY));
return match;
}
JSAtom* propName() const {
return pn_atom;
}
};
struct SuperElement : public UnaryNode {
SuperElement(ParseNode* expr, const TokenPos& pos)
: UnaryNode(PNK_SUPERELEM, JSOP_NOP, pos, expr)
{ }
static bool test(const ParseNode& node) {
bool match = node.isKind(PNK_SUPERELEM);
MOZ_ASSERT_IF(match, node.isArity(PN_UNARY));
return match;
}
ParseNode* expr() const {
return pn_kid;
}
};
#ifdef DEBUG
void DumpParseTree(ParseNode* pn, int indent = 0);
#endif

Просмотреть файл

@ -4965,9 +4965,7 @@ Parser<FullParseHandler>::isValidForStatementLHS(ParseNode* pn1, JSVersion versi
case PNK_ARRAY:
case PNK_CALL:
case PNK_DOT:
case PNK_SUPERPROP:
case PNK_ELEM:
case PNK_SUPERELEM:
case PNK_NAME:
case PNK_OBJECT:
return true;
@ -8250,9 +8248,6 @@ Parser<ParseHandler>::memberExpr(YieldHandling yieldHandling, TokenKind tt, bool
JS_CHECK_RECURSION(context, return null());
bool isSuper = false;
uint32_t superBegin = pos().begin;
/* Check for new expression first. */
if (tt == TOK_NEW) {
uint32_t newBegin = pos().begin;
@ -8287,14 +8282,17 @@ Parser<ParseHandler>::memberExpr(YieldHandling yieldHandling, TokenKind tt, bool
}
}
} else if (tt == TOK_SUPER) {
lhs = null();
isSuper = true;
lhs = handler.newSuperBase(pos(), context);
if (!lhs)
return null();
} else {
lhs = primaryExpr(yieldHandling, tt, invoked);
if (!lhs)
return null();
}
MOZ_ASSERT_IF(handler.isSuperBase(lhs, context), tokenStream.isCurrentTokenType(TOK_SUPER));
while (true) {
if (!tokenStream.getToken(&tt))
return null();
@ -8307,16 +8305,11 @@ Parser<ParseHandler>::memberExpr(YieldHandling yieldHandling, TokenKind tt, bool
return null();
if (tt == TOK_NAME) {
PropertyName* field = tokenStream.currentName();
if (isSuper) {
isSuper = false;
if (!checkAndMarkSuperScope()) {
report(ParseError, false, null(), JSMSG_BAD_SUPERPROP, "property");
return null();
}
nextMember = handler.newSuperProperty(field, TokenPos(superBegin, pos().end));
} else {
nextMember = handler.newPropertyAccess(lhs, field, pos().end);
if (handler.isSuperBase(lhs, context) && !checkAndMarkSuperScope()) {
report(ParseError, false, null(), JSMSG_BAD_SUPERPROP, "property");
return null();
}
nextMember = handler.newPropertyAccess(lhs, field, pos().end);
if (!nextMember)
return null();
} else {
@ -8330,23 +8323,18 @@ Parser<ParseHandler>::memberExpr(YieldHandling yieldHandling, TokenKind tt, bool
MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_IN_INDEX);
if (isSuper) {
isSuper = false;
if (!checkAndMarkSuperScope()) {
if (handler.isSuperBase(lhs, context) && !checkAndMarkSuperScope()) {
report(ParseError, false, null(), JSMSG_BAD_SUPERPROP, "member");
return null();
}
nextMember = handler.newSuperElement(propExpr, TokenPos(superBegin, pos().end));
} else {
nextMember = handler.newPropertyByValue(lhs, propExpr, pos().end);
}
nextMember = handler.newPropertyByValue(lhs, propExpr, pos().end);
if (!nextMember)
return null();
} else if ((allowCallSyntax && tt == TOK_LP) ||
tt == TOK_TEMPLATE_HEAD ||
tt == TOK_NO_SUBS_TEMPLATE)
{
if (isSuper) {
if (handler.isSuperBase(lhs, context)) {
// For now...
report(ParseError, false, null(), JSMSG_BAD_SUPER);
return null();
@ -8410,18 +8398,16 @@ Parser<ParseHandler>::memberExpr(YieldHandling yieldHandling, TokenKind tt, bool
}
handler.setOp(nextMember, op);
} else {
if (isSuper) {
report(ParseError, false, null(), JSMSG_BAD_SUPER);
return null();
}
tokenStream.ungetToken();
if (handler.isSuperBase(lhs, context))
break;
return lhs;
}
lhs = nextMember;
}
if (isSuper) {
if (handler.isSuperBase(lhs, context)) {
report(ParseError, false, null(), JSMSG_BAD_SUPER);
return null();
}
@ -8949,9 +8935,14 @@ Parser<ParseHandler>::tryNewTarget(Node &newTarget)
{
MOZ_ASSERT(tokenStream.isCurrentTokenType(TOK_NEW));
uint32_t begin = pos().begin;
newTarget = null();
Node newHolder = handler.newPosHolder(pos());
if (!newHolder)
return false;
uint32_t begin = pos().begin;
// |new| expects to look for an operand, so we will honor that.
TokenKind next;
if (!tokenStream.getToken(&next, TokenStream::Operand))
@ -8975,7 +8966,11 @@ Parser<ParseHandler>::tryNewTarget(Node &newTarget)
return false;
}
newTarget = handler.newNewTarget(TokenPos(begin, pos().end));
Node targetHolder = handler.newPosHolder(pos());
if (!targetHolder)
return false;
newTarget = handler.newNewTarget(newHolder, targetHolder);
return !!newTarget;
}

Просмотреть файл

@ -47,9 +47,6 @@ class SyntaxParseHandler
NodeThrow,
NodeEmptyStatement,
NodeSuperProperty,
NodeSuperElement,
// This is needed for proper assignment-target handling. ES6 formally
// requires function calls *not* pass IsValidSimpleAssignmentTarget,
// but at last check there were still sites with |f() = 5| and similar
@ -132,13 +129,16 @@ class SyntaxParseHandler
// warnings, and parsing with that option disables syntax parsing. But
// it seems best to be consistent, and perhaps the syntax parser will
// eventually enforce extraWarnings and will require this then.)
NodeUnparenthesizedAssignment
NodeUnparenthesizedAssignment,
// This node is necessary to determine if the LHS of a property access is
// super-related.
NodeSuperBase
};
typedef Definition::Kind DefinitionNode;
bool isPropertyAccess(Node node) {
return node == NodeDottedProperty || node == NodeElement ||
node == NodeSuperProperty || node == NodeSuperElement;
return node == NodeDottedProperty || node == NodeElement;
}
bool isFunctionCall(Node node) {
@ -274,14 +274,9 @@ class SyntaxParseHandler
Node newObjectLiteral(uint32_t begin) { return NodeUnparenthesizedObject; }
Node newClassMethodList(uint32_t begin) { return NodeGeneric; }
Node newSuperProperty(PropertyName* prop, const TokenPos& pos) {
return NodeSuperProperty;
}
Node newSuperElement(Node expr, const TokenPos& pos) {
return NodeSuperElement;
}
Node newNewTarget(const TokenPos& pos) { return NodeGeneric; }
Node newNewTarget(Node newHolder, Node targetHolder) { return NodeGeneric; }
Node newPosHolder(const TokenPos& pos) { return NodeGeneric; }
Node newSuperBase(const TokenPos& pos, ExclusiveContext* cx) { return NodeSuperBase; }
bool addPrototypeMutation(Node literal, uint32_t begin, Node expr) { return true; }
bool addPropertyDefinition(Node literal, Node name, Node expr) { return true; }
@ -436,6 +431,12 @@ class SyntaxParseHandler
pn == NodeEmptyStatement;
}
bool isSuperBase(Node pn, ExclusiveContext* cx) {
// While NodePosHolder is used in places other than just the super-base,
// it is unique enough for our purposes.
return pn == NodeSuperBase;
}
void setOp(Node pn, JSOp op) {}
void setBlockId(Node pn, unsigned blockid) {}
void setFlag(Node pn, unsigned flag) {}

Просмотреть файл

@ -102,56 +102,3 @@ JS::HeapValuePostBarrier(JS::Value* valuep, const Value& prev, const Value& next
MOZ_ASSERT(valuep);
js::InternalGCMethods<JS::Value>::postBarrier(valuep, prev, next);
}
template <typename T>
/* static */ HashNumber
js::MovableCellHasher<T>::hash(const Lookup& l)
{
if (!l)
return 0;
// We have to access the zone from-any-thread here: a worker thread may be
// cloning a self-hosted object from the main-thread-runtime-owned self-
// hosting zone into the off-main-thread runtime. The zone's uid lock will
// protect against multiple workers doing this simultaneously.
MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
l->zoneFromAnyThread()->isSelfHostingZone());
HashNumber hn;
if (!l->zoneFromAnyThread()->getHashCode(l, &hn))
js::CrashAtUnhandlableOOM("failed to get a stable hash code");
return hn;
}
template <typename T>
/* static */ bool
js::MovableCellHasher<T>::match(const Key& k, const Lookup& l)
{
// Return true if both are null or false if only one is null.
if (!k)
return !l;
if (!l)
return false;
MOZ_ASSERT(k);
MOZ_ASSERT(l);
MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
l->zoneFromAnyThread()->isSelfHostingZone());
Zone* zone = k->zoneFromAnyThread();
if (zone != l->zoneFromAnyThread())
return false;
MOZ_ASSERT(zone->hasUniqueId(k));
MOZ_ASSERT(zone->hasUniqueId(l));
// Since both already have a uid (from hash), the get is infallible.
uint64_t uidK, uidL;
MOZ_ALWAYS_TRUE(zone->getUniqueId(k, &uidK));
MOZ_ALWAYS_TRUE(zone->getUniqueId(l, &uidL));
return uidK == uidL;
}
template struct js::MovableCellHasher<JSObject*>;
template struct js::MovableCellHasher<js::GlobalObject*>;
template struct js::MovableCellHasher<js::SavedFrame*>;
template struct js::MovableCellHasher<js::ScopeObject*>;

Просмотреть файл

@ -545,31 +545,6 @@ BarrieredSetPair(Zone* zone,
v2.postBarrieredSet(val2);
}
// Provide hash codes for Cell kinds that may be relocated and, thus, not have
// a stable address to use as the base for a hash code. Instead of the address,
// this hasher uses Cell::getUniqueId to provide exact matches and as a base
// for generating hash codes.
//
// Note: this hasher, like PointerHasher, can "hash" a nullptr. While a nullptr
// would not likely be a useful key, there are some cases where being able to
// hash a nullptr is useful, either on purpose or because of bugs:
// (1) existence checks where the key may happen to be null and (2) some
// aggregate Lookup kinds embed a JSObject* that is frequently null and do not
// null test before dispatching to the hasher.
template <typename T>
struct MovableCellHasher
{
static_assert(mozilla::IsBaseOf<JSObject, typename mozilla::RemovePointer<T>::Type>::value,
"MovableCellHasher's T must be a Cell type that may move");
using Key = T;
using Lookup = T;
static HashNumber hash(const Lookup& l);
static bool match(const Key& k, const Lookup& l);
static void rekey(Key& k, const Key& newKey) { k = newKey; }
};
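A sketch of how this hasher is meant to be plugged in, assuming the js::HashMap from js/public/HashTable.h; the map alias and helper below are illustrative, not part of this patch:

// Illustrative only: a map keyed on JSObject* that stays valid across a
// moving GC, because hashing goes through the cell's zone-assigned unique
// id (via MovableCellHasher) rather than its current address.
#include "js/HashTable.h"

using ObjectFlagMap =
    js::HashMap<JSObject*, uint32_t,
                js::MovableCellHasher<JSObject*>,
                js::SystemAllocPolicy>;

static bool
RememberObject(ObjectFlagMap& map, JSObject* obj, uint32_t flags)
{
    if (!map.initialized() && !map.init())
        return false;
    // put() may lazily assign a unique id to |obj| via Zone::getUniqueId().
    return map.put(obj, flags);
}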
/* Useful for hashtables with a HeapPtr as key. */
template <class T>
struct HeapPtrHasher

Просмотреть файл

@ -654,11 +654,6 @@ class GCRuntime
size_t maxMallocBytesAllocated() { return maxMallocBytes; }
uint64_t nextCellUniqueId() {
MOZ_ASSERT(nextCellUniqueId_ > 0);
return nextCellUniqueId_++;
}
public:
// Internal public interface
js::gc::State state() const { return incrementalState; }
@ -1013,9 +1008,6 @@ class GCRuntime
size_t maxMallocBytes;
// An incrementing id used to assign unique ids to cells that require one.
uint64_t nextCellUniqueId_;
/*
* Number of the committed arenas in all GC chunks including empty chunks.
*/

Просмотреть файл

@ -293,9 +293,6 @@ class TenuredCell : public Cell
#endif
};
/* Cells are aligned to CellShift, so the largest tagged null pointer is: */
const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
/*
* The mark bitmap has one bit for each GC cell. For multi-cell GC things this
* wastes space but lets us avoid expensive divisions by the thing's size when
@ -807,17 +804,6 @@ ArenaHeader::getThingSize() const
*/
struct ChunkTrailer
{
/* Construct a Nursery ChunkTrailer. */
ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
: location(gc::ChunkLocationBitNursery), storeBuffer(sb), runtime(rt)
{}
/* Construct a Tenured heap ChunkTrailer. */
explicit ChunkTrailer(JSRuntime* rt)
: location(gc::ChunkLocationBitTenuredHeap), storeBuffer(nullptr), runtime(rt)
{}
public:
/* The index the chunk in the nursery, or LocationTenuredHeap. */
uint32_t location;
uint32_t padding;
@ -825,12 +811,11 @@ struct ChunkTrailer
/* The store buffer for writes to things in this chunk or nullptr. */
StoreBuffer* storeBuffer;
/* This provides quick access to the runtime from absolutely anywhere. */
JSRuntime* runtime;
};
static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
"ChunkTrailer size must match the API defined size.");
static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t) + sizeof(uint64_t),
"ChunkTrailer size is incorrect.");
/* The chunk header (located at the end of the chunk to preserve arena alignment). */
struct ChunkInfo
@ -1019,16 +1004,13 @@ struct Chunk
return reinterpret_cast<Chunk*>(addr);
}
static bool withinValidRange(uintptr_t addr) {
static bool withinArenasRange(uintptr_t addr) {
uintptr_t offset = addr & ChunkMask;
return Chunk::fromAddress(addr)->isNurseryChunk()
? offset < ChunkSize - sizeof(ChunkTrailer)
: offset < ArenasPerChunk * ArenaSize;
return offset < ArenasPerChunk * ArenaSize;
}
static size_t arenaIndex(uintptr_t addr) {
MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
MOZ_ASSERT(withinValidRange(addr));
MOZ_ASSERT(withinArenasRange(addr));
return (addr & ChunkMask) >> ArenaShift;
}
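// Worked example of the mask-and-shift arithmetic above, using illustrative
// sizes (1 MiB chunks and 4 KiB arenas, i.e. ChunkMask = 0xfffff,
// ArenaShift = 12):
//   addr                 = chunkBase + 0x37210
//   addr & ChunkMask     = 0x37210   (byte offset within the chunk)
//   offset >> ArenaShift = 0x37      (the arena's index within the chunk)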
@ -1046,10 +1028,6 @@ struct Chunk
return info.numArenasFree != 0;
}
bool isNurseryChunk() const {
return info.trailer.storeBuffer;
}
ArenaHeader* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind,
const AutoLockGC& lock);
@ -1149,7 +1127,7 @@ ArenaHeader::address() const
uintptr_t addr = reinterpret_cast<uintptr_t>(this);
MOZ_ASSERT(addr);
MOZ_ASSERT(!(addr & ArenaMask));
MOZ_ASSERT(Chunk::withinValidRange(addr));
MOZ_ASSERT(Chunk::withinArenasRange(addr));
return addr;
}
@ -1318,7 +1296,7 @@ Cell::address() const
{
uintptr_t addr = uintptr_t(this);
MOZ_ASSERT(addr % CellSize == 0);
MOZ_ASSERT(Chunk::withinValidRange(addr));
MOZ_ASSERT(Chunk::withinArenasRange(addr));
return addr;
}

Просмотреть файл

@ -2140,13 +2140,7 @@ js::TenuringTracer::moveObjectToTenured(JSObject* dst, JSObject* src, AllocKind
if (src->is<ArrayObject>())
tenuredSize = srcSize = sizeof(NativeObject);
// Copy the Cell contents.
js_memcpy(dst, src, srcSize);
// Move any hash code attached to the object.
src->zone()->transferUniqueId(dst, src);
// Move the slots and elements, if we need to.
if (src->isNative()) {
NativeObject* ndst = &dst->as<NativeObject>();
NativeObject* nsrc = &src->as<NativeObject>();

Просмотреть файл

@ -426,7 +426,7 @@ ToMarkable(Cell* cell)
MOZ_ALWAYS_INLINE bool
IsNullTaggedPointer(void* p)
{
return uintptr_t(p) <= LargestTaggedNullCellPointer;
return uintptr_t(p) < 32;
}
// HashKeyRef represents a reference to a HashMap key. This should normally

Просмотреть файл

@ -64,9 +64,6 @@ js::Nursery::init(uint32_t maxNurseryBytes)
if (!mallocedBuffers.init())
return false;
if (!cellsWithUid_.init())
return false;
void* heap = MapAlignedPages(nurserySize(), Alignment);
if (!heap)
return false;
@ -651,16 +648,6 @@ js::Nursery::waitBackgroundFreeEnd()
void
js::Nursery::sweep()
{
/* Sweep unique ids in all in-use chunks. */
for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
JSObject* obj = static_cast<JSObject*>(e.front());
if (!IsForwarded(obj))
obj->zone()->removeUniqueId(obj);
else
MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
}
cellsWithUid_.clear();
#ifdef JS_GC_ZEAL
/* Poison the nursery contents so touching a freed object will crash. */
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
@ -678,8 +665,10 @@ js::Nursery::sweep()
{
#ifdef JS_CRASH_DIAGNOSTICS
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
for (int i = 0; i < numActiveChunks_; ++i)
initChunk(i);
for (int i = 0; i < numActiveChunks_; ++i) {
chunk(i).trailer.location = gc::ChunkLocationBitNursery;
chunk(i).trailer.runtime = runtime();
}
#endif
setCurrentChunk(0);
}

Просмотреть файл

@ -182,14 +182,6 @@ class Nursery
void waitBackgroundFreeEnd();
bool addedUniqueIdToCell(gc::Cell* cell) {
if (!IsInsideNursery(cell) || !isEnabled())
return true;
MOZ_ASSERT(cellsWithUid_.initialized());
MOZ_ASSERT(!cellsWithUid_.has(cell));
return cellsWithUid_.put(cell);
}
size_t sizeOfHeapCommitted() const {
return numActiveChunks_ * gc::ChunkSize;
}
@ -273,21 +265,6 @@ class Nursery
typedef HashMap<void*, void*, PointerHasher<void*, 1>, SystemAllocPolicy> ForwardedBufferMap;
ForwardedBufferMap forwardedBuffers;
/*
* When we assign a unique id to a cell in the nursery, that almost always
* means that the cell will be in a hash table, and thus, held live,
* automatically moving the uid from the nursery to its new home in
* tenured. It is possible, if rare, for an object that acquired a uid to
* be dead before the next collection, in which case we need to know to
* remove it when we sweep.
*
* Note: we store the pointers as Cell* here, resulting in an ugly cast in
* sweep. This is because this structure is used to help implement
* stable object hashing and we have to break the cycle somehow.
*/
using CellsWithUniqueIdSet = HashSet<gc::Cell*, PointerHasher<gc::Cell*, 3>, SystemAllocPolicy>;
CellsWithUniqueIdSet cellsWithUid_;
/* The maximum number of bytes allowed to reside in nursery buffers. */
static const size_t MaxNurseryBufferSize = 1024;
@ -309,8 +286,10 @@ class Nursery
}
MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
NurseryChunkLayout& c = chunk(chunkno);
c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
c.trailer.location = gc::ChunkLocationBitNursery;
c.trailer.runtime = runtime();
}
MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {

Просмотреть файл

@ -13,12 +13,9 @@
#include "jscntxt.h"
#include "ds/SpinLock.h"
#include "ds/SplayTree.h"
#include "gc/FindSCCs.h"
#include "gc/GCRuntime.h"
#include "js/TracingAPI.h"
#include "vm/MallocProvider.h"
#include "vm/TypeInference.h"
namespace js {
@ -61,73 +58,6 @@ class ZoneHeapThreshold
const GCSchedulingTunables& tunables);
};
// Maps a Cell* to a unique, 64-bit id. This implementation uses a SplayTree
// instead of a HashMap. While a SplayTree has worse worst-case performance,
// the typical usage, storing stable hashmap keys, tends to look up the same
// key extremely frequently. Thus, we typically get very close to HashMap-like
// O(1) performance with much denser storage.
class UniqueIdMap
{
struct Pair {
uint64_t uniqueId;
Cell* key;
public:
Pair(Cell* cell, uint64_t uid) : uniqueId(uid), key(cell) {}
Pair(const Pair& other) : uniqueId(other.uniqueId), key(other.key) {}
static ptrdiff_t compare(const Pair& a, const Pair& b) {
return b.key - a.key;
}
};
// Use a relatively small chunk, as many users will not have many entries.
const size_t AllocChunkSize = mozilla::RoundUpPow2(16 * sizeof(Pair));
LifoAlloc alloc;
SplayTree<Pair, Pair> map;
public:
UniqueIdMap() : alloc(AllocChunkSize), map(&alloc) {}
// Returns true if the map is empty.
bool isEmpty() { return map.empty(); }
// Return true if the cell is present in the map.
bool has(Cell* cell) {
return map.maybeLookup(Pair(cell, 0));
}
// Returns whether the cell is present or not. If true, sets the uid.
bool lookup(Cell* cell, uint64_t* uidp) {
Pair tmp(nullptr, 0);
if (!map.contains(Pair(cell, 0), &tmp))
return false;
MOZ_ASSERT(tmp.key == cell);
MOZ_ASSERT(tmp.uniqueId > 0);
*uidp = tmp.uniqueId;
return true;
}
// Inserts a value; returns false on OOM.
bool put(Cell* cell, uint64_t uid) {
MOZ_ASSERT(uid > 0);
return map.insert(Pair(cell, uid));
}
// Remove the given key from the map.
void remove(Cell* cell) {
map.remove(Pair(cell, 0));
}
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
// All data allocated by |map| is contained in |alloc|.
return alloc.sizeOfExcludingThis(mallocSizeOf);
}
};
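A minimal usage sketch of the API above; it is illustrative only (real callers obtain uids from NextCellUniqueId() and serialize access with the zone's uid lock):

// Illustrative only; |cell| stands in for a real GC cell.
static bool
UniqueIdMapExample(js::gc::UniqueIdMap& ids, js::gc::Cell* cell)
{
    uint64_t uid;
    if (ids.lookup(cell, &uid))   // already registered
        return true;
    if (!ids.put(cell, 1))        // uids must be nonzero; false on OOM
        return false;
    MOZ_ASSERT(ids.has(cell));
    ids.remove(cell);             // and forget it again
    return true;
}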
extern uint64_t NextCellUniqueId(JSRuntime* rt);
} // namespace gc
} // namespace js
@ -188,8 +118,7 @@ struct Zone : public JS::shadow::Zone,
void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
size_t* typePool,
size_t* baselineStubsOptimized,
size_t* uniqueIdMap);
size_t* baselineStubsOptimized);
void resetGCMallocBytes();
void setGCMaxMallocBytes(size_t value);
@ -321,13 +250,6 @@ struct Zone : public JS::shadow::Zone,
return isOnList();
}
// Side map for storing unique ids for cells, independent of address.
js::gc::UniqueIdMap uniqueIds_;
// Guards the uniqueIds_ map as it is accessed directly from the background
// sweeping thread. This uses a spinlock, since it is normally uncontended.
js::SpinLock uniqueIdsLock_;
public:
bool hasDebuggers() const { return debuggers && debuggers->length(); }
DebuggerVector* getDebuggers() const { return debuggers; }
@ -396,74 +318,6 @@ struct Zone : public JS::shadow::Zone,
mozilla::DebugOnly<unsigned> gcLastZoneGroupIndex;
// Creates a HashNumber based on getUniqueId. Returns false on OOM.
bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
uint64_t uid;
if (!getUniqueId(cell, &uid))
return false;
*hashp = (uid >> 32) & (uid & 0xFFFFFFFF);
return true;
}
// Puts an existing UID in |uidp|, or creates a new UID for this Cell and
// puts that into |uidp|. Returns false on OOM.
bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
MOZ_ASSERT(uidp);
js::AutoSpinLock lock(uniqueIdsLock_);
// Get an existing uid, if one has been set.
if (uniqueIds_.lookup(cell, uidp))
return true;
// Set a new uid on the cell.
*uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
if (!uniqueIds_.put(cell, *uidp))
return false;
// If the cell was in the nursery, hopefully unlikely, then we need to
// tell the nursery about it so that it can sweep the uid if the thing
// does not get tenured.
if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell))
js::CrashAtUnhandlableOOM("failed to allocate tracking data for a nursery uid");
return true;
}
// Return true if this cell has a UID associated with it.
bool hasUniqueId(js::gc::Cell* cell) {
js::AutoSpinLock lock(uniqueIdsLock_);
uint64_t tmp;
return uniqueIds_.lookup(cell, &tmp);
}
// Transfer an id from another cell. This must only be called on behalf of a
// moving GC. This method is infallible.
void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
MOZ_ASSERT(src != tgt);
MOZ_ASSERT(!IsInsideNursery(tgt));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
js::AutoSpinLock lock(uniqueIdsLock_);
// Return early if we do not have a UID set on the source.
uint64_t uid = 0;
if (!uniqueIds_.lookup(src, &uid))
return;
// Remove from the source first to guarantee that at least one node
// will be available in the free pool. This allows us to avoid OOM
// in all cases when transferring uids.
uniqueIds_.remove(src);
MOZ_ASSERT(uid > 0);
mozilla::DebugOnly<bool> ok = uniqueIds_.put(tgt, uid);
MOZ_ASSERT(ok);
}
// Remove any unique id associated with this Cell.
void removeUniqueId(js::gc::Cell* cell) {
js::AutoSpinLock lock(uniqueIdsLock_);
uniqueIds_.remove(cell);
}
private:
js::jit::JitZone* jitZone_;

Просмотреть файл

@ -2,7 +2,7 @@
var g = newGlobal();
g.eval('function f(a) { if (a == 1) debugger; evaluate("f(" + a + " - 1);", {newContext: true}); }');
var N = 9;
var N = 2;
var dbg = new Debugger(g);
var frames = [];
dbg.onEnterFrame = function (frame) {

Просмотреть файл

@ -38,7 +38,7 @@ functionDeclaration = (id, params, body) => Pattern({
});
classDeclaration = (name) => Pattern({
type: "ClassStatement",
name: name
id: name
});
variableDeclaration = (decls) => Pattern({
type: "VariableDeclaration",

Просмотреть файл

@ -42,7 +42,6 @@ UNIFIED_SOURCES += [
'testGCMarking.cpp',
'testGCOutOfMemory.cpp',
'testGCStoreBufferRemoval.cpp',
'testGCUniqueId.cpp',
'testGetPropertyDescriptor.cpp',
'testHashTable.cpp',
'testIndexToString.cpp',

Просмотреть файл

@ -1,120 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/GCInternals.h"
#include "gc/Zone.h"
static void
MinimizeHeap(JSRuntime* rt)
{
// The second collection forces us to wait until the background
// sweeping started by the first GC has finished.
JS_GC(rt);
JS_GC(rt);
js::gc::AutoFinishGC finish(rt);
}
BEGIN_TEST(testGCUID)
{
#ifdef JS_GC_ZEAL
AutoLeaveZeal nozeal(cx);
#endif /* JS_GC_ZEAL */
uint64_t uid = 0;
uint64_t tmp = 0;
// Ensure the heap is as minimal as it can get.
MinimizeHeap(rt);
JS::RootedObject obj(cx, JS_NewPlainObject(cx));
uintptr_t nurseryAddr = uintptr_t(obj.get());
CHECK(obj);
CHECK(js::gc::IsInsideNursery(obj));
// Do not start with an ID.
CHECK(!obj->zone()->hasUniqueId(obj));
// Ensure we can get a new UID.
CHECK(obj->zone()->getUniqueId(obj, &uid));
CHECK(uid > js::gc::LargestTaggedNullCellPointer);
// We should now have an id.
CHECK(obj->zone()->hasUniqueId(obj));
// Calling again should get us the same thing.
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid == tmp);
// Tenure the thing and check that the UID moved with it.
MinimizeHeap(rt);
uintptr_t tenuredAddr = uintptr_t(obj.get());
CHECK(tenuredAddr != nurseryAddr);
CHECK(!js::gc::IsInsideNursery(obj));
CHECK(obj->zone()->hasUniqueId(obj));
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid == tmp);
// Allocate a new nursery thing in the same location and check that we
// removed the prior uid that was attached to the location.
obj = JS_NewPlainObject(cx);
CHECK(obj);
CHECK(uintptr_t(obj.get()) == nurseryAddr);
CHECK(!obj->zone()->hasUniqueId(obj));
// Try to get another tenured object in the same location and check that
// the uid was removed correctly.
obj = nullptr;
MinimizeHeap(rt);
obj = JS_NewPlainObject(cx);
MinimizeHeap(rt);
CHECK(uintptr_t(obj.get()) == tenuredAddr);
CHECK(!obj->zone()->hasUniqueId(obj));
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid != tmp);
uid = tmp;
// Allocate a few arenas worth of objects to ensure we get some compaction.
const static size_t N = 2049;
using ObjectVector = js::TraceableVector<JSObject*>;
JS::Rooted<ObjectVector> vec(cx, ObjectVector(cx));
for (size_t i = 0; i < N; ++i) {
obj = JS_NewPlainObject(cx);
CHECK(obj);
CHECK(vec.append(obj));
}
// Transfer our vector to tenured if it isn't there already.
MinimizeHeap(rt);
// Tear holes in the heap by unrooting the even objects and collecting.
JS::Rooted<ObjectVector> vec2(cx, ObjectVector(cx));
for (size_t i = 0; i < N; ++i) {
if (i % 2 == 1)
vec2.append(vec[i]);
}
vec.clear();
MinimizeHeap(rt);
// Grab the last object in the vector as our object of interest.
obj = vec2.back();
CHECK(obj);
tenuredAddr = uintptr_t(obj.get());
CHECK(obj->zone()->getUniqueId(obj, &uid));
// Force a compaction to move the object and check that the uid moved to
// the new tenured heap location.
JS::PrepareForFullGC(rt);
JS::GCForReason(rt, GC_SHRINK, JS::gcreason::API);
MinimizeHeap(rt);
CHECK(uintptr_t(obj.get()) != tenuredAddr);
CHECK(obj->zone()->hasUniqueId(obj));
CHECK(obj->zone()->getUniqueId(obj, &tmp));
CHECK(uid == tmp);
return true;
}
END_TEST(testGCUID)

Просмотреть файл

@ -4478,6 +4478,10 @@ JS_GetLocaleCallbacks(JSRuntime* rt);
* Error reporting.
*/
namespace JS {
const uint16_t MaxNumErrorArguments = 10;
};
/*
* Report an exception represented by the sprintf-like conversion of format
* and its arguments. This exception message string is passed to a pre-set

Просмотреть файл

@ -38,7 +38,8 @@ ASTDEF(AST_COMP_EXPR, "ComprehensionExpression", "comprehensi
ASTDEF(AST_GENERATOR_EXPR, "GeneratorExpression", "generatorExpression")
ASTDEF(AST_YIELD_EXPR, "YieldExpression", "yieldExpression")
ASTDEF(AST_CLASS_EXPR, "ClassExpression", "classExpression")
ASTDEF(AST_NEWTARGET_EXPR, "NewTargetExpression", "newTargetExpression")
ASTDEF(AST_METAPROPERTY, "MetaProperty", "metaProperty")
ASTDEF(AST_SUPER, "Super", "super")
ASTDEF(AST_EMPTY_STMT, "EmptyStatement", "emptyStatement")
ASTDEF(AST_BLOCK_STMT, "BlockStatement", "blockStatement")

Просмотреть файл

@ -76,7 +76,6 @@ const char js_import_str[] = "import";
const char js_in_str[] = "in";
const char js_instanceof_str[] = "instanceof";
const char js_interface_str[] = "interface";
const char js_new_str[] = "new";
const char js_package_str[] = "package";
const char js_private_str[] = "private";
const char js_protected_str[] = "protected";

Просмотреть файл

@ -176,7 +176,6 @@ extern const char js_import_str[];
extern const char js_in_str[];
extern const char js_instanceof_str[];
extern const char js_interface_str[];
extern const char js_new_str[];
extern const char js_package_str[];
extern const char js_private_str[];
extern const char js_protected_str[];

Просмотреть файл

@ -565,8 +565,7 @@ js::ExpandErrorArgumentsVA(ExclusiveContext* cx, JSErrorCallback callback,
ErrorArgumentsType argumentsType, va_list ap)
{
const JSErrorFormatString* efs;
int i;
int argCount;
uint16_t argCount;
bool messageArgsPassed = !!reportp->messageArgs;
*messagep = nullptr;
@ -583,9 +582,9 @@ js::ExpandErrorArgumentsVA(ExclusiveContext* cx, JSErrorCallback callback,
reportp->exnType = efs->exnType;
size_t totalArgsLength = 0;
size_t argLengths[10]; /* only {0} thru {9} supported */
size_t argLengths[JS::MaxNumErrorArguments]; /* only {0} thru {9} supported */
argCount = efs->argCount;
MOZ_ASSERT(argCount <= 10);
MOZ_RELEASE_ASSERT(argCount <= JS::MaxNumErrorArguments);
if (argCount > 0) {
/*
* Gather the arguments into an array, and accumulate
@ -602,7 +601,7 @@ js::ExpandErrorArgumentsVA(ExclusiveContext* cx, JSErrorCallback callback,
/* nullptr-terminate for easy copying. */
reportp->messageArgs[argCount] = nullptr;
}
for (i = 0; i < argCount; i++) {
for (uint16_t i = 0; i < argCount; i++) {
if (messageArgsPassed) {
/* Do nothing. */
} else if (argumentsType == ArgumentsAreASCII) {
@ -652,7 +651,7 @@ js::ExpandErrorArgumentsVA(ExclusiveContext* cx, JSErrorCallback callback,
if (*fmt == '{') {
if (isdigit(fmt[1])) {
int d = JS7_UNDEC(fmt[1]);
MOZ_ASSERT(d < argCount);
MOZ_RELEASE_ASSERT(d < argCount);
js_strncpy(out, reportp->messageArgs[d],
argLengths[d]);
out += argLengths[d];
@ -708,7 +707,7 @@ error:
if (!messageArgsPassed && reportp->messageArgs) {
/* free the arguments only if we allocated them */
if (argumentsType == ArgumentsAreASCII) {
i = 0;
uint16_t i = 0;
while (reportp->messageArgs[i])
js_free((void*)reportp->messageArgs[i++]);
}
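The {0}..{9} substitution implemented above can be shown in isolation; this standalone toy (not SpiderMonkey code) mirrors the copy loop's logic:

// Standalone toy mirroring the {N} substitution; not SpiderMonkey code.
#include <cassert>
#include <cctype>
#include <string>

static std::string
Expand(const char* fmt, const std::string args[], int argCount)
{
    std::string out;
    for (const char* p = fmt; *p; ++p) {
        if (p[0] == '{' && isdigit(static_cast<unsigned char>(p[1])) && p[2] == '}') {
            int d = p[1] - '0';    // what JS7_UNDEC does above
            assert(d < argCount);  // mirrors the MOZ_RELEASE_ASSERT
            out += args[d];
            p += 2;                // skip "N}"
        } else {
            out += *p;
        }
    }
    return out;
}
// Usage:
//   std::string args[] = { "number", "string" };
//   Expand("expected {0}, got {1}", args, 2) == "expected number, got string"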

Просмотреть файл

@ -824,7 +824,9 @@ Chunk::init(JSRuntime* rt)
/* Initialize the chunk info. */
info.init();
new (&info.trailer) ChunkTrailer(rt);
info.trailer.storeBuffer = nullptr;
info.trailer.location = ChunkLocationBitTenuredHeap;
info.trailer.runtime = rt;
/* The rest of info fields are initialized in pickChunk. */
}
@ -1098,7 +1100,6 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
marker(rt),
usage(nullptr),
maxMallocBytes(0),
nextCellUniqueId_(LargestTaggedNullCellPointer + 1), // Ensure disjoint from null tagged pointers.
numArenasFreeCommitted(0),
verifyPreData(nullptr),
chunkAllocationSinceLastGC(false),
@ -2021,9 +2022,6 @@ RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind, size_t thingSize
// Copy source cell contents to destination.
memcpy(dst, src, thingSize);
// Move any uid attached to the object.
src->zone()->transferUniqueId(dst, src);
if (IsObjectAllocKind(thingKind)) {
JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
@ -7317,12 +7315,6 @@ JS::IsGenerationalGCEnabled(JSRuntime* rt)
return rt->gc.isGenerationalGCEnabled();
}
uint64_t
js::gc::NextCellUniqueId(JSRuntime* rt)
{
return rt->gc.nextCellUniqueId();
}
namespace js {
namespace gc {
namespace MemInfo {

Просмотреть файл

@ -75,11 +75,6 @@ JSObject::finalize(js::FreeOp* fop)
MOZ_ASSERT(CurrentThreadCanAccessRuntime(fop->runtime()));
}
#endif
// Remove any UID attached to this object.
if (zoneFromAnyThread()->hasUniqueId(this))
zoneFromAnyThread()->removeUniqueId(this);
const js::Class* clasp = getClass();
if (clasp->finalize)
clasp->finalize(fop, this);

Просмотреть файл

@ -7,11 +7,15 @@ class testNonExistent {
super["prop"]();
}
}
assertThrownErrorContains(() => new testNonExistent(), 'super["prop"]');
// Should fold to super.prop
assertThrownErrorContains(() => new testNonExistent(), 'super.prop');
var ol = { testNonExistent() { super.prop(); } };
assertThrownErrorContains(() => ol.testNonExistent(), "super.prop");
var olElem = { testNonExistent() { var prop = "prop"; super[prop](); } };
assertThrownErrorContains(() => olElem.testNonExistent(), "super[prop]");
`;
if (classesEnabled())

Просмотреть файл

@ -124,22 +124,22 @@ function letStmt(head, body) {
}
function superProp(id) {
return dotExpr(ident("super"), id);
return dotExpr(Pattern({ type: "Super" }), id);
}
function superElem(id) {
return memExpr(ident("super"), id);
return memExpr(Pattern({ type: "Super" }), id);
}
function classStmt(id, heritage, body) {
return Pattern({ type: "ClassStatement",
name: id,
heritage: heritage,
id: id,
superClass: heritage,
body: body});
}
function classExpr(id, heritage, body) {
return Pattern({ type: "ClassExpression",
name: id,
heritage: heritage,
id: id,
superClass: heritage,
body: body});
}
function classMethod(id, body, kind, static) {
@ -170,9 +170,15 @@ function arrowExpr(args, body) {
body: body });
}
function newTarget() {
return Pattern({ type: "NewTargetExpression" });
function metaProperty(meta, property) {
return Pattern({ type: "MetaProperty",
meta: meta,
property: property });
}
function newTarget() {
return metaProperty(ident("new"), ident("target"));
}
function unExpr(op, arg) {
return Pattern({ type: "UnaryExpression", operator: op, argument: arg });
}

Просмотреть файл

@ -150,6 +150,7 @@
macro(multiline, multiline, "multiline") \
macro(name, name, "name") \
macro(NaN, NaN, "NaN") \
macro(new, new_, "new") \
macro(next, next, "next") \
macro(NFC, NFC, "NFC") \
macro(NFD, NFD, "NFD") \

Просмотреть файл

@ -5909,7 +5909,7 @@ CheckThisFrame(JSContext* cx, const CallArgs& args, const char* fnname, bool che
THIS_FRAME_THISOBJ(cx, argc, vp, fnname, args, thisobj); \
AbstractFramePtr frame = AbstractFramePtr::FromRaw(thisobj->getPrivate()); \
if (frame.isScriptFrameIterData()) { \
ScriptFrameIter iter(cx, *(ScriptFrameIter::Data*)(frame.raw())); \
ScriptFrameIter iter(*(ScriptFrameIter::Data*)(frame.raw())); \
frame = iter.abstractFramePtr(); \
}
@ -5919,7 +5919,7 @@ CheckThisFrame(JSContext* cx, const CallArgs& args, const char* fnname, bool che
{ \
AbstractFramePtr f = AbstractFramePtr::FromRaw(thisobj->getPrivate()); \
if (f.isScriptFrameIterData()) { \
maybeIter.emplace(cx, *(ScriptFrameIter::Data*)(f.raw())); \
maybeIter.emplace(*(ScriptFrameIter::Data*)(f.raw())); \
} else { \
maybeIter.emplace(cx, ScriptFrameIter::ALL_CONTEXTS, \
ScriptFrameIter::GO_THROUGH_SAVED, \

Просмотреть файл

@ -311,8 +311,7 @@ StatsZoneCallback(JSRuntime* rt, void* data, Zone* zone)
zone->addSizeOfIncludingThis(rtStats->mallocSizeOf_,
&zStats.typePool,
&zStats.baselineStubsOptimized,
&zStats.uniqueIdMap);
&zStats.baselineStubsOptimized);
}
static void

Просмотреть файл

@ -769,7 +769,7 @@
* Stack: callee, this, args[0], ..., args[argc-1], newTarget => rval
* nuses: (argc+3)
*/ \
macro(JSOP_NEW, 82, js_new_str, NULL, 3, -1, 1, JOF_UINT16|JOF_INVOKE|JOF_TYPESET) \
macro(JSOP_NEW, 82, "new", NULL, 3, -1, 1, JOF_UINT16|JOF_INVOKE|JOF_TYPESET) \
/*
* Pushes newly created object onto the stack with provided [[Prototype]].
*

Просмотреть файл

@ -595,7 +595,7 @@ FrameIter::Data::Data(JSContext* cx, SavedOption savedOption,
}
FrameIter::Data::Data(const FrameIter::Data& other)
: cx_(nullptr),
: cx_(other.cx_),
savedOption_(other.savedOption_),
contextOption_(other.contextOption_),
debuggerEvalOption_(other.debuggerEvalOption_),
@ -645,12 +645,11 @@ FrameIter::FrameIter(const FrameIter& other)
{
}
FrameIter::FrameIter(JSContext* cx, const Data& data)
FrameIter::FrameIter(const Data& data)
: data_(data),
ionInlineFrames_(cx, data_.jitFrames_.isIonScripted() ? &data_.jitFrames_ : nullptr)
ionInlineFrames_(data.cx_, data_.jitFrames_.isIonScripted() ? &data_.jitFrames_ : nullptr)
{
MOZ_ASSERT(!data.cx_);
data_.cx_ = cx;
MOZ_ASSERT(data.cx_);
if (data_.jitFrames_.isIonScripted()) {
while (ionInlineFrames_.frameNo() != data.ionInlineFrameNo_)
@ -756,6 +755,11 @@ FrameIter::copyData() const
MOZ_ASSERT(data_.state_ != ASMJS);
if (data && data_.jitFrames_.isIonScripted())
data->ionInlineFrameNo_ = ionInlineFrames_.frameNo();
// Give the copied Data the cx of the current activation, which may be
// different than the cx that the current FrameIter was constructed
// with. This ensures that when we instantiate another FrameIter with the
// copied data, its cx is still alive.
data->cx_ = activation()->cx();
return data;
}

Просмотреть файл

@ -1906,7 +1906,7 @@ class FrameIter
DebuggerEvalOption = FOLLOW_DEBUGGER_EVAL_PREV_LINK);
FrameIter(JSContext* cx, ContextOption, SavedOption, DebuggerEvalOption, JSPrincipals*);
FrameIter(const FrameIter& iter);
FrameIter(JSContext* cx, const Data& data);
MOZ_IMPLICIT FrameIter(const Data& data);
MOZ_IMPLICIT FrameIter(AbstractFramePtr frame);
bool done() const { return data_.state_ == DONE; }
@ -2018,11 +2018,6 @@ class FrameIter
// -----------------------------------------------------------
AbstractFramePtr abstractFramePtr() const;
// N.B. Copying the internal data nulls out the saved cx_, as the
// JSContext's lifetime is not tied to the Data lifetime. When
// re-instantiating a new FrameIter with a saved data, a new cx must be
// provided.
AbstractFramePtr copyDataAsAbstractFramePtr() const;
Data* copyData() const;
@ -2078,7 +2073,7 @@ class ScriptFrameIter : public FrameIter
}
ScriptFrameIter(const ScriptFrameIter& iter) : FrameIter(iter) { settle(); }
ScriptFrameIter(JSContext* cx, const FrameIter::Data& data) : FrameIter(cx, data) { settle(); }
explicit ScriptFrameIter(const FrameIter::Data& data) : FrameIter(data) { settle(); }
explicit ScriptFrameIter(AbstractFramePtr frame) : FrameIter(frame) { settle(); }
ScriptFrameIter& operator++() {
@ -2138,8 +2133,8 @@ class NonBuiltinFrameIter : public FrameIter
settle();
}
explicit NonBuiltinFrameIter(JSContext* cx, const FrameIter::Data& data)
: FrameIter(cx, data)
explicit NonBuiltinFrameIter(const FrameIter::Data& data)
: FrameIter(data)
{}
NonBuiltinFrameIter& operator++() {
@ -2183,8 +2178,8 @@ class NonBuiltinScriptFrameIter : public ScriptFrameIter
settle();
}
explicit NonBuiltinScriptFrameIter(JSContext* cx, const ScriptFrameIter::Data& data)
: ScriptFrameIter(cx, data)
explicit NonBuiltinScriptFrameIter(const ScriptFrameIter::Data& data)
: ScriptFrameIter(data)
{}
NonBuiltinScriptFrameIter& operator++() {

Просмотреть файл

@ -4276,15 +4276,13 @@ TypeScript::destroy()
void
Zone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
size_t* typePool,
size_t* baselineStubsOptimized,
size_t* uniqueIdMap)
size_t* baselineStubsOptimized)
{
*typePool += types.typeLifoAlloc.sizeOfExcludingThis(mallocSizeOf);
if (jitZone()) {
*baselineStubsOptimized +=
jitZone()->optimizedStubSpace()->sizeOfExcludingThis(mallocSizeOf);
}
*uniqueIdMap += uniqueIds_.sizeOfExcludingThis(mallocSizeOf);
}
TypeZone::TypeZone(Zone* zone)

Просмотреть файл

@ -1883,10 +1883,6 @@ ReportZoneStats(const JS::ZoneStats& zStats,
zStats.unusedGCThings.totalSize(),
"Unused GC thing cells within non-empty arenas.");
ZCREPORT_BYTES(pathPrefix + NS_LITERAL_CSTRING("unique-id-map"),
zStats.uniqueIdMap,
"Address-independent cell identities.");
ZCREPORT_GC_BYTES(pathPrefix + NS_LITERAL_CSTRING("lazy-scripts/gc-heap"),
zStats.lazyScriptsGCHeap,
"Scripts that haven't executed yet.");

Просмотреть файл

@ -8,6 +8,7 @@
#include "jsapi.h"
#include "jsfriendapi.h"
#include "jsprf.h"
#include "mozilla/ChaosMode.h"
#include "mozilla/dom/ScriptSettings.h"
#include "nsServiceManagerUtils.h"
#include "nsComponentManagerUtils.h"
@ -1258,6 +1259,20 @@ XRE_XPCShellMain(int argc, char** argv, char** envp)
UniquePtr<base::StatisticsRecorder> telStats =
MakeUnique<base::StatisticsRecorder>();
if (PR_GetEnv("MOZ_CHAOSMODE")) {
ChaosFeature feature = ChaosFeature::Any;
long featureInt = strtol(PR_GetEnv("MOZ_CHAOSMODE"), nullptr, 16);
if (featureInt) {
// NOTE: MOZ_CHAOSMODE=0 or a non-hex value maps to Any feature.
feature = static_cast<ChaosFeature>(featureInt);
}
ChaosMode::SetChaosFeature(feature);
}
if (ChaosMode::isActive(ChaosFeature::Any)) {
printf_stderr("*** You are running in chaos test mode. See ChaosMode.h. ***\n");
}
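// Illustration of the parsing above (bit values are examples, not the real
// ChaosFeature constants): strtol(value, nullptr, 16) reads the variable as
// hex, so
//   MOZ_CHAOSMODE=4     -> mask 0x4
//   MOZ_CHAOSMODE=ff    -> mask 0xff
//   MOZ_CHAOSMODE=0 (or any non-hex string) -> mask 0, i.e. ChaosFeature::Any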
nsCOMPtr<nsIFile> appFile;
rv = XRE_GetBinaryPath(argv[0], getter_AddRefs(appFile));
if (NS_FAILED(rv)) {

Просмотреть файл

@ -2770,22 +2770,213 @@ private:
* mSelectorsForDescendants. If the element does match one of the selectors,
* we cause it to be restyled with eRestyle_Self.
*
* We traverse down the flattened tree unless we find an element that
* (a) already has a pending restyle, or (b) does not have a pending restyle
* but does match one of the selectors in mSelectorsForDescendants. For (a),
* we add the current mSelectorsForDescendants into the existing restyle data,
* and for (b) we add a new pending restyle with that array. So in both
* cases, when we come to restyling this element back up in
* ProcessPendingRestyles, we will again find the eRestyle_SomeDescendants
* hint and its selectors array.
* We traverse down the frame tree (and through the flattened content tree
* when we find undisplayed content) unless we find an element that (a) already
* has a pending restyle, or (b) does not have a pending restyle but does match
* one of the selectors in mSelectorsForDescendants. For (a), we add the
* current mSelectorsForDescendants into the existing restyle data, and for (b)
* we add a new pending restyle with that array. So in both cases, when we
* come to restyling this element back up in ProcessPendingRestyles, we will
* again find the eRestyle_SomeDescendants hint and its selectors array.
*
* This ensures that we don't visit descendant elements and check them
* against mSelectorsForDescendants more than once.
*/
void
ElementRestyler::AddPendingRestylesForDescendantsMatchingSelectors(
Element* aElement,
ElementRestyler::ConditionallyRestyleChildren()
{
MOZ_ASSERT(mContent == mFrame->GetContent());
if (!mContent->IsElement() || mSelectorsForDescendants.IsEmpty()) {
return;
}
Element* element = mContent->AsElement();
LOG_RESTYLE("traversing descendants of frame %s (with element %s) to "
"propagate eRestyle_SomeDescendants for these %d selectors:",
FrameTagToString(mFrame).get(),
ElementTagToString(element).get(),
int(mSelectorsForDescendants.Length()));
LOG_RESTYLE_INDENT();
#ifdef RESTYLE_LOGGING
for (nsCSSSelector* sel : mSelectorsForDescendants) {
LOG_RESTYLE("%s", sel->RestrictedSelectorToString().get());
}
#endif
Element* restyleRoot = mRestyleTracker.FindClosestRestyleRoot(element);
ConditionallyRestyleChildren(mFrame, restyleRoot);
}
void
ElementRestyler::ConditionallyRestyleChildren(nsIFrame* aFrame,
Element* aRestyleRoot)
{
MOZ_ASSERT(aFrame->GetContent());
MOZ_ASSERT(aFrame->GetContent()->IsElement());
ConditionallyRestyleUndisplayedDescendants(aFrame, aRestyleRoot);
ConditionallyRestyleContentChildren(aFrame, aRestyleRoot);
}
// The structure of this method parallels RestyleContentChildren.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::ConditionallyRestyleContentChildren(nsIFrame* aFrame,
Element* aRestyleRoot)
{
MOZ_ASSERT(aFrame->GetContent());
MOZ_ASSERT(aFrame->GetContent()->IsElement());
if (aFrame->GetContent()->HasFlag(mRestyleTracker.RootBit())) {
aRestyleRoot = aFrame->GetContent()->AsElement();
}
for (nsIFrame* f = aFrame; f;
f = GetNextContinuationWithSameStyle(f, f->StyleContext())) {
nsIFrame::ChildListIterator lists(f);
for (; !lists.IsDone(); lists.Next()) {
nsFrameList::Enumerator childFrames(lists.CurrentList());
for (; !childFrames.AtEnd(); childFrames.Next()) {
nsIFrame* child = childFrames.get();
// Out-of-flows are reached through their placeholders. Continuations
// and block-in-inline splits are reached through those chains.
if (!(child->GetStateBits() & NS_FRAME_OUT_OF_FLOW) &&
!GetPrevContinuationWithSameStyle(child)) {
// only do frames that are in flow
if (child->GetType() == nsGkAtoms::placeholderFrame) { // placeholder
// get out of flow frame and recur there
nsIFrame* outOfFlowFrame =
nsPlaceholderFrame::GetRealFrameForPlaceholder(child);
// |nsFrame::GetParentStyleContext| checks being out
// of flow so that this works correctly.
do {
if (GetPrevContinuationWithSameStyle(outOfFlowFrame)) {
continue;
}
if (!ConditionallyRestyle(outOfFlowFrame, aRestyleRoot)) {
ConditionallyRestyleChildren(outOfFlowFrame, aRestyleRoot);
}
} while ((outOfFlowFrame = outOfFlowFrame->GetNextContinuation()));
} else { // regular child frame
if (child != mResolvedChild) {
if (!ConditionallyRestyle(child, aRestyleRoot)) {
ConditionallyRestyleChildren(child, aRestyleRoot);
}
}
}
}
}
}
}
}
// The structure of this method parallels RestyleUndisplayedDescendants.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::ConditionallyRestyleUndisplayedDescendants(
nsIFrame* aFrame,
Element* aRestyleRoot)
{
nsIContent* undisplayedParent;
if (MustCheckUndisplayedContent(aFrame, undisplayedParent)) {
DoConditionallyRestyleUndisplayedDescendants(undisplayedParent,
aRestyleRoot);
}
}
// The structure of this method parallels DoRestyleUndisplayedDescendants.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::DoConditionallyRestyleUndisplayedDescendants(
nsIContent* aParent,
Element* aRestyleRoot)
{
nsCSSFrameConstructor* fc = mPresContext->FrameConstructor();
UndisplayedNode* nodes = fc->GetAllUndisplayedContentIn(aParent);
ConditionallyRestyleUndisplayedNodes(nodes, aParent,
NS_STYLE_DISPLAY_NONE, aRestyleRoot);
nodes = fc->GetAllDisplayContentsIn(aParent);
ConditionallyRestyleUndisplayedNodes(nodes, aParent,
NS_STYLE_DISPLAY_CONTENTS, aRestyleRoot);
}
// The structure of this method parallels RestyleUndisplayedNodes.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::ConditionallyRestyleUndisplayedNodes(
UndisplayedNode* aUndisplayed,
nsIContent* aUndisplayedParent,
const uint8_t aDisplay,
Element* aRestyleRoot)
{
MOZ_ASSERT(aDisplay == NS_STYLE_DISPLAY_NONE ||
aDisplay == NS_STYLE_DISPLAY_CONTENTS);
if (!aUndisplayed) {
return;
}
if (aUndisplayedParent &&
aUndisplayedParent->IsElement() &&
aUndisplayedParent->HasFlag(mRestyleTracker.RootBit())) {
aRestyleRoot = aUndisplayedParent->AsElement();
}
for (UndisplayedNode* undisplayed = aUndisplayed; undisplayed;
undisplayed = undisplayed->mNext) {
if (!undisplayed->mContent->IsElement()) {
continue;
}
Element* element = undisplayed->mContent->AsElement();
if (!ConditionallyRestyle(element, aRestyleRoot)) {
if (aDisplay == NS_STYLE_DISPLAY_NONE) {
ConditionallyRestyleContentDescendants(element, aRestyleRoot);
} else { // NS_STYLE_DISPLAY_CONTENTS
DoConditionallyRestyleUndisplayedDescendants(element, aRestyleRoot);
}
}
}
}
void
ElementRestyler::ConditionallyRestyleContentDescendants(Element* aElement,
Element* aRestyleRoot)
{
if (aElement->HasFlag(mRestyleTracker.RootBit())) {
aRestyleRoot = aElement;
}
FlattenedChildIterator it(aElement);
for (nsIContent* n = it.GetNextChild(); n; n = it.GetNextChild()) {
if (n->IsElement()) {
Element* e = n->AsElement();
if (!ConditionallyRestyle(e, aRestyleRoot)) {
ConditionallyRestyleContentDescendants(e, aRestyleRoot);
}
}
}
}
bool
ElementRestyler::ConditionallyRestyle(nsIFrame* aFrame, Element* aRestyleRoot)
{
MOZ_ASSERT(aFrame->GetContent());
if (!aFrame->GetContent()->IsElement()) {
return true;
}
return ConditionallyRestyle(aFrame->GetContent()->AsElement(), aRestyleRoot);
}
bool
ElementRestyler::ConditionallyRestyle(Element* aElement, Element* aRestyleRoot)
{
LOG_RESTYLE("considering element %s for eRestyle_SomeDescendants",
ElementTagToString(aElement).get());
@ -2807,7 +2998,7 @@ ElementRestyler::AddPendingRestylesForDescendantsMatchingSelectors(
data.mSelectorsForDescendants = mSelectorsForDescendants;
mRestyleTracker.AddPendingRestyle(aElement, rshint, nsChangeHint(0), &data,
Some(aRestyleRoot));
return;
return true;
}
if (SelectorMatchesForRestyle(aElement)) {
@ -2818,61 +3009,26 @@ ElementRestyler::AddPendingRestylesForDescendantsMatchingSelectors(
eRestyle_Self | eRestyle_SomeDescendants,
nsChangeHint(0), &data,
Some(aRestyleRoot));
return;
return true;
}
FlattenedChildIterator it(aElement);
for (nsIContent* n = it.GetNextChild(); n; n = it.GetNextChild()) {
if (n->IsElement()) {
AddPendingRestylesForDescendantsMatchingSelectors(n->AsElement(),
aRestyleRoot);
}
}
}
void
ElementRestyler::AddPendingRestylesForDescendantsMatchingSelectors(
nsIContent* aContent)
{
if (!mContent->IsElement() || mSelectorsForDescendants.IsEmpty()) {
return;
}
Element* element = mContent->AsElement();
LOG_RESTYLE("traversing descendants of element %s to propagate "
"eRestyle_SomeDescendants for these %d selectors:",
ElementTagToString(element).get(),
int(mSelectorsForDescendants.Length()));
LOG_RESTYLE_INDENT();
#ifdef RESTYLE_LOGGING
for (nsCSSSelector* sel : mSelectorsForDescendants) {
LOG_RESTYLE("%s", sel->RestrictedSelectorToString().get());
}
#endif
Element* restyleRoot = mRestyleTracker.FindClosestRestyleRoot(element);
FlattenedChildIterator it(element);
for (nsIContent* n = it.GetNextChild(); n; n = it.GetNextChild()) {
if (n->IsElement()) {
AddPendingRestylesForDescendantsMatchingSelectors(n->AsElement(),
restyleRoot);
}
}
return false;
}
bool
ElementRestyler::MustCheckUndisplayedContent(nsIContent*& aUndisplayedParent)
ElementRestyler::MustCheckUndisplayedContent(nsIFrame* aFrame,
nsIContent*& aUndisplayedParent)
{
// When the root element is display:none, we still construct *some*
// frames that have the root element as their mContent, down to the
// DocElementContainingBlock.
if (mFrame->StyleContext()->GetPseudo()) {
if (aFrame->StyleContext()->GetPseudo()) {
aUndisplayedParent = nullptr;
return mFrame == mPresContext->FrameConstructor()->
return aFrame == mPresContext->FrameConstructor()->
GetDocElementContainingBlock();
}
aUndisplayedParent = mFrame->GetContent();
aUndisplayedParent = aFrame->GetContent();
return !!aUndisplayedParent;
}
@ -2945,7 +3101,7 @@ ElementRestyler::MoveStyleContextsForChildren(nsStyleContext* aOldContext)
// Bail out if there are undisplayed or display:contents children.
// FIXME: We could get this to work if we need to.
nsIContent* undisplayedParent;
if (MustCheckUndisplayedContent(undisplayedParent)) {
if (MustCheckUndisplayedContent(mFrame, undisplayedParent)) {
nsCSSFrameConstructor* fc = mPresContext->FrameConstructor();
if (fc->GetAllUndisplayedContentIn(undisplayedParent) ||
fc->GetAllDisplayContentsIn(undisplayedParent)) {
@ -3173,7 +3329,7 @@ ElementRestyler::Restyle(nsRestyleHint aRestyleHint)
mRestyleTracker.AddRestyleRootsIfAwaitingRestyle(descendants);
if (aRestyleHint & eRestyle_SomeDescendants) {
AddPendingRestylesForDescendantsMatchingSelectors(mContent);
ConditionallyRestyleChildren();
}
return;
}
@ -3205,7 +3361,7 @@ ElementRestyler::Restyle(nsRestyleHint aRestyleHint)
mRestyleTracker.AddRestyleRootsIfAwaitingRestyle(descendants);
if (aRestyleHint & eRestyle_SomeDescendants) {
AddPendingRestylesForDescendantsMatchingSelectors(mContent);
ConditionallyRestyleChildren();
}
return;
}
@ -4245,16 +4401,20 @@ ElementRestyler::ComputeStyleChangeFor(nsIFrame* aFrame,
}
}
// The structure of this method parallels ConditionallyRestyleUndisplayedDescendants.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::RestyleUndisplayedDescendants(nsRestyleHint aChildRestyleHint)
{
nsIContent* undisplayedParent;
if (MustCheckUndisplayedContent(undisplayedParent)) {
if (MustCheckUndisplayedContent(mFrame, undisplayedParent)) {
DoRestyleUndisplayedDescendants(aChildRestyleHint, undisplayedParent,
mFrame->StyleContext());
}
}
// The structure of this method parallels DoConditionallyRestyleUndisplayedDescendants.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::DoRestyleUndisplayedDescendants(nsRestyleHint aChildRestyleHint,
nsIContent* aParent,
@ -4269,6 +4429,8 @@ ElementRestyler::DoRestyleUndisplayedDescendants(nsRestyleHint aChildRestyleHint
aParentContext, NS_STYLE_DISPLAY_CONTENTS);
}
// The structure of this method parallels ConditionallyRestyleUndisplayedNodes.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::RestyleUndisplayedNodes(nsRestyleHint aChildRestyleHint,
UndisplayedNode* aUndisplayed,
@ -4497,6 +4659,8 @@ ElementRestyler::InitializeAccessibilityNotifications(nsStyleContext* aNewContex
#endif
}
// The structure of this method parallels ConditionallyRestyleContentChildren.
// If you update this method, you probably want to update that one too.
void
ElementRestyler::RestyleContentChildren(nsIFrame* aParent,
nsRestyleHint aChildRestyleHint)

Просмотреть файл

@ -709,7 +709,8 @@ private:
// Helpers for RestyleChildren().
void RestyleUndisplayedDescendants(nsRestyleHint aChildRestyleHint);
bool MustCheckUndisplayedContent(nsIContent*& aUndisplayedParent);
bool MustCheckUndisplayedContent(nsIFrame* aFrame,
nsIContent*& aUndisplayedParent);
/**
* In the following two methods, aParentStyleContext is either
@ -757,9 +758,27 @@ private:
eNotifyHidden
};
void AddPendingRestylesForDescendantsMatchingSelectors(Element* aElement,
Element* aRestyleRoot);
void AddPendingRestylesForDescendantsMatchingSelectors(nsIContent* aContent);
// These methods handle the eRestyle_SomeDescendants hint by traversing
// down the frame tree (and then when reaching undisplayed content,
// the flattened content tree) to find elements that match a selector
// in mSelectorsForDescendants and call AddPendingRestyle for them.
void ConditionallyRestyleChildren();
void ConditionallyRestyleChildren(nsIFrame* aFrame,
Element* aRestyleRoot);
void ConditionallyRestyleContentChildren(nsIFrame* aFrame,
Element* aRestyleRoot);
void ConditionallyRestyleUndisplayedDescendants(nsIFrame* aFrame,
Element* aRestyleRoot);
void DoConditionallyRestyleUndisplayedDescendants(nsIContent* aParent,
Element* aRestyleRoot);
void ConditionallyRestyleUndisplayedNodes(UndisplayedNode* aUndisplayed,
nsIContent* aUndisplayedParent,
const uint8_t aDisplay,
Element* aRestyleRoot);
void ConditionallyRestyleContentDescendants(Element* aElement,
Element* aRestyleRoot);
bool ConditionallyRestyle(nsIFrame* aFrame, Element* aRestyleRoot);
bool ConditionallyRestyle(Element* aElement, Element* aRestyleRoot);
#ifdef RESTYLE_LOGGING
int32_t& LoggingDepth() { return mLoggingDepth; }
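A minimal, self-contained sketch of the eRestyle_SomeDescendants walk declared
above, using hypothetical simplified types rather than the Gecko classes
(Element, SelectorMatcher and both child lists here are stand-ins): descend
the frame tree, switch to the flattened content tree for undisplayed content,
and stop descending below any element that a selector matches.

#include <functional>
#include <vector>

struct Element {
  std::vector<Element*> frameChildren;       // children that have frames
  std::vector<Element*> undisplayedChildren; // display:none / contents kids
  bool restyled = false;
};

using SelectorMatcher = std::function<bool(const Element&)>;

// Stands in for ConditionallyRestyle(): returns true if a selector matched.
bool ConditionallyRestyle(Element& aElement,
                          const std::vector<SelectorMatcher>& aSelectors)
{
  for (const SelectorMatcher& matches : aSelectors) {
    if (matches(aElement)) {
      aElement.restyled = true;  // stands in for AddPendingRestyle()
      return true;
    }
  }
  return false;
}

void ConditionallyRestyleChildren(Element& aElement,
                                  const std::vector<SelectorMatcher>& aSelectors)
{
  for (Element* child : aElement.frameChildren) {        // frame tree
    if (!ConditionallyRestyle(*child, aSelectors)) {
      ConditionallyRestyleChildren(*child, aSelectors);
    }
  }
  for (Element* child : aElement.undisplayedChildren) {  // flattened tree
    if (!ConditionallyRestyle(*child, aSelectors)) {
      ConditionallyRestyleChildren(*child, aSelectors);
    }
  }
}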

View file

@ -2385,14 +2385,6 @@ nsDisplayBackgroundImage::CanOptimizeToImageLayer(LayerManager* aManager,
nsRect borderArea = nsRect(ToReferenceFrame(), mFrame->GetSize());
const nsStyleBackground::Layer &layer = mBackgroundStyle->mLayers[mLayer];
if (layer.mClip != NS_STYLE_BG_CLIP_BORDER) {
return false;
}
nscoord radii[8];
if (mFrame->GetBorderRadii(radii)) {
return false;
}
nsBackgroundLayerState state =
nsCSSRendering::PrepareBackgroundLayer(presContext, mFrame, flags,
borderArea, borderArea, layer);

View file

@ -920,12 +920,6 @@ GetDisplayPortFromMarginsData(nsIContent* aContent,
ScreenRect screenRect = LayoutDeviceRect::FromAppUnits(base, auPerDevPixel)
* parentRes;
nsRect expandedScrollableRect =
nsLayoutUtils::CalculateExpandedScrollableRect(frame);
ScreenRect screenExpScrollableRect =
LayoutDeviceRect::FromAppUnits(expandedScrollableRect - scrollPos,
auPerDevPixel) * parentRes;
if (gfxPrefs::LayersTilesEnabled()) {
// Note on the correctness of applying the alignment in Screen space:
// The correct space to apply the alignment in would be Layer space, but
@ -949,9 +943,6 @@ GetDisplayPortFromMarginsData(nsIContent* aContent,
// up to tile boundaries.
screenRect.Inflate(1);
// Make sure the displayport remains within the scrollable rect.
screenRect = screenRect.ForceInside(screenExpScrollableRect);
// Avoid division by zero.
if (alignmentX == 0) {
alignmentX = 1;
@ -1001,9 +992,6 @@ GetDisplayPortFromMarginsData(nsIContent* aContent,
screenRect.x -= left;
screenRect.width += left + right;
}
// Make sure the displayport remains within the scrollable rect.
screenRect = screenRect.ForceInside(screenExpScrollableRect);
}
// Convert the aligned rect back into app units.
@ -1013,6 +1001,7 @@ GetDisplayPortFromMarginsData(nsIContent* aContent,
result = ApplyRectMultiplier(result, aMultiplier);
// Finally, clamp it to the expanded scrollable rect.
nsRect expandedScrollableRect = nsLayoutUtils::CalculateExpandedScrollableRect(frame);
result = expandedScrollableRect.Intersect(result + scrollPos) - scrollPos;
return result;

View file

@ -116,9 +116,13 @@ status_t SampleIterator::seekTo(uint32_t sampleIndex) {
}
}
CHECK(mCurrentChunkSampleSizes.size() == mSamplesPerChunk);
uint32_t chunkRelativeSampleIndex =
(sampleIndex - mFirstChunkSampleIndex) % mSamplesPerChunk;
CHECK(chunkRelativeSampleIndex < mSamplesPerChunk);
mCurrentSampleOffset = mCurrentChunkOffset;
for (uint32_t i = 0; i < chunkRelativeSampleIndex; ++i) {
mCurrentSampleOffset += mCurrentChunkSampleSizes[i];

View file

@ -3,3 +3,7 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
STLFLAGS =
# Dummy rule to transition from .c file to .cpp file without a clobber.
# (bug 1005486)
%/jemalloc_config.c: ;
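# (The empty pattern rule lets make treat stale dependencies on the removed
# .c file as trivially satisfiable instead of failing with "No rule to make
# target".)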

View file

@ -1,51 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef MOZ_JEMALLOC3
#define MOZ_JEMALLOC_IMPL
#include "mozmemory_wrap.h"
#include "mozilla/Types.h"
/* Override some jemalloc defaults */
#ifdef MOZ_WIDGET_GONK
/* we tolerate around 4MiB of dirty pages on most platforms, except for B2G,
* where our limit is 1MiB
*/
#define MOZ_MALLOC_PLATFORM_OPTIONS ",lg_dirty_mult:8"
#else
#define MOZ_MALLOC_PLATFORM_OPTIONS ",lg_dirty_mult:6"
#endif
#ifdef DEBUG
#define MOZ_MALLOC_BUILD_OPTIONS ",junk:true"
#else
#define MOZ_MALLOC_BUILD_OPTIONS ",junk:free"
#endif
#define MOZ_MALLOC_OPTIONS "narenas:1,lg_chunk:20,tcache:false"
MFBT_DATA const char * je_(malloc_conf) =
MOZ_MALLOC_OPTIONS MOZ_MALLOC_PLATFORM_OPTIONS MOZ_MALLOC_BUILD_OPTIONS;
#ifdef ANDROID
#include <android/log.h>
static void
_je_malloc_message(void *cbopaque, const char *s)
{
__android_log_print(ANDROID_LOG_INFO, "GeckoJemalloc", "%s", s);
}
void (*je_(malloc_message))(void *, const char *s) = _je_malloc_message;
#endif
#endif /* MOZ_JEMALLOC3 */
/* Provide an abort function for use in jemalloc code */
#include <mozilla/Assertions.h>
void moz_abort() {
MOZ_CRASH();
}

View file

@ -0,0 +1,169 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef MOZ_JEMALLOC4
#define MOZ_JEMALLOC_IMPL
/* mozmemory_wrap.h needs to be included before MFBT headers */
#include "mozmemory_wrap.h"
#include <mozilla/Assertions.h>
#include "mozilla/Types.h"
#define DLLEXPORT
#include "jemalloc/jemalloc.h"
#ifdef XP_WIN
#include <windows.h>
#endif
#ifdef XP_DARWIN
#include <sys/mman.h>
#endif
/* Override some jemalloc defaults */
#ifdef MOZ_WIDGET_GONK
/* we tolerate around 4MiB of dirty pages on most platforms, except for B2G,
* where our limit is 1MiB
*/
#define MOZ_MALLOC_PLATFORM_OPTIONS ",lg_dirty_mult:8"
#else
#define MOZ_MALLOC_PLATFORM_OPTIONS ",lg_dirty_mult:6"
#endif
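/* lg_dirty_mult is the log2 of the minimum active:dirty page ratio, so 6
 * tolerates roughly 1 dirty page per 64 active pages and 8 roughly 1 per
 * 256; with on the order of 256MiB of active pages, that corresponds to
 * the ~4MiB and ~1MiB figures above.
 */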
#ifdef DEBUG
#define MOZ_MALLOC_BUILD_OPTIONS ",junk:true"
#else
#define MOZ_MALLOC_BUILD_OPTIONS ",junk:free"
#endif
#define MOZ_MALLOC_OPTIONS "narenas:1,lg_chunk:20,tcache:false"
MFBT_DATA const char* je_(malloc_conf) =
MOZ_MALLOC_OPTIONS MOZ_MALLOC_PLATFORM_OPTIONS MOZ_MALLOC_BUILD_OPTIONS;
#ifdef ANDROID
#include <android/log.h>
static void
_je_malloc_message(void* cbopaque, const char* s)
{
__android_log_print(ANDROID_LOG_INFO, "GeckoJemalloc", "%s", s);
}
void (*je_(malloc_message))(void*, const char* s) = _je_malloc_message;
#endif
/* Jemalloc supports hooks that are called on chunk
* allocate/deallocate/commit/decommit/purge/etc.
*
* We currently only hook commit, decommit and purge. We do this to tweak
* the way chunks are handled so that RSS stays lower than it normally
* would with the defaults jemalloc uses.
* This somewhat matches the behavior of mozjemalloc, except it doesn't
* rely on a double purge on mac, instead purging directly. (Yes, this
* means we can get rid of jemalloc_purge_freed_pages at some point)
*
* The default for jemalloc is to do the following:
* - commit, decommit: nothing
* - purge: MEM_RESET on Windows, MADV_FREE on Mac/BSD, MADV_DONTNEED on Linux
*
* The hooks we setup do the following:
* on Windows:
* - commit: MEM_COMMIT
* - decommit: MEM_DECOMMIT
* on Mac:
* - purge: mmap new anonymous memory on top of the chunk
*
* We only set the above hooks; others are left at their defaults. (A
* standalone read-back sketch follows this hunk.)
*/
#if defined(XP_WIN) || defined(XP_DARWIN)
class JemallocInit {
public:
JemallocInit()
{
chunk_hooks_t hooks;
size_t hooks_len;
unsigned narenas;
size_t mib[3];
size_t size;
size = sizeof(narenas);
je_(mallctl)("arenas.narenas", &narenas, &size, nullptr, 0);
size = sizeof(mib) / sizeof(mib[0]);
je_(mallctlnametomib)("arena.0.chunk_hooks", mib, &size);
/* Set the hooks on all the existing arenas. */
for (unsigned arena = 0; arena < narenas; arena++) {
mib[1] = arena;
hooks_len = sizeof(hooks);
je_(mallctlbymib)(mib, size, &hooks, &hooks_len, nullptr, 0);
#ifdef XP_WIN
hooks.commit = CommitHook;
hooks.decommit = DecommitHook;
#endif
#ifdef XP_DARWIN
hooks.purge = PurgeHook;
#endif
je_(mallctlbymib)(mib, size, nullptr, nullptr, &hooks, hooks_len);
}
}
private:
#ifdef XP_WIN
static bool
CommitHook(void* chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
void* addr = reinterpret_cast<void*>(
reinterpret_cast<uintptr_t>(chunk) + static_cast<uintptr_t>(offset));
if (!VirtualAlloc(addr, length, MEM_COMMIT, PAGE_READWRITE))
MOZ_CRASH();
return false;
}
static bool
DecommitHook(void* chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
void* addr = reinterpret_cast<void*>(
reinterpret_cast<uintptr_t>(chunk) + static_cast<uintptr_t>(offset));
if (!VirtualFree(addr, length, MEM_DECOMMIT))
MOZ_CRASH();
return false;
}
#endif
#ifdef XP_DARWIN
static bool
PurgeHook(void* chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
void* addr = reinterpret_cast<void*>(
reinterpret_cast<uintptr_t>(chunk) + static_cast<uintptr_t>(offset));
void* new_addr = mmap(addr, length, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
return (new_addr != addr);
}
#endif
};
/* For the static constructor from the class above */
JemallocInit gJemallocInit;
#endif
#else
#include <mozilla/Assertions.h>
#endif /* MOZ_JEMALLOC4 */
/* Provide an abort function for use in jemalloc code */
extern "C" void moz_abort() {
MOZ_CRASH();
}
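
For reference, a standalone sketch that reads the installed hooks back via
mallctl; it assumes an unprefixed jemalloc 4 build where mallctl and
chunk_hooks_t are exposed directly (in this tree the calls are wrapped as
je_(...)):

#include <cstdio>
#include <jemalloc/jemalloc.h>

int main()
{
  chunk_hooks_t hooks;
  size_t len = sizeof(hooks);
  // "arena.<i>.chunk_hooks" is readable as well as writable, so the
  // installed commit/decommit/purge members can be inspected.
  if (mallctl("arena.0.chunk_hooks", &hooks, &len, nullptr, 0) != 0) {
    std::fprintf(stderr, "mallctl(arena.0.chunk_hooks) failed\n");
    return 1;
  }
  std::printf("commit=%p decommit=%p purge=%p\n",
              reinterpret_cast<void*>(hooks.commit),
              reinterpret_cast<void*>(hooks.decommit),
              reinterpret_cast<void*>(hooks.purge));
  return 0;
}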

View file

@ -19,11 +19,11 @@ if CONFIG['MOZ_REPLACE_MALLOC']:
]
SOURCES += [
'jemalloc_config.c',
'jemalloc_config.cpp',
'mozmemory_wrap.c',
]
if CONFIG['MOZ_JEMALLOC3']:
if CONFIG['MOZ_JEMALLOC4']:
SOURCES += [
'mozjemalloc_compat.c',
]
@ -40,7 +40,7 @@ if CONFIG['MOZ_REPLACE_MALLOC']:
Library('memory')
if CONFIG['MOZ_JEMALLOC3']:
if CONFIG['MOZ_JEMALLOC4']:
if not CONFIG['MOZ_NATIVE_JEMALLOC']:
USE_LIBS += [
'jemalloc',

View file

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZ_JEMALLOC3
#ifndef MOZ_JEMALLOC4
# error Should only compile this file when building with jemalloc 4
#endif
@ -105,7 +105,6 @@ compute_bin_unused_and_bookkeeping(jemalloc_stats_t *stats, unsigned int narenas
unsigned int i, j;
size_t stats_metadata;
size_t ameta; // internal allocations for a single arena
size_t stats_ametadata = 0; // total internal allocations in all arenas
// narenas also counts uninitialized arenas, and initialized arenas
@ -125,9 +124,6 @@ compute_bin_unused_and_bookkeeping(jemalloc_stats_t *stats, unsigned int narenas
continue;
}
CTL_I_GET("stats.arenas.0.metadata.allocated", ameta, i);
stats_ametadata += ameta;
CTL_IJ_GET("stats.arenas.0.bins.0.curruns", curruns, i, j);
CTL_IJ_GET("stats.arenas.0.bins.0.curregs", curregs, i, j);
@ -136,6 +132,10 @@ compute_bin_unused_and_bookkeeping(jemalloc_stats_t *stats, unsigned int narenas
}
CTL_GET("stats.metadata", stats_metadata);
/* get the summation for all arenas, i == narenas */
CTL_I_GET("stats.arenas.0.metadata.allocated", stats_ametadata, narenas);
stats->bookkeeping = stats_metadata - stats_ametadata;
stats->bin_unused = bin_unused;
}

View file

@ -61,7 +61,7 @@ if CONFIG['OS_TARGET'] == 'Linux':
# For mremap
DEFINES['_GNU_SOURCE'] = True
if CONFIG['MOZ_NUWA_PROCESS'] and CONFIG['MOZ_JEMALLOC3']:
if CONFIG['MOZ_NUWA_PROCESS'] and CONFIG['MOZ_JEMALLOC4']:
DEFINES['pthread_mutex_lock'] = '__real_pthread_mutex_lock'
if CONFIG['GNU_CC']:

View file

@ -4,6 +4,25 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.0.1 (XXX)
Bug fixes:
- Fix arenas_cache_cleanup() and arena_get_hard() to handle
allocation/deallocation within the application's thread-specific data
cleanup functions even after arenas_cache is torn down.
- Don't bitshift by negative amounts when encoding/decoding run sizes in chunk
header maps. This affected systems with page sizes greater than 8 KiB. (A
sketch of the guarded-shift pattern follows this entry.)
- Rename index_t to szind_t to avoid an existing type on Solaris.
- Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
match glibc and avoid compilation errors when including both
jemalloc/jemalloc.h and malloc.h in C++ code.
- Fix chunk purge hook calls for in-place huge shrinking reallocation to
specify the old chunk size rather than the new chunk size. This bug caused
no correctness issues for the default chunk purge function, but was
visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
- Fix TLS configuration such that it is enabled by default for platforms on
which it works correctly.
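Shifting by a negative count is undefined behavior in C and C++; the fix,
which appears later in this diff as arena_mapbits_size_encode() and
arena_mapbits_size_decode(), selects the shift direction at preprocessor time
instead. A minimal sketch of that pattern, with CHUNK_MAP_SIZE_SHIFT given an
illustrative negative value:

#include <cassert>
#include <cstddef>

// Illustrative stand-in: with pages larger than 8 KiB the real constant
// can come out negative, making "size << CHUNK_MAP_SIZE_SHIFT" undefined.
#define CHUNK_MAP_SIZE_SHIFT (-1)

static std::size_t size_encode(std::size_t size)
{
#if CHUNK_MAP_SIZE_SHIFT > 0
  return size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
  return size;
#else
  return size >> -CHUNK_MAP_SIZE_SHIFT;  // shift the other way instead
#endif
}

static std::size_t size_decode(std::size_t mapbits)
{
#if CHUNK_MAP_SIZE_SHIFT > 0
  return mapbits >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
  return mapbits;
#else
  return mapbits << -CHUNK_MAP_SIZE_SHIFT;
#endif
}

int main()
{
  std::size_t size = 0x8000;  // a run size on a hypothetical 16 KiB-page system
  assert(size_decode(size_encode(size)) == size);
  return 0;
}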
* 4.0.0 (August 17, 2015)
This version contains many speed and space optimizations, both minor and

View file

@ -1 +1 @@
4.0.0-0-g6e98caf8f064482b9ab292ef3638dea67420bbc2
4.0.0-12-ged4883285e111b426e5769b24dad164ebacaa5b9

memory/jemalloc/src/configure (vendored)
View file

@ -728,6 +728,7 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@ -831,6 +832,7 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE}'
@ -1083,6 +1085,15 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
-runstatedir | --runstatedir | --runstatedi | --runstated \
| --runstate | --runstat | --runsta | --runst | --runs \
| --run | --ru | --r)
ac_prev=runstatedir ;;
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
| --run=* | --ru=* | --r=*)
runstatedir=$ac_optarg ;;
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@ -1220,7 +1231,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir
libdir localedir mandir runstatedir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@ -1373,6 +1384,7 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@ -6964,7 +6976,6 @@ else
fi
set -x
if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
rm -f "${objroot}VERSION"
@ -6992,8 +7003,6 @@ $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSIO
cp ${srcroot}VERSION ${objroot}VERSION
fi
fi
set +x
jemalloc_version=`cat "${objroot}VERSION"`
jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'`
jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'`
@ -7283,15 +7292,18 @@ else
fi
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
if test "x${enable_tls}" = "x" ; then
if test "x${force_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; }
enable_tls="1"
fi
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
enable_tls="1"
elif test "x${force_tls}" = "x0" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; }
enable_tls="0"
enable_tls="0"
else
enable_tls="1"
fi
fi
if test "x${enable_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5
@ -7327,12 +7339,17 @@ else
fi
if test "x${enable_tls}" = "x1" ; then
if test "x${force_tls}" = "x0" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5
$as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;}
fi
cat >>confdefs.h <<_ACEOF
#define JEMALLOC_TLS
_ACEOF
elif test "x${force_tls}" = "x1" ; then
as_fn_error $? "Failed to configure TLS, which is mandatory for correct function" "$LINENO" 5
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5
$as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;}
fi

View file

@ -1272,13 +1272,16 @@ fi
,
enable_tls=""
)
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then
AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
enable_tls="1"
fi
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then
AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
enable_tls="0"
if test "x${enable_tls}" = "x" ; then
if test "x${force_tls}" = "x1" ; then
AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
enable_tls="1"
elif test "x${force_tls}" = "x0" ; then
AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
enable_tls="0"
else
enable_tls="1"
fi
fi
if test "x${enable_tls}" = "x1" ; then
AC_MSG_CHECKING([for TLS])
@ -1298,9 +1301,12 @@ else
fi
AC_SUBST([enable_tls])
if test "x${enable_tls}" = "x1" ; then
if test "x${force_tls}" = "x0" ; then
AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
fi
AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
elif test "x${force_tls}" = "x1" ; then
AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function])
AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
fi
dnl ============================================================================

View file

@ -39,7 +39,7 @@ typedef struct arena_s arena_t;
#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
/* Index of bin this run is associated with. */
index_t binind;
szind_t binind;
/* Number of free regions in run. */
unsigned nfree;
@ -448,7 +448,7 @@ bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
index_t binind, uint64_t prof_accumbytes);
szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@ -519,17 +519,19 @@ arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@ -539,21 +541,21 @@ void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
index_t binind);
szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, index_t binind, size_t flags);
size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
@ -652,6 +654,22 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
size_t size;
#if CHUNK_MAP_SIZE_SHIFT > 0
size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif
return (size);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
@ -659,7 +677,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@ -670,7 +688,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@ -684,11 +702,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
index_t binind;
szind_t binind;
mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@ -754,6 +772,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
*mapbitsp = mapbits;
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
size_t mapbits;
#if CHUNK_MAP_SIZE_SHIFT > 0
mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
mapbits = size;
#else
mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif
assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
return (mapbits);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
@ -761,11 +796,10 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags);
}
@ -777,10 +811,9 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | (mapbits
& ~CHUNK_MAP_SIZE_MASK));
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
(mapbits & ~CHUNK_MAP_SIZE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
@ -799,18 +832,17 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
index_t binind)
szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
@ -824,7 +856,7 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
index_t binind, size_t flags)
szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
@ -901,10 +933,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
}
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
index_t binind;
szind_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@ -916,7 +948,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
index_t run_binind, actual_binind;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
@ -950,10 +982,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE index_t
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
index_t binind = bin - arena->bins;
szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
@ -1060,7 +1092,7 @@ arena_prof_tctx_get(const void *ptr)
}
JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@ -1070,12 +1102,25 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
pageind);
if (unlikely(usize > SMALL_MAXCLASS || tctx >
(prof_tctx_t *)(uintptr_t)1U)) {
arena_chunk_map_misc_t *elm;
assert(arena_mapbits_large_get(chunk, pageind) != 0);
elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
* tctx must always be initialized for large runs.
* Assert that the surrounding conditional logic is
* equivalent to checking whether ptr refers to a large
* run.
*/
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
huge_prof_tctx_set(ptr, tctx);
@ -1131,7 +1176,7 @@ arena_salloc(const void *ptr, bool demote)
size_t ret;
arena_chunk_t *chunk;
size_t pageind;
index_t binind;
szind_t binind;
assert(ptr != NULL);
@ -1190,7 +1235,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
index_t binind = arena_ptr_small_binind_get(ptr,
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
@ -1242,7 +1287,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
index_t binind = size2index(size);
szind_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -

View file

@ -184,7 +184,7 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
typedef unsigned index_t;
typedef unsigned szind_t;
/*
* Flags bits:
@ -511,12 +511,12 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
index_t size2index_compute(size_t size);
index_t size2index_lookup(size_t size);
index_t size2index(size_t size);
size_t index2size_compute(index_t index);
size_t index2size_lookup(index_t index);
size_t index2size(index_t index);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
@ -527,7 +527,7 @@ arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE index_t
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
@ -558,7 +558,7 @@ size2index_compute(size_t size)
}
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
@ -571,7 +571,7 @@ size2index_lookup(size_t size)
}
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
@ -582,7 +582,7 @@ size2index(size_t size)
}
JEMALLOC_INLINE size_t
index2size_compute(index_t index)
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
@ -609,7 +609,7 @@ index2size_compute(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(index_t index)
index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
@ -617,7 +617,7 @@ index2size_lookup(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(index_t index)
index2size(szind_t index)
{
assert(index < NSIZES);
@ -976,7 +976,7 @@ u2rz(size_t usize)
size_t ret;
if (usize <= SMALL_MAXCLASS) {
index_t binind = size2index(usize);
szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;

View file

@ -50,6 +50,8 @@ arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set

View file

@ -332,7 +332,7 @@ bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
@ -402,13 +402,13 @@ prof_tctx_get(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(ptr, tctx);
arena_prof_tctx_set(ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@ -473,7 +473,7 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(ptr, usize, tctx);
else
prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@ -503,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(ptr, usize, tctx);
else
prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void

View file

@ -77,7 +77,7 @@ struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
unsigned ev_cnt; /* Event count since incremental GC. */
index_t next_gc_bin; /* Next bin to GC. */
szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
@ -126,10 +126,10 @@ extern tcaches_t *tcaches;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, index_t binind);
tcache_bin_t *tbin, szind_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
index_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
@ -161,7 +161,7 @@ void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
index_t binind);
szind_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
@ -267,7 +267,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
index_t binind;
szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@ -312,7 +312,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
index_t binind;
szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@ -360,7 +360,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@ -386,7 +386,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
index_t binind;
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;

View file

@ -56,7 +56,7 @@ JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC

View file

@ -39,7 +39,7 @@ JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{
return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) |
return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
CHUNK_MAP_KEY));
}
@ -58,8 +58,7 @@ arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
assert(arena_miscelm_is_key(miscelm));
return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >>
CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode((uintptr_t)miscelm));
}
JEMALLOC_INLINE_C size_t
@ -73,7 +72,7 @@ arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C int
@ -315,7 +314,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
@ -508,7 +507,7 @@ arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
index_t binind)
szind_t binind)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
@ -780,7 +779,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@ -793,7 +792,7 @@ arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@ -806,7 +805,7 @@ arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@ -819,7 +818,7 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@ -1125,7 +1124,7 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
arena_run_t *run = arena_run_first_best_fit(arena, size);
if (run != NULL) {
@ -1136,7 +1135,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
}
static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@ -1749,15 +1748,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
arena_maybe_purge(arena);
}
static void
arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run)
{
bool committed = arena_run_decommit(arena, chunk, run);
arena_run_dalloc(arena, run, committed, false, !committed);
}
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
@ -1889,7 +1879,7 @@ static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
index_t binind;
szind_t binind;
arena_bin_info_t *bin_info;
/* Look for a usable run. */
@ -1940,7 +1930,7 @@ static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
void *ret;
index_t binind;
szind_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run;
@ -1986,7 +1976,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
uint64_t prof_accumbytes)
{
unsigned i, nfill;
@ -2131,7 +2121,7 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small =
void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
index_t binind;
szind_t binind;
arena_bin_info_t *bin_info;
cassert(config_fill);
assert(opt_junk_free);
@ -2149,7 +2139,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
void *ret;
arena_bin_t *bin;
arena_run_t *run;
index_t binind;
szind_t binind;
binind = size2index(size);
assert(binind < NBINS);
@ -2233,7 +2223,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
random_offset);
if (config_stats) {
index_t index = size2index(usize) - NBINS;
szind_t index = size2index(usize) - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
@ -2326,7 +2316,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
ret = arena_miscelm_to_rpages(miscelm);
if (config_stats) {
index_t index = size2index(usize) - NBINS;
szind_t index = size2index(usize) - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
@ -2385,7 +2375,7 @@ arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
index_t binind;
szind_t binind;
cassert(config_prof);
assert(ptr != NULL);
@ -2413,7 +2403,7 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
if (run == bin->runcur)
bin->runcur = NULL;
else {
index_t binind = arena_bin_index(extent_node_arena_get(
szind_t binind = arena_bin_index(extent_node_arena_get(
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
@ -2440,7 +2430,7 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
arena_run_dalloc_decommit(arena, chunk, run);
arena_run_dalloc(arena, run, true, false, false);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
@ -2477,7 +2467,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_t *run;
arena_bin_t *bin;
arena_bin_info_t *bin_info;
index_t binind;
szind_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
@ -2574,7 +2564,7 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
if (!junked)
arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
index_t index = size2index(usize) - NBINS;
szind_t index = size2index(usize) - NBINS;
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= usize;
@ -2583,7 +2573,7 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
}
}
arena_run_dalloc_decommit(arena, chunk, run);
arena_run_dalloc(arena, run, true, false, false);
}
void
@ -2621,8 +2611,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) {
index_t oldindex = size2index(oldsize) - NBINS;
index_t index = size2index(size) - NBINS;
szind_t oldindex = size2index(oldsize) - NBINS;
szind_t index = size2index(size) - NBINS;
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
@ -2700,8 +2690,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
pageind+npages-1)));
if (config_stats) {
index_t oldindex = size2index(oldsize) - NBINS;
index_t index = size2index(size) - NBINS;
szind_t oldindex = size2index(oldsize) - NBINS;
szind_t index = size2index(size) - NBINS;
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;

View file

@ -148,11 +148,12 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
/* Fill if necessary (shrinking). */
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
CHUNK_CEILING(usize), usize, sdiff);
if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
zeroed = false;
} else {
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
zeroed = true;
@ -202,14 +203,15 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
usize), sdiff);
if (config_fill && unlikely(opt_junk_free)) {
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
sdiff);
zeroed = false;
} else {
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
zeroed = true;

View file

@ -179,13 +179,24 @@ static bool malloc_initializer = NO_INITIALIZER;
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;
JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{
malloc_mutex_init(&init_lock);
/* If another constructor in the same binary is using mallctl to
* e.g. set up chunk hooks, it may end up running before this one,
* and malloc_init_hard will crash trying to lock the uninitialized
* lock. So we force an initialization of the lock in
* malloc_init_hard as well. We don't try to care about atomicity
* of the accesses to the init_lock_initialized boolean, since it
* really only matters early in the process creation, before any
* separate thread normally starts doing anything. */
if (!init_lock_initialized)
malloc_mutex_init(&init_lock);
init_lock_initialized = true;
}
#ifdef _MSC_VER
@ -510,17 +521,17 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
assert(ind < narenas_actual || !init_if_missing);
narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
if (!*arenas_cache_bypassp) {
if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
*arenas_cache_bypassp = true;
arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
narenas_cache);
*arenas_cache_bypassp = false;
} else
arenas_cache = NULL;
}
if (arenas_cache == NULL) {
/*
* This function must always tell the truth, even if
* it's slow, so don't let OOM or recursive allocation
* it's slow, so don't let OOM, thread cleanup (note
* tsd_nominal check), or recursive allocation
* avoidance (note arenas_cache_bypass check) get in the
* way.
*/
@ -531,6 +542,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
malloc_mutex_unlock(&arenas_lock);
return (arena);
}
assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
tsd_arenas_cache_set(tsd, arenas_cache);
tsd_narenas_cache_set(tsd, narenas_cache);
}
@ -649,8 +661,10 @@ arenas_cache_cleanup(tsd_t *tsd)
arena_t **arenas_cache;
arenas_cache = tsd_arenas_cache_get(tsd);
if (arenas_cache != NULL)
if (arenas_cache != NULL) {
tsd_arenas_cache_set(tsd, NULL);
a0dalloc(arenas_cache);
}
}
void
@ -1297,6 +1311,9 @@ static bool
malloc_init_hard(void)
{
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock();
#endif
malloc_mutex_lock(&init_lock);
if (!malloc_init_hard_needed()) {
malloc_mutex_unlock(&init_lock);

View file

@ -219,7 +219,7 @@ void
prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
prof_tctx_set(ptr, tctx);
prof_tctx_set(ptr, usize, tctx);
malloc_mutex_lock(tctx->tdata->lock);
tctx->cnts.curobjs++;

View file

@ -32,7 +32,7 @@ size_t tcache_salloc(const void *ptr)
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
index_t binind = tcache->next_gc_bin;
szind_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
@ -72,7 +72,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, index_t binind)
tcache_bin_t *tbin, szind_t binind)
{
void *ret;
@ -87,7 +87,7 @@ tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
index_t binind, unsigned rem)
szind_t binind, unsigned rem)
{
arena_t *arena;
void *ptr;
@ -166,7 +166,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache)
{
arena_t *arena;

View file

@ -1,5 +1,9 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif
static chunk_hooks_t orig_hooks;
static chunk_hooks_t old_hooks;

View file

@ -16,6 +16,27 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
return (fd);
}
static size_t
get_lg_prof_sample(void)
{
size_t lg_prof_sample;
size_t sz = sizeof(size_t);
assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
return (lg_prof_sample);
}
static void
do_prof_reset(size_t lg_prof_sample)
{
assert_d_eq(mallctl("prof.reset", NULL, NULL,
&lg_prof_sample, sizeof(size_t)), 0,
"Unexpected mallctl failure while resetting profile data");
assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
"Expected profile sample rate change");
}
TEST_BEGIN(test_prof_reset_basic)
{
size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
@ -30,9 +51,7 @@ TEST_BEGIN(test_prof_reset_basic)
"Unexpected mallctl failure while reading profiling sample rate");
assert_zu_eq(lg_prof_sample_orig, 0,
"Unexpected profiling sample rate");
sz = sizeof(size_t);
assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
"Unexpected disagreement between \"opt.lg_prof_sample\" and "
"\"prof.lg_sample\"");
@ -41,10 +60,7 @@ TEST_BEGIN(test_prof_reset_basic)
for (i = 0; i < 2; i++) {
assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure while resetting profile data");
sz = sizeof(size_t);
assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
NULL, 0), 0, "Unexpected mallctl failure while reading "
"profiling sample rate");
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
"Unexpected profile sample rate change");
}
@ -52,22 +68,15 @@ TEST_BEGIN(test_prof_reset_basic)
/* Test resets with prof.lg_sample changes. */
lg_prof_sample_next = 1;
for (i = 0; i < 2; i++) {
assert_d_eq(mallctl("prof.reset", NULL, NULL,
&lg_prof_sample_next, sizeof(size_t)), 0,
"Unexpected mallctl failure while resetting profile data");
sz = sizeof(size_t);
assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
NULL, 0), 0, "Unexpected mallctl failure while reading "
"profiling sample rate");
do_prof_reset(lg_prof_sample_next);
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
"Expected profile sample rate change");
lg_prof_sample_next = lg_prof_sample_orig;
}
/* Make sure the test code restored prof.lg_sample. */
sz = sizeof(size_t);
assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
lg_prof_sample = get_lg_prof_sample();
assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
"Unexpected disagreement between \"opt.lg_prof_sample\" and "
"\"prof.lg_sample\"");
@ -182,6 +191,7 @@ thd_start(void *varg)
TEST_BEGIN(test_prof_reset)
{
size_t lg_prof_sample_orig;
bool active;
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
@ -195,6 +205,9 @@ TEST_BEGIN(test_prof_reset)
"Unexpected pre-existing tdata structures");
tdata_count = prof_tdata_count();
lg_prof_sample_orig = get_lg_prof_sample();
do_prof_reset(5);
active = true;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
0, "Unexpected mallctl failure while activating profiling");
@ -214,6 +227,8 @@ TEST_BEGIN(test_prof_reset)
active = false;
assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
0, "Unexpected mallctl failure while deactivating profiling");
do_prof_reset(lg_prof_sample_orig);
}
TEST_END
#undef NTHREADS

View file

@ -26,7 +26,7 @@ get_max_size_class(void)
TEST_BEGIN(test_size_classes)
{
size_t size_class, max_size_class;
index_t index, max_index;
szind_t index, max_index;
max_size_class = get_max_size_class();
max_index = size2index(max_size_class);

View file

@ -56,9 +56,14 @@ static void *
thd_start(void *arg)
{
data_t d = (data_t)(uintptr_t)arg;
void *p;
assert_x_eq(*data_tsd_get(), DATA_INIT,
"Initial tsd get should return initialization value");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");
data_tsd_set(&d);
assert_x_eq(*data_tsd_get(), d,
"After tsd set, tsd get should return value that was set");
@ -67,6 +72,7 @@ thd_start(void *arg)
assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
"Resetting local data should have no effect on tsd");
free(p);
return (NULL);
}

View file

@ -1,2 +1,2 @@
UPSTREAM_REPO=https://github.com/jemalloc/jemalloc
UPSTREAM_COMMIT=4.0.0
UPSTREAM_REPO=https://github.com/glandium/jemalloc
UPSTREAM_COMMIT=ed4883285e111b426e5769b24dad164ebacaa5b9

View file

@ -18,7 +18,7 @@ if CONFIG['MOZ_MEMORY']:
'mozjemalloc',
]
if CONFIG['MOZ_JEMALLOC3'] or CONFIG['MOZ_REPLACE_MALLOC']:
if CONFIG['MOZ_JEMALLOC4'] or CONFIG['MOZ_REPLACE_MALLOC']:
if not CONFIG['MOZ_NATIVE_JEMALLOC']:
DIRS += ['jemalloc']

View file

@ -3,7 +3,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
ifndef MOZ_JEMALLOC3
ifndef MOZ_JEMALLOC4
# Force optimize mozjemalloc on --disable-optimize builds.
# This works around the issue that the Android NDK's definition of ffs is
# broken when compiling without optimization, while avoiding to add yet another

View file

@ -8,7 +8,7 @@ EXPORTS += [
'jemalloc_types.h',
]
if not CONFIG['MOZ_JEMALLOC3']:
if not CONFIG['MOZ_JEMALLOC4']:
SOURCES += [
'jemalloc.c',
]

View file

@ -0,0 +1,7 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# Dummy rule to transition from .c file to .cpp file without a clobber.
# (bug 1005486)
%/jemalloc_config.c: ;

Some files were not shown because too many files have changed in this diff