зеркало из https://github.com/mozilla/gecko-dev.git
1066 строки
29 KiB
C
1066 строки
29 KiB
C
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
|
*
|
|
* The contents of this file are subject to the Netscape Public
|
|
* License Version 1.1 (the "License"); you may not use this file
|
|
* except in compliance with the License. You may obtain a copy of
|
|
* the License at http://www.mozilla.org/NPL/
|
|
*
|
|
* Software distributed under the License is distributed on an "AS
|
|
 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
|
|
* implied. See the License for the specific language governing
|
|
* rights and limitations under the License.
|
|
*
|
|
* The Original Code is Mozilla Communicator client code, released
|
|
* March 31, 1998.
|
|
*
|
|
* The Initial Developer of the Original Code is Netscape
|
|
* Communications Corporation. Portions created by Netscape are
|
|
* Copyright (C) 1998 Netscape Communications Corporation. All
|
|
* Rights Reserved.
|
|
*
|
|
* Contributor(s):
|
|
*
|
|
* Alternatively, the contents of this file may be used under the
|
|
* terms of the GNU Public License (the "GPL"), in which case the
|
|
* provisions of the GPL are applicable instead of those above.
|
|
* If you wish to allow use of your version of this file only
|
|
* under the terms of the GPL and not to allow others to use your
|
|
* version of this file under the NPL, indicate your decision by
|
|
* deleting the provisions above and replace them with the notice
|
|
* and other provisions required by the GPL. If you do not delete
|
|
* the provisions above, a recipient may use your version of this
|
|
* file under either the NPL or the GPL.
|
|
*/
|
|
|
|
#ifdef JS_THREADSAFE
|
|
|
|
/*
|
|
* JS locking stubs.
|
|
*/
|
|
#include "jsstddef.h"
|
|
#include <stdlib.h>
|
|
#include "jspubtd.h"
|
|
#include "prthread.h"
|
|
#include "jsutil.h" /* Added by JSIFY */
|
|
#include "jstypes.h"
|
|
#include "jsbit.h"
|
|
#include "jscntxt.h"
|
|
#include "jsscope.h"
|
|
#include "jspubtd.h"
|
|
#include "jslock.h"
|
|
|
|
#define ReadWord(W) (W)
|
|
|
|
#ifndef NSPR_LOCK
|
|
|
|
#include <memory.h>
|
|
|
|
static PRLock **global_locks;
|
|
static uint32 global_lock_count = 1;
|
|
static uint32 global_locks_log2 = 0;
|
|
static uint32 global_locks_mask = 0;
|
|
|
|
#define GLOBAL_LOCK_INDEX(id) (((uint32)(id) >> 2) & global_locks_mask)
|
|
|
|
static void
|
|
js_LockGlobal(void *id)
|
|
{
|
|
uint32 i = GLOBAL_LOCK_INDEX(id);
|
|
PR_Lock(global_locks[i]);
|
|
}
|
|
|
|
static void
|
|
js_UnlockGlobal(void *id)
|
|
{
|
|
uint32 i = GLOBAL_LOCK_INDEX(id);
|
|
PR_Unlock(global_locks[i]);
|
|
}
|
|
|
|
/* Exclude Alpha NT. */
|
|
#if defined(_WIN32) && defined(_M_IX86)
|
|
#pragma warning( disable : 4035 )
|
|
|
|
/*
 * Atomic compare-and-swap for 32-bit x86 under MSVC: if *w == ov, store nv
 * and return 1; otherwise return 0.  The result is left in EAX and the
 * function "falls off the end"; #pragma warning(disable: 4035) above
 * suppresses the missing-return diagnostic for this idiom.
 */
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    __asm {
        mov eax, ov              /* cmpxchg compares against EAX */
        mov ecx, nv
        mov ebx, w
        lock cmpxchg [ebx], ecx  /* atomically: if (*w==eax) *w=ecx */
        sete al                  /* AL = 1 iff the exchange happened */
        and eax, 1h              /* clear the rest of EAX; result in EAX */
    }
}
|
|
|
|
#elif defined(__GNUC__) && defined(__i386__)
|
|
|
|
/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
|
|
/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */

/*
 * Atomic compare-and-swap for i386 under GCC: if *w == ov, store nv and
 * return 1; otherwise return 0.  The "memory" clobber makes this a compiler
 * barrier as well.
 */
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
    unsigned int res;

    __asm__ __volatile__ (
                          "lock\n"
                          "cmpxchgl %2, (%1)\n"  /* compares (%1) with EAX=ov */
                          "sete %%al\n"          /* AL = 1 iff swap happened */
                          "andl $1, %%eax\n"
                          : "=a" (res)
                          : "r" (w), "r" (nv), "a" (ov)
                          : "cc", "memory");
    return (int)res;
}
|
|
|
|
#elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)
|
|
|
|
/*
 * Atomic compare-and-swap for UltraSPARC on Solaris: if *w == ov, store nv
 * and return 1; otherwise return 0.  Requires ov != nv (asserted) because
 * the result test compares the cas output against ov.
 */
static JS_INLINE int
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
{
#if defined(__GNUC__)
    unsigned int res;
    JS_ASSERT(ov != nv);
    /*
     * stbar orders prior stores; cas swaps *w with %3 iff *w == %2.  After
     * cas, %3 holds the old *w value, so %2==%3 iff the swap happened.  The
     * annulled branch (be,a) executes "mov 1,%0" only on the taken path,
     * else control falls through to "mov 0,%0".
     * NOTE(review): no "memory" clobber is declared here -- looks like it
     * relies on asm volatile alone; verify against compiler docs.
     */
    asm volatile ("\
stbar\n\
cas [%1],%2,%3\n\
cmp %2,%3\n\
be,a 1f\n\
mov 1,%0\n\
mov 0,%0\n\
1:"
                  : "=r" (res)
                  : "r" (w), "r" (ov), "r" (nv));
    return (int)res;
#else /* !__GNUC__ */
    /* Non-GCC Solaris builds link against a hand-written assembly helper. */
    extern int compare_and_swap(jsword*, jsword, jsword);
    JS_ASSERT(ov != nv);
    return compare_and_swap(w, ov, nv);
#endif
}
|
|
|
|
#elif defined(AIX)
|
|
|
|
#include <sys/atomic_op.h>
|
|
|
|
static JS_INLINE int
|
|
js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
|
|
{
|
|
return !_check_lock((atomic_p)w, ov, nv);
|
|
}
|
|
|
|
#else
|
|
|
|
#error "Define NSPR_LOCK if your platform lacks a compare-and-swap instruction."
|
|
|
|
#endif /* arch-tests */
|
|
|
|
#endif /* !NSPR_LOCK */
|
|
|
|
jsword
|
|
js_CurrentThreadId()
|
|
{
|
|
return CurrentThreadId();
|
|
}
|
|
|
|
/*
 * Initialize a thin lock to the unlocked state.  Under NSPR_LOCK every thin
 * lock is backed by a real NSPR lock from the start; otherwise the whole
 * structure is simply zeroed (owner == 0 means unlocked, fat == NULL means
 * no contention so far).
 */
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0;
    tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
    memset(tl, 0, sizeof(JSThinLock));
#endif
}
|
|
|
|
/*
 * Tear down a thin lock.  Under NSPR_LOCK the backing NSPR lock is destroyed
 * and the owner word is poisoned to catch use-after-finish; otherwise the
 * lock must already be unlocked and uncontended (asserted).
 */
void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
    tl->owner = 0xdeadbeef;     /* poison value to flag stale use in debugging */
    if (tl->fat)
        JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
    JS_ASSERT(tl->owner == 0);
    JS_ASSERT(tl->fat == NULL);
#endif
}
|
|
|
|
static void js_Dequeue(JSThinLock *);
|
|
|
|
#ifdef DEBUG_SCOPE_COUNT
|
|
|
|
#include <stdio.h>
|
|
#include "jsdhash.h"
|
|
|
|
static FILE *logfp;
|
|
static JSDHashTable logtbl;
|
|
|
|
typedef struct logentry {
|
|
JSDHashEntryStub stub;
|
|
char op;
|
|
const char *file;
|
|
int line;
|
|
} logentry;
|
|
|
|
/*
 * DEBUG_SCOPE_COUNT helper: append one lock-op record (scope pointer, op
 * character, source location) to /tmp/scope.log and remember the scope in
 * logtbl so js_unlog_scope can drop it later.  Best-effort: silently returns
 * on fopen or hashtable failure.
 */
static void
logit(JSScope *scope, char op, const char *file, int line)
{
    logentry *entry;

    if (!logfp) {
        logfp = fopen("/tmp/scope.log", "w");
        if (!logfp)
            return;
        /* Unbuffered so the log survives a crash. */
        setvbuf(logfp, NULL, _IONBF, 0);
    }
    fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);

    /* Lazily initialize the scope-tracking table on first use. */
    if (!logtbl.entryStore &&
        !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
                           sizeof(logentry), 100)) {
        return;
    }
    entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
    if (!entry)
        return;
    entry->stub.key = scope;
    entry->op = op;
    entry->file = file;
    entry->line = line;
}
|
|
|
|
/*
 * DEBUG_SCOPE_COUNT helper: forget a scope recorded by logit (e.g. when the
 * scope is destroyed).  No-op if the table was never initialized.
 */
void
js_unlog_scope(JSScope *scope)
{
    if (!logtbl.entryStore)
        return;
    (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
}
|
|
|
|
# define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)
|
|
|
|
#else
|
|
|
|
# define LOGIT(scope,op) /* nothing */
|
|
|
|
#endif /* DEBUG_SCOPE_COUNT */
|
|
|
|
/*
|
|
* Return true if scope's ownercx, or the ownercx of a single-threaded scope
|
|
* for which ownercx is waiting to become multi-threaded and shared, is cx.
|
|
* That condition implies deadlock in ClaimScope if cx's thread were to wait
|
|
* to share scope.
|
|
*
|
|
* (i) rt->gcLock held
|
|
*/
|
|
static JSBool
|
|
WillDeadlock(JSScope *scope, JSContext *cx)
|
|
{
|
|
JSContext *ownercx;
|
|
|
|
do {
|
|
ownercx = scope->ownercx;
|
|
if (ownercx == cx) {
|
|
JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
|
|
return JS_TRUE;
|
|
}
|
|
} while (ownercx && (scope = ownercx->scopeToShare) != NULL);
|
|
return JS_FALSE;
|
|
}
|
|
|
|
/*
|
|
* Make scope multi-threaded, i.e. share its ownership among contexts in rt
|
|
* using a "thin" or (if necessary due to contention) "fat" lock. Called only
|
|
* from ClaimScope, immediately below, when we detect deadlock were we to wait
|
|
* for scope's lock, because its ownercx is waiting on a scope owned by the
|
|
* calling cx.
|
|
*
|
|
* (i) rt->gcLock held
|
|
*/
|
|
/*
 * See the comment block above: converts an exclusively-owned (flyweight)
 * scope to a shared, thin/fat-locked one.  Caller holds rt->gcLock.
 */
static void
ShareScope(JSRuntime *rt, JSScope *scope)
{
    JSScope **todop;

    /* If scope is queued on rt->scopeSharingTodo, unlink it and wake all
       waiters in ClaimScope so they re-test scope->ownercx. */
    if (scope->u.link) {
        for (todop = &rt->scopeSharingTodo; *todop != scope;
             todop = &(*todop)->u.link) {
            JS_ASSERT(*todop != NO_SCOPE_SHARING_TODO);
        }
        *todop = scope->u.link;
        JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
    }
    js_InitLock(&scope->lock);
    if (scope == rt->setSlotScope) {
        /*
         * Nesting locks on another thread that's using scope->ownercx: give
         * the held lock a reentrancy count of 1 and set its lock.owner field
         * directly (no compare-and-swap needed while scope->ownercx is still
         * non-null). See below in ClaimScope, before the ShareScope call,
         * for more on why this is necessary.
         *
         * If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
         * acquiring scope->lock.fat here, against another thread holding that
         * fat lock and trying to grab rt->gcLock. This is because no other
         * thread can attempt to acquire scope->lock.fat until scope->ownercx
         * is null *and* our thread has released rt->gcLock, which interlocks
         * scope->ownercx's transition to null against tests of that member
         * in ClaimScope.
         */
        scope->lock.owner = scope->ownercx->thread;
#ifdef NSPR_LOCK
        JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
#endif
        scope->u.count = 1;
    } else {
        /* Note: u is a union, so setting u.count also clears u.link. */
        scope->u.count = 0;
    }
    scope->ownercx = NULL;  /* NB: set last, after lock init */
    JS_RUNTIME_METER(rt, sharedScopes);
}
|
|
|
|
/*
|
|
* Given a scope with apparently non-null ownercx different from cx, try to
|
|
* set ownercx to cx, claiming exclusive (single-threaded) ownership of scope.
|
|
* If we claim ownership, return true. Otherwise, we wait for ownercx to be
|
|
* set to null (indicating that scope is multi-threaded); or if waiting would
|
|
* deadlock, we set ownercx to null ourselves via ShareScope. In any case,
|
|
* once ownercx is null we return false.
|
|
*/
|
|
/*
 * See the comment block above for the contract: returns JS_TRUE if cx now
 * owns scope exclusively, JS_FALSE once scope has become shared.  Takes and
 * releases rt->gcLock internally; may block on rt->scopeSharingDone.
 */
static JSBool
ClaimScope(JSScope *scope, JSContext *cx)
{
    JSRuntime *rt;
    JSContext *ownercx;
    jsrefcount saveDepth;
    PRStatus stat;

    rt = cx->runtime;
    JS_RUNTIME_METER(rt, claimAttempts);
    JS_LOCK_GC(rt);

    /* Reload in case ownercx went away while we blocked on the lock. */
    while ((ownercx = scope->ownercx) != NULL) {
        /*
         * Avoid selflock if ownercx is dead, or is not running a request, or
         * has the same thread as cx. Set scope->ownercx to cx so that the
         * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
         * fast path around the corresponding js_UnlockScope or js_UnlockObj
         * function call.
         *
         * If scope->u.link is non-null, scope has already been inserted on
         * the rt->scopeSharingTodo list, because another thread's context
         * already wanted to lock scope while ownercx was running a request.
         * We can't claim any scope whose u.link is non-null at this point,
         * even if ownercx->requestDepth is 0 (see below where we suspend our
         * request before waiting on rt->scopeSharingDone).
         */
        if (!scope->u.link &&
            (!js_LiveContext(rt, ownercx) ||
             !ownercx->requestDepth ||
             ownercx->thread == cx->thread)) {
            JS_ASSERT(scope->u.count == 0);
            scope->ownercx = cx;
            JS_UNLOCK_GC(rt);
            JS_RUNTIME_METER(rt, claimedScopes);
            return JS_TRUE;
        }

        /*
         * Avoid deadlock if scope's owner context is waiting on a scope that
         * we own, by revoking scope's ownership. This approach to deadlock
         * avoidance works because the engine never nests scope locks, except
         * for the notable case of js_SetProtoOrParent (see jsobj.c).
         *
         * If cx could hold locks on ownercx->scopeToShare, or if ownercx
         * could hold locks on scope, we would need to keep reentrancy counts
         * for all such "flyweight" (ownercx != NULL) locks, so that control
         * would unwind properly once these locks became "thin" or "fat".
         * Apart from the js_SetProtoOrParent exception, the engine promotes
         * a scope from exclusive to shared access only when locking, never
         * when holding or unlocking.
         *
         * If ownercx's thread is calling js_SetProtoOrParent, trying to lock
         * the inner scope (the scope of the object being set as the prototype
         * of the outer object), ShareScope will find the outer object's scope
         * at rt->setSlotScope. If it's the same as scope, we give it a lock
         * held by ownercx's thread with reentrancy count of 1, then we return
         * here and break. After that we unwind to js_[GS]etSlotThreadSafe or
         * js_LockScope (our caller), where we wait on the newly-fattened lock
         * until ownercx's thread unwinds from js_SetProtoOrParent.
         */
        if (ownercx->scopeToShare &&
            WillDeadlock(ownercx->scopeToShare, cx)) {
            ShareScope(rt, scope);
            break;
        }

        /*
         * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we
         * can decide whether scope is on rt->scopeSharingTodo with a single
         * non-null test, and avoid double-insertion bugs.
         */
        if (!scope->u.link) {
            scope->u.link = rt->scopeSharingTodo;
            rt->scopeSharingTodo = scope;
            /* Hold the map so the scope can't die while queued. */
            js_HoldObjectMap(cx, &scope->map);
        }

        /*
         * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone,
         * saving and clearing cx->requestDepth so we don't deadlock if the
         * GC needs to run on ownercx.
         */
        saveDepth = cx->requestDepth;
        if (saveDepth) {
            cx->requestDepth = 0;
            JS_ASSERT(rt->requestCount > 0);
            rt->requestCount--;
            if (rt->requestCount == 0)
                JS_NOTIFY_REQUEST_DONE(rt);
        }

        /*
         * We know that some other thread's context owns scope, which is now
         * linked onto rt->scopeSharingTodo, awaiting the end of that other
         * thread's request. So it is safe to wait on rt->scopeSharingDone.
         */
        cx->scopeToShare = scope;
        stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT);
        JS_ASSERT(stat != PR_FAILURE);
        cx->scopeToShare = NULL;

        /*
         * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone,
         * restoring cx->requestDepth.
         */
        if (saveDepth) {
            if (rt->gcThread != cx->thread) {
                while (rt->gcLevel > 0)
                    JS_AWAIT_GC_DONE(rt);
            }
            rt->requestCount++;
            cx->requestDepth = saveDepth;
        }
    }

    JS_UNLOCK_GC(rt);
    return JS_FALSE;
}
|
|
|
|
/*
 * Read obj->slots[slot] under the appropriate lock.  Fast paths: claim the
 * flyweight scope via ClaimScope, or take/release the thin lock with a pair
 * of compare-and-swaps.  Falls back to js_LockObj/js_UnlockScope when the
 * lock is contended or obj's scope changed underneath us.
 */
jsval
js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
{
    jsval v;
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    JS_ASSERT(OBJ_IS_NATIVE(obj));
    scope = OBJ_SCOPE(obj);
    JS_ASSERT(scope->ownercx != cx);
    JS_ASSERT(obj->slots && slot < obj->map->freeslot);
    /* Flyweight fast path: scope still exclusively owned, try to claim it. */
    if (scope->ownercx && ClaimScope(scope, cx))
        return obj->slots[slot];

#ifndef NSPR_LOCK
    tl = &scope->lock;
    me = cx->thread;
    JS_ASSERT(me == CurrentThreadId());
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        /*
         * Got the lock with one compare-and-swap. Even so, someone else may
         * have mutated obj so it now has its own scope and lock, which would
         * require either a restart from the top of this routine, or a thin
         * lock release followed by fat lock acquisition.
         */
        if (scope == OBJ_SCOPE(obj)) {
            v = obj->slots[slot];
            if (!js_CompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(scope->ownercx != cx);
                /* A waiter arrived; unlock via the slow path to wake it. */
                LOGIT(scope, '1');
                scope->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return v;
        }
        /* Scope changed: drop the thin lock (waking waiters if any). */
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* We already hold the (reentrant) thin lock; read directly. */
        return obj->slots[slot];
    }
#endif

    js_LockObj(cx, obj);
    v = obj->slots[slot];

    /*
     * Test whether cx took ownership of obj's scope during js_LockObj.
     *
     * This does not mean that a given scope reverted to flyweight from "thin"
     * or "fat" -- it does mean that obj's map pointer changed due to another
     * thread setting a property, requiring obj to cease sharing a prototype
     * object's scope (whose lock was not flyweight, else we wouldn't be here
     * in the first place!).
     */
    scope = OBJ_SCOPE(obj);
    if (scope->ownercx != cx)
        js_UnlockScope(cx, scope);
    return v;
}
|
|
|
|
/*
 * Write v into obj->slots[slot] under the appropriate lock.  Mirrors
 * js_GetSlotThreadSafe: flyweight claim, thin-lock CAS fast path, then the
 * js_LockObj slow path.
 */
void
js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
{
    JSScope *scope;
#ifndef NSPR_LOCK
    JSThinLock *tl;
    jsword me;
#endif

    JS_ASSERT(OBJ_IS_NATIVE(obj));
    scope = OBJ_SCOPE(obj);
    JS_ASSERT(scope->ownercx != cx);
    JS_ASSERT(obj->slots && slot < obj->map->freeslot);
    /* Flyweight fast path: scope still exclusively owned, try to claim it. */
    if (scope->ownercx && ClaimScope(scope, cx)) {
        obj->slots[slot] = v;
        return;
    }

#ifndef NSPR_LOCK
    tl = &scope->lock;
    me = cx->thread;
    JS_ASSERT(me == CurrentThreadId());
    if (js_CompareAndSwap(&tl->owner, 0, me)) {
        /* Thin lock acquired; re-check that obj still maps to scope. */
        if (scope == OBJ_SCOPE(obj)) {
            obj->slots[slot] = v;
            if (!js_CompareAndSwap(&tl->owner, me, 0)) {
                /* Assert that scope locks never revert to flyweight. */
                JS_ASSERT(scope->ownercx != cx);
                /* A waiter arrived; unlock via the slow path to wake it. */
                LOGIT(scope, '1');
                scope->u.count = 1;
                js_UnlockObj(cx, obj);
            }
            return;
        }
        /* Scope changed: drop the thin lock (waking waiters if any). */
        if (!js_CompareAndSwap(&tl->owner, me, 0))
            js_Dequeue(tl);
    }
    else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
        /* We already hold the (reentrant) thin lock; write directly. */
        obj->slots[slot] = v;
        return;
    }
#endif

    js_LockObj(cx, obj);
    obj->slots[slot] = v;

    /*
     * Same drill as above, in js_GetSlotThreadSafe. Note that we cannot
     * assume obj has its own mutable scope (where scope->object == obj) yet,
     * because OBJ_SET_SLOT is called for the "universal", common slots such
     * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope.
     * See also the JSPROP_SHARED attribute and its usage.
     */
    scope = OBJ_SCOPE(obj);
    if (scope->ownercx != cx)
        js_UnlockScope(cx, scope);
}
|
|
|
|
#ifndef NSPR_LOCK
|
|
|
|
static JSFatLock *
|
|
NewFatlock()
|
|
{
|
|
JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
|
|
if (!fl) return NULL;
|
|
fl->susp = 0;
|
|
fl->next = NULL;
|
|
fl->prevp = NULL;
|
|
fl->slock = PR_NewLock();
|
|
fl->svar = PR_NewCondVar(fl->slock);
|
|
return fl;
|
|
}
|
|
|
|
/*
 * Release the NSPR resources and storage of a fat lock created by
 * NewFatlock.  Caller must ensure no thread is blocked on fl->svar.
 * NOTE(review): the lock is destroyed before the condvar created on it;
 * NSPR convention is usually condvar first -- confirm this order is safe.
 */
static void
DestroyFatlock(JSFatLock *fl)
{
    PR_DestroyLock(fl->slock);
    PR_DestroyCondVar(fl->svar);
    free(fl);
}
|
|
|
|
static JSFatLock *
|
|
ListOfFatlocks(int l)
|
|
{
|
|
JSFatLock *m;
|
|
JSFatLock *m0;
|
|
int i;
|
|
|
|
JS_ASSERT(l>0);
|
|
m0 = m = NewFatlock();
|
|
for (i=1; i<l; i++) {
|
|
m->next = NewFatlock();
|
|
m = m->next;
|
|
}
|
|
return m0;
|
|
}
|
|
|
|
static void
|
|
DeleteListOfFatlocks(JSFatLock *m)
|
|
{
|
|
JSFatLock *m0;
|
|
for (; m; m=m0) {
|
|
m0 = m->next;
|
|
DestroyFatlock(m);
|
|
}
|
|
}
|
|
|
|
static JSFatLockTable *fl_list_table = NULL;
|
|
static uint32 fl_list_table_len = 0;
|
|
|
|
/*
 * Take a fat lock from the free list of id's global-lock bucket and move it
 * onto that bucket's doubly-linked "taken" list.  Called with the global
 * lock for id held (serializes the list manipulation).
 * NOTE(review): if the emergency ListOfFatlocks(10) allocation fails, the
 * following dereference of a NULL free list crashes -- no caller checks.
 */
static JSFatLock *
GetFatlock(void *id)
{
    JSFatLock *m;

    uint32 i = GLOBAL_LOCK_INDEX(id);
    if (fl_list_table[i].free == NULL) {
#ifdef DEBUG
        printf("Ran out of fat locks!\n");
#endif
        fl_list_table[i].free = ListOfFatlocks(10);
    }
    /* Pop from the free list... */
    m = fl_list_table[i].free;
    fl_list_table[i].free = m->next;
    m->susp = 0;
    /* ...and push onto the taken list, maintaining prevp back-links. */
    m->next = fl_list_table[i].taken;
    m->prevp = &fl_list_table[i].taken;
    if (fl_list_table[i].taken)
        fl_list_table[i].taken->prevp = &m->next;
    fl_list_table[i].taken = m;
    return m;
}
|
|
|
|
/*
 * Return a fat lock obtained via GetFatlock(id) to its bucket's free list.
 * Called with the global lock for id held.  No-op for m == NULL.
 */
static void
PutFatlock(JSFatLock *m, void *id)
{
    uint32 i;
    if (m == NULL)
        return;

    /* Unlink m from fl_list_table[i].taken. */
    *m->prevp = m->next;
    if (m->next)
        m->next->prevp = m->prevp;

    /* Insert m in fl_list_table[i].free. */
    i = GLOBAL_LOCK_INDEX(id);
    m->next = fl_list_table[i].free;
    fl_list_table[i].free = m;
}
|
|
|
|
#endif /* !NSPR_LOCK */
|
|
|
|
JSBool
|
|
js_SetupLocks(int l, int g)
|
|
{
|
|
#ifndef NSPR_LOCK
|
|
uint32 i;
|
|
|
|
if (global_locks)
|
|
return JS_TRUE;
|
|
#ifdef DEBUG
|
|
if (l > 10000 || l < 0) /* l == number of initially allocated fat locks */
|
|
printf("Bad number %d in js_SetupLocks()!\n", l);
|
|
if (g > 100 || g < 0) /* g equals number of global locks */
|
|
printf("Bad number %d in js_SetupLocks()!\n", l);
|
|
#endif
|
|
global_locks_log2 = JS_CeilingLog2(g);
|
|
global_locks_mask = JS_BITMASK(global_locks_log2);
|
|
global_lock_count = JS_BIT(global_locks_log2);
|
|
global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
|
|
if (!global_locks)
|
|
return JS_FALSE;
|
|
for (i = 0; i < global_lock_count; i++) {
|
|
global_locks[i] = PR_NewLock();
|
|
if (!global_locks[i]) {
|
|
global_lock_count = i;
|
|
js_CleanupLocks();
|
|
return JS_FALSE;
|
|
}
|
|
}
|
|
fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
|
|
if (!fl_list_table) {
|
|
js_CleanupLocks();
|
|
return JS_FALSE;
|
|
}
|
|
fl_list_table_len = global_lock_count;
|
|
for (i = 0; i < global_lock_count; i++) {
|
|
fl_list_table[i].free = ListOfFatlocks(l);
|
|
if (!fl_list_table[i].free) {
|
|
fl_list_table_len = i;
|
|
js_CleanupLocks();
|
|
return JS_FALSE;
|
|
}
|
|
fl_list_table[i].taken = NULL;
|
|
}
|
|
#endif /* !NSPR_LOCK */
|
|
return JS_TRUE;
|
|
}
|
|
|
|
/* pull in the cleanup function from jsdtoa.c */
|
|
extern void js_FinishDtoa(void);
|
|
|
|
/*
 * Tear down everything js_SetupLocks created and reset the globals to their
 * initial values, then finish the jsdtoa module.  Tolerates partial setup:
 * global_lock_count and fl_list_table_len are trimmed by js_SetupLocks on
 * failure so only initialized entries are destroyed here.
 */
void
js_CleanupLocks()
{
#ifndef NSPR_LOCK
    uint32 i;

    if (global_locks) {
        for (i = 0; i < global_lock_count; i++)
            PR_DestroyLock(global_locks[i]);
        free(global_locks);
        global_locks = NULL;
        global_lock_count = 1;
        global_locks_log2 = 0;
        global_locks_mask = 0;
    }
    if (fl_list_table) {
        for (i = 0; i < fl_list_table_len; i++) {
            DeleteListOfFatlocks(fl_list_table[i].free);
            fl_list_table[i].free = NULL;
            DeleteListOfFatlocks(fl_list_table[i].taken);
            fl_list_table[i].taken = NULL;
        }
        free(fl_list_table);
        fl_list_table = NULL;
        fl_list_table_len = 0;
    }
#endif /* !NSPR_LOCK */
    js_FinishDtoa();
}
|
|
|
|
void
|
|
js_InitContextForLocking(JSContext *cx)
|
|
{
|
|
cx->thread = CurrentThreadId();
|
|
JS_ASSERT(Thin_GetWait(cx->thread) == 0);
|
|
}
|
|
|
|
#ifndef NSPR_LOCK
|
|
|
|
/*
|
|
* Fast locking and unlocking is implemented by delaying the allocation of a
|
|
* system lock (fat lock) until contention. As long as a locking thread A
|
|
* runs uncontended, the lock is represented solely by storing A's identity in
|
|
* the object being locked.
|
|
*
|
|
* If another thread B tries to lock the object currently locked by A, B is
|
|
* enqueued into a fat lock structure (which might have to be allocated and
|
|
* pointed to by the object), and suspended using NSPR conditional variables
|
|
* (wait). A wait bit (Bacon bit) is set in the lock word of the object,
|
|
* signalling to A that when releasing the lock, B must be dequeued and
|
|
* notified.
|
|
*
|
|
* The basic operation of the locking primitives (js_Lock, js_Unlock,
|
|
* js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
|
|
* the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
|
|
* is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
|
|
* succeeds this implies that p is uncontended (no one is waiting because the
|
|
* wait bit is not set).
|
|
*
|
|
* When dequeueing, the lock is released, and one of the threads suspended on
|
|
* the lock is notified. If other threads still are waiting, the wait bit is
|
|
* kept (in js_Enqueue), and if not, the fat lock is deallocated.
|
|
*
|
|
* The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
|
|
* are serialized using a global lock. For scalability, a hashtable of global
|
|
* locks is used, which is indexed modulo the thin lock pointer.
|
|
*/
|
|
|
|
/*
|
|
* Invariants:
|
|
* (i) global lock is held
|
|
* (ii) fl->susp >= 0
|
|
*/
|
|
/*
 * Block the calling thread on tl's fat lock condvar until js_ResumeThread
 * notifies it.  Allocates (or reuses) the fat lock, drops the global lock
 * around the wait, and reacquires it afterward.  The last waiter to leave
 * returns the fat lock to the pool.  Returns nonzero iff tl->fat ended up
 * NULL (i.e. this thread deallocated it, or another thread did meanwhile).
 *
 * Invariants:
 * (i) global lock is held
 * (ii) fl->susp >= 0
 */
static int
js_SuspendThread(JSThinLock *tl)
{
    JSFatLock *fl;
    PRStatus stat;

    if (tl->fat == NULL)
        fl = tl->fat = GetFatlock(tl);
    else
        fl = tl->fat;
    JS_ASSERT(fl->susp >= 0);
    fl->susp++;
    /* Take fl->slock before releasing the global lock so the notify in
       js_ResumeThread cannot slip in between and be lost. */
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
    js_LockGlobal(tl);
    fl->susp--;
    if (fl->susp == 0) {
        /* Last waiter: recycle the fat lock back to its bucket. */
        PutFatlock(fl, tl);
        tl->fat = NULL;
    }
    return tl->fat == NULL;
}
|
|
|
|
/*
|
|
* (i) global lock is held
|
|
* (ii) fl->susp > 0
|
|
*/
|
|
/*
 * Wake one thread suspended in js_SuspendThread on tl's fat lock.  Releases
 * the global lock (held on entry) after taking fl->slock, mirroring the
 * ordering in js_SuspendThread.
 *
 * (i) global lock is held
 * (ii) fl->susp > 0
 */
static void
js_ResumeThread(JSThinLock *tl)
{
    JSFatLock *fl = tl->fat;
    PRStatus stat;

    JS_ASSERT(fl != NULL);
    JS_ASSERT(fl->susp > 0);
    PR_Lock(fl->slock);
    js_UnlockGlobal(tl);
    stat = PR_NotifyCondVar(fl->svar);
    JS_ASSERT(stat != PR_FAILURE);
    PR_Unlock(fl->slock);
}
|
|
|
|
/*
 * Contended-lock slow path: under the global lock, repeatedly either (a) set
 * the wait bit on the current owner word and suspend until woken, or (b) if
 * the lock is free, grab it with CAS and return.  `me` may carry the wait
 * bit across iterations to preserve "other waiters exist" information.
 */
static void
js_Enqueue(JSThinLock *tl, jsword me)
{
    jsword o, n;

    js_LockGlobal(tl);
    for (;;) {
        o = ReadWord(tl->owner);
        n = Thin_SetWait(o);
        if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
            /* Wait bit published; sleep until the owner dequeues us. */
            if (js_SuspendThread(tl))
                me = Thin_RemoveWait(me);   /* fat lock gone: no other waiters */
            else
                me = Thin_SetWait(me);      /* keep wait bit for remaining waiters */
        }
        else if (js_CompareAndSwap(&tl->owner, 0, me)) {
            /* Lock was free: we own it now. */
            js_UnlockGlobal(tl);
            return;
        }
    }
}
|
|
|
|
/*
 * Contended-unlock slow path: under the global lock, clear the owner word
 * (the wait bit must be set, asserted) and wake one suspended waiter.
 */
static void
js_Dequeue(JSThinLock *tl)
{
    jsword o;

    js_LockGlobal(tl);
    o = ReadWord(tl->owner);
    JS_ASSERT(Thin_GetWait(o) != 0);
    JS_ASSERT(tl->fat != NULL);
    if (!js_CompareAndSwap(&tl->owner, o, 0)) /* release it */
        JS_ASSERT(0);   /* owner word cannot change under the global lock */
    js_ResumeThread(tl);
}
|
|
|
|
/*
 * Acquire thin lock tl for thread `me`: one CAS on the fast path, js_Enqueue
 * on contention.  Not reentrant -- locking a lock we already own asserts in
 * DEBUG builds.
 */
JS_INLINE void
js_Lock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(me == CurrentThreadId());
    if (js_CompareAndSwap(&tl->owner, 0, me))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
        js_Enqueue(tl, me);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* self-lock: caller must handle reentrancy above us */
#endif
}
|
|
|
|
/*
 * Release thin lock tl held by thread `me`: one CAS on the uncontended fast
 * path, js_Dequeue to wake a waiter when the wait bit is set.  Unlocking a
 * lock we do not own asserts in DEBUG builds.
 */
JS_INLINE void
js_Unlock(JSThinLock *tl, jsword me)
{
    JS_ASSERT(me == CurrentThreadId());
    if (js_CompareAndSwap(&tl->owner, me, 0))
        return;
    if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
        js_Dequeue(tl);
#ifdef DEBUG
    else
        JS_ASSERT(0);   /* unbalanced unlock */
#endif
}
|
|
|
|
#endif /* !NSPR_LOCK */
|
|
|
|
/*
 * Acquire the runtime-wide NSPR lock, recording the owner thread id in
 * DEBUG builds for js_IsRuntimeLocked.
 */
void
js_LockRuntime(JSRuntime *rt)
{
    PR_Lock(rt->rtLock);
#ifdef DEBUG
    rt->rtLockOwner = CurrentThreadId();
#endif
}
|
|
|
|
/*
 * Release the runtime-wide NSPR lock, clearing the DEBUG owner record
 * before the unlock so it is never stale while unowned.
 */
void
js_UnlockRuntime(JSRuntime *rt)
{
#ifdef DEBUG
    rt->rtLockOwner = 0;
#endif
    PR_Unlock(rt->rtLock);
}
|
|
|
|
/*
 * Lock a scope for cx: claim it as flyweight if still exclusively owned,
 * bump the reentrancy count if this thread already holds the thin lock,
 * otherwise acquire the thin lock and set the count to 1.
 */
void
js_LockScope(JSContext *cx, JSScope *scope)
{
    jsword me = cx->thread;

    JS_ASSERT(me == CurrentThreadId());
    JS_ASSERT(scope->ownercx != cx);
    if (scope->ownercx && ClaimScope(scope, cx))
        return;

    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
        /* Reentrant lock by the owning thread: just bump the count. */
        JS_ASSERT(scope->u.count > 0);
        LOGIT(scope, '+');
        scope->u.count++;
    } else {
        JSThinLock *tl = &scope->lock;
        JS_LOCK0(tl, me);
        JS_ASSERT(scope->u.count == 0);
        LOGIT(scope, '1');
        scope->u.count = 1;
    }
}
|
|
|
|
/*
 * Unlock a shared (non-flyweight) scope held by cx's thread: decrement the
 * reentrancy count and release the thin lock when it reaches zero.
 */
void
js_UnlockScope(JSContext *cx, JSScope *scope)
{
    jsword me = cx->thread;

    JS_ASSERT(scope->ownercx == NULL);
    JS_ASSERT(scope->u.count > 0);
    if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
        JS_ASSERT(0);   /* unbalanced unlock */
        return;
    }
    LOGIT(scope, '-');
    if (--scope->u.count == 0) {
        JSThinLock *tl = &scope->lock;
        JS_UNLOCK0(tl, me);
    }
}
|
|
|
|
/*
|
|
* NB: oldscope may be null if our caller is js_GetMutableScope and it just
|
|
* dropped the last reference to oldscope.
|
|
*/
|
|
/*
 * Move cx's lock state from oldscope to newscope after obj's scope was
 * replaced (e.g. by js_GetMutableScope).  Transfers the reentrancy count to
 * newscope when newscope is shared, then fully unlocks oldscope.
 */
void
js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
{
    jsword me;
    JSThinLock *tl;

    JS_ASSERT(JS_IS_SCOPE_LOCKED(newscope));

    /*
     * If the last reference to oldscope went away, newscope needs no lock
     * state update.
     */
    if (!oldscope)
        return;
    JS_ASSERT(JS_IS_SCOPE_LOCKED(oldscope));

    /*
     * If oldscope is single-threaded, there's nothing to do.
     * XXX if (!newscope->ownercx), assume newscope->u.count is properly set
     */
    if (oldscope->ownercx) {
        JS_ASSERT(oldscope->ownercx == cx);
        JS_ASSERT(newscope->ownercx == cx || !newscope->ownercx);
        return;
    }

    /*
     * We transfer oldscope->u.count only if newscope is not single-threaded.
     * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
     * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
     * if they find newscope->ownercx != cx.
     */
    if (newscope->ownercx != cx) {
        JS_ASSERT(!newscope->ownercx);
        newscope->u.count = oldscope->u.count;
    }

    /*
     * Reset oldscope's lock state so that it is completely unlocked.
     */
    LOGIT(oldscope, '0');
    oldscope->u.count = 0;
    tl = &oldscope->lock;
    me = cx->thread;
    JS_UNLOCK0(tl, me);
}
|
|
|
|
/*
 * Lock the scope of a native object, retrying if a mutator swaps in a new
 * scope between reading OBJ_SCOPE(obj) and acquiring the lock.
 */
void
js_LockObj(JSContext *cx, JSObject *obj)
{
    JSScope *scope;

    JS_ASSERT(OBJ_IS_NATIVE(obj));
    for (;;) {
        scope = OBJ_SCOPE(obj);
        js_LockScope(cx, scope);

        /* If obj still has this scope, we're done. */
        if (scope == OBJ_SCOPE(obj))
            return;

        /* Lost a race with a mutator; retry with obj's new scope. */
        js_UnlockScope(cx, scope);
    }
}
|
|
|
|
void
|
|
js_UnlockObj(JSContext *cx, JSObject *obj)
|
|
{
|
|
JS_ASSERT(OBJ_IS_NATIVE(obj));
|
|
js_UnlockScope(cx, OBJ_SCOPE(obj));
|
|
}
|
|
|
|
#ifdef DEBUG
|
|
JSBool
|
|
js_IsRuntimeLocked(JSRuntime *rt)
|
|
{
|
|
return CurrentThreadId() == rt->rtLockOwner;
|
|
}
|
|
|
|
JSBool
|
|
js_IsObjLocked(JSObject *obj)
|
|
{
|
|
JSScope *scope = OBJ_SCOPE(obj);
|
|
|
|
return MAP_IS_NATIVE(&scope->map) &&
|
|
(scope->ownercx ||
|
|
CurrentThreadId() == Thin_RemoveWait(ReadWord(scope->lock.owner)));
|
|
}
|
|
|
|
JSBool
|
|
js_IsScopeLocked(JSScope *scope)
|
|
{
|
|
return scope->ownercx ||
|
|
CurrentThreadId() == Thin_RemoveWait(ReadWord(scope->lock.owner));
|
|
}
|
|
#endif
|
|
|
|
#endif /* JS_THREADSAFE */
|