CLOSED TREE
Ryan VanderMeulen 2017-05-31 14:32:55 -04:00
Parents 2f30de4e14 054093eb1e
Commit b9fe830aa0
231 changed files: 1588 additions and 14817 deletions

.gitignore (vendored)
View file

@@ -90,6 +90,9 @@ GPATH
# Git clone directory for updating web-platform-tests
testing/web-platform/sync/
+# Third party metadata for web-platform-tests
+testing/web-platform/products/
# Android Gradle artifacts.
mobile/android/gradle/.gradle

View file

@@ -98,6 +98,9 @@ GPATH
# Git clone directory for updating web-platform-tests
^testing/web-platform/sync/
+# Third party metadata for web-platform-tests
+^testing/web-platform/products/
# Android Gradle artifacts.
^mobile/android/gradle/.gradle

View file

@@ -2,6 +2,8 @@
/* vim: set sts=2 sw=2 et tw=80: */
"use strict";
+requestLongerTimeout(2);
async function testOptionsBrowserStyle(optionsUI, assertMessage) {
function optionsScript() {
browser.test.onMessage.addListener((msgName, optionsUI, assertMessage) => {
@@ -52,9 +54,9 @@ async function testOptionsBrowserStyle(optionsUI, assertMessage) {
extension.sendMessage("check-style", optionsUI, assertMessage);
await extension.awaitFinish("options-ui-browser_style");
-await extension.unload();
await BrowserTestUtils.removeTab(tab);
+await extension.unload();
}
add_task(async function test_options_without_setting_browser_style() {

View file

@@ -42,7 +42,7 @@ packages.txt:testing/mozbase/packages.txt
mozilla.pth:testing/taskcluster
mozilla.pth:testing/tools/autotry
mozilla.pth:testing/web-platform
-mozilla.pth:testing/web-platform/harness
+mozilla.pth:testing/web-platform/tests/tools/wptrunner
mozilla.pth:testing/web-platform/tests/tools/wptserve
mozilla.pth:testing/web-platform/tests/tools/six
mozilla.pth:testing/xpcshell

View file

@@ -9,6 +9,7 @@
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/SubtleCryptoBinding.h"
#include "mozilla/dom/WebCryptoTask.h"
+#include "mozilla/Telemetry.h"
namespace mozilla {
namespace dom {
@@ -25,6 +26,7 @@ NS_INTERFACE_MAP_END
SubtleCrypto::SubtleCrypto(nsIGlobalObject* aParent)
: mParent(aParent)
+, mRecordedTelemetry(false)
{
}
@@ -34,14 +36,28 @@ SubtleCrypto::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
return SubtleCryptoBinding::Wrap(aCx, this, aGivenProto);
}
+void
+SubtleCrypto::RecordTelemetryOnce() {
+if (mRecordedTelemetry) {
+return;
+}
+mRecordedTelemetry = true;
+JSObject* global = mParent->GetGlobalJSObject();
+bool isSecure = JS_GetIsSecureContext(js::GetObjectCompartment(global));
+Telemetry::Accumulate(Telemetry::WEBCRYPTO_METHOD_SECURE, isSecure);
+}
#define SUBTLECRYPTO_METHOD_BODY(Operation, aRv, ...) \
MOZ_ASSERT(mParent); \
RefPtr<Promise> p = Promise::Create(mParent, aRv); \
if (aRv.Failed()) { \
return nullptr; \
} \
-RefPtr<WebCryptoTask> task = WebCryptoTask::Create ## Operation ## Task(__VA_ARGS__); \
-task->DispatchWithPromise(p); \
+RecordTelemetryOnce(); \
+RefPtr<WebCryptoTask> task = \
+WebCryptoTask::Create ## Operation ## Task(__VA_ARGS__); \
+task->DispatchWithPromise(p); \
return p.forget();
already_AddRefed<Promise>

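Note: SUBTLECRYPTO_METHOD_BODY above is expanded into every SubtleCrypto entry point, so the new RecordTelemetryOnce() call runs on whichever operation a page uses first. As a reading aid, here is a rough hand-expansion for one operation; the parameter list is abbreviated and assumed, not copied from the tree:

// Hypothetical expansion of SUBTLECRYPTO_METHOD_BODY(Encrypt, aRv, ...)
// after this patch; the signature is illustrative only.
already_AddRefed<Promise>
SubtleCrypto::Encrypt(JSContext* aCx, const ObjectOrString& aAlgorithm,
                      CryptoKey& aKey, const CryptoOperationData& aData,
                      ErrorResult& aRv)
{
  MOZ_ASSERT(mParent);
  RefPtr<Promise> p = Promise::Create(mParent, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }
  // New: records the WEBCRYPTO_METHOD_SECURE probe (secure vs. insecure
  // context) the first time any method is called on this SubtleCrypto object.
  RecordTelemetryOnce();
  RefPtr<WebCryptoTask> task =
    WebCryptoTask::CreateEncryptTask(aCx, aAlgorithm, aKey, aData);
  task->DispatchWithPromise(p);
  return p.forget();
}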
View file

@@ -119,7 +119,10 @@ public:
ErrorResult& aRv);
private:
+void RecordTelemetryOnce();
nsCOMPtr<nsIGlobalObject> mParent;
+bool mRecordedTelemetry;
};
} // namespace dom

View file

@@ -59,6 +59,9 @@ class FetchSignalProxy final : public FetchSignal::Follower
// This is created and released on the main-thread.
RefPtr<FetchSignal> mSignalMainThread;
+// The main-thread event target for runnable dispatching.
+nsCOMPtr<nsIEventTarget> mMainThreadEventTarget;
// This value is used only for the creation of FetchSignal on the
// main-thread. They are not updated.
const bool mAborted;
@@ -87,9 +90,11 @@ class FetchSignalProxy final : public FetchSignal::Follower
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(FetchSignalProxy)
-explicit FetchSignalProxy(FetchSignal* aSignal)
-: mAborted(aSignal->Aborted())
+FetchSignalProxy(FetchSignal* aSignal, nsIEventTarget* aMainThreadEventTarget)
+: mMainThreadEventTarget(aMainThreadEventTarget)
+, mAborted(aSignal->Aborted())
{
+MOZ_ASSERT(mMainThreadEventTarget);
Follow(aSignal);
}
@@ -98,7 +103,7 @@ public:
{
RefPtr<FetchSignalProxyRunnable> runnable =
new FetchSignalProxyRunnable(this);
-NS_DispatchToMainThread(runnable);
+mMainThreadEventTarget->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
}
FetchSignal*
@@ -120,7 +125,7 @@ public:
private:
~FetchSignalProxy()
{
-NS_ReleaseOnMainThread(mSignalMainThread.forget());
+NS_ProxyRelease(mMainThreadEventTarget, mSignalMainThread.forget());
}
};
@@ -152,7 +157,8 @@ public:
RefPtr<FetchSignalProxy> signalProxy;
if (aSignal) {
-signalProxy = new FetchSignalProxy(aSignal);
+signalProxy =
+new FetchSignalProxy(aSignal, aWorkerPrivate->MainThreadEventTarget());
}
RefPtr<WorkerFetchResolver> r =
@@ -276,14 +282,16 @@ public:
return NS_OK;
}
-nsCOMPtr<nsIPrincipal> principal = proxy->GetWorkerPrivate()->GetPrincipal();
+WorkerPrivate* workerPrivate = proxy->GetWorkerPrivate();
+MOZ_ASSERT(workerPrivate);
+nsCOMPtr<nsIPrincipal> principal = workerPrivate->GetPrincipal();
MOZ_ASSERT(principal);
-nsCOMPtr<nsILoadGroup> loadGroup = proxy->GetWorkerPrivate()->GetLoadGroup();
+nsCOMPtr<nsILoadGroup> loadGroup = workerPrivate->GetLoadGroup();
MOZ_ASSERT(loadGroup);
// We don't track if a worker is spawned from a tracking script for now,
// so pass false as the last argument to FetchDriver().
-fetch = new FetchDriver(mRequest, principal, loadGroup, false);
+fetch = new FetchDriver(mRequest, principal, loadGroup,
+workerPrivate->MainThreadEventTarget(), false);
nsAutoCString spec;
if (proxy->GetWorkerPrivate()->GetBaseURI()) {
proxy->GetWorkerPrivate()->GetBaseURI()->GetAsciiSpec(spec);
@@ -308,6 +316,8 @@ FetchRequest(nsIGlobalObject* aGlobal, const RequestOrUSVString& aInput,
return nullptr;
}
+MOZ_ASSERT(aGlobal);
// Double check that we have chrome privileges if the Request's content
// policy type has been overridden.
MOZ_ASSERT_IF(aInput.IsRequest() &&
@@ -380,7 +390,8 @@ FetchRequest(nsIGlobalObject* aGlobal, const RequestOrUSVString& aInput,
RefPtr<MainThreadFetchResolver> resolver =
new MainThreadFetchResolver(p, observer);
RefPtr<FetchDriver> fetch =
-new FetchDriver(r, principal, loadGroup, isTrackingFetch);
+new FetchDriver(r, principal, loadGroup,
+aGlobal->EventTargetFor(TaskCategory::Other), isTrackingFetch);
fetch->SetDocument(doc);
resolver->SetLoadGroup(loadGroup);
aRv = fetch->Fetch(signal, resolver);
@@ -1119,26 +1130,33 @@ public:
};
template <class Derived>
-FetchBody<Derived>::FetchBody()
+FetchBody<Derived>::FetchBody(nsIGlobalObject* aOwner)
: mWorkerHolder(nullptr)
+, mOwner(aOwner)
, mBodyUsed(false)
#ifdef DEBUG
, mReadDone(false)
#endif
{
+MOZ_ASSERT(aOwner);
if (!NS_IsMainThread()) {
mWorkerPrivate = GetCurrentThreadWorkerPrivate();
MOZ_ASSERT(mWorkerPrivate);
+mMainThreadEventTarget = mWorkerPrivate->MainThreadEventTarget();
} else {
mWorkerPrivate = nullptr;
+mMainThreadEventTarget = aOwner->EventTargetFor(TaskCategory::Other);
}
+MOZ_ASSERT(mMainThreadEventTarget);
}
template
-FetchBody<Request>::FetchBody();
+FetchBody<Request>::FetchBody(nsIGlobalObject* aOwner);
template
-FetchBody<Response>::FetchBody();
+FetchBody<Response>::FetchBody(nsIGlobalObject* aOwner);
template <class Derived>
FetchBody<Derived>::~FetchBody()
@@ -1235,11 +1253,7 @@ FetchBody<Derived>::BeginConsumeBody()
nsCOMPtr<nsIRunnable> r = new BeginConsumeBodyRunnable<Derived>(this);
nsresult rv = NS_OK;
-if (mWorkerPrivate) {
-rv = mWorkerPrivate->DispatchToMainThread(r.forget());
-} else {
-rv = NS_DispatchToMainThread(r.forget());
-}
+mMainThreadEventTarget->Dispatch(r.forget(), NS_DISPATCH_NORMAL);
if (NS_WARN_IF(NS_FAILED(rv))) {
ReleaseObject();
return rv;
@@ -1270,7 +1284,8 @@ FetchBody<Derived>::BeginConsumeBodyMainThread()
nsCOMPtr<nsIInputStreamPump> pump;
rv = NS_NewInputStreamPump(getter_AddRefs(pump),
-stream);
+stream, -1, -1, 0, 0, false,
+mMainThreadEventTarget);
if (NS_WARN_IF(NS_FAILED(rv))) {
return;
}
@@ -1293,7 +1308,8 @@ FetchBody<Derived>::BeginConsumeBodyMainThread()
type = MutableBlobStorage::eCouldBeInTemporaryFile;
}
-listener = new MutableBlobStreamListener(type, nullptr, mMimeType, p);
+listener = new MutableBlobStreamListener(type, nullptr, mMimeType, p,
+mMainThreadEventTarget);
} else {
nsCOMPtr<nsIStreamLoader> loader;
rv = NS_NewStreamLoader(getter_AddRefs(loader), p);
@@ -1311,7 +1327,8 @@ FetchBody<Derived>::BeginConsumeBodyMainThread()
// Now that everything succeeded, we can assign the pump to a pointer that
// stays alive for the lifetime of the FetchBody.
-mConsumeBodyPump = new nsMainThreadPtrHolder<nsIInputStreamPump>(pump);
+mConsumeBodyPump =
+new nsMainThreadPtrHolder<nsIInputStreamPump>(pump, mMainThreadEventTarget);
// It is ok for retargeting to fail and reads to happen on the main thread.
autoReject.DontFail();

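The changes in this file all apply the same pattern: instead of the anonymous NS_DispatchToMainThread(), each object captures a labeled main-thread event target at construction time and dispatches to it. A minimal sketch of that pattern, using the same calls as the FetchBody constructor above (variable names are placeholders):

// Sketch only; mirrors the FetchBody constructor in the hunk above.
nsCOMPtr<nsIEventTarget> mainThread;
if (!NS_IsMainThread()) {
  // A worker knows its own (labeled) main-thread event target.
  mainThread = workerPrivate->MainThreadEventTarget();
} else {
  // On the main thread, derive the target from the global so runnables are
  // labeled with that global's group.
  mainThread = global->EventTargetFor(TaskCategory::Other);
}
MOZ_ASSERT(mainThread);
// Wherever NS_DispatchToMainThread(runnable) used to be called:
mainThread->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);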
View file

@@ -23,6 +23,7 @@
#include "mozilla/dom/workers/bindings/WorkerHolder.h"
class nsIGlobalObject;
+class nsIEventTarget;
namespace mozilla {
namespace dom {
@@ -169,7 +170,9 @@ public:
nsAutoPtr<workers::WorkerHolder> mWorkerHolder;
protected:
-FetchBody();
+nsCOMPtr<nsIGlobalObject> mOwner;
+explicit FetchBody(nsIGlobalObject* aOwner);
virtual ~FetchBody();
@@ -233,6 +236,9 @@ private:
#endif
nsMainThreadPtrHandle<nsIInputStreamPump> mConsumeBodyPump;
+// The main-thread event target for runnable dispatching.
+nsCOMPtr<nsIEventTarget> mMainThreadEventTarget;
};
} // namespace dom

View file

@@ -48,10 +48,12 @@ NS_IMPL_ISUPPORTS(FetchDriver,
nsIThreadRetargetableStreamListener)
FetchDriver::FetchDriver(InternalRequest* aRequest, nsIPrincipal* aPrincipal,
-nsILoadGroup* aLoadGroup, bool aIsTrackingFetch)
+nsILoadGroup* aLoadGroup, nsIEventTarget* aMainThreadEventTarget,
+bool aIsTrackingFetch)
: mPrincipal(aPrincipal)
, mLoadGroup(aLoadGroup)
, mRequest(aRequest)
+, mMainThreadEventTarget(aMainThreadEventTarget)
, mIsTrackingFetch(aIsTrackingFetch)
#ifdef DEBUG
, mResponseAvailableCalled(false)
@@ -60,6 +62,7 @@ FetchDriver::FetchDriver(InternalRequest* aRequest, nsIPrincipal* aPrincipal,
{
MOZ_ASSERT(aRequest);
MOZ_ASSERT(aPrincipal);
+MOZ_ASSERT(aMainThreadEventTarget);
}
FetchDriver::~FetchDriver()
@@ -672,7 +675,8 @@ FetchDriver::OnDataAvailable(nsIRequest* aRequest,
mObserver->OnDataAvailable();
} else {
RefPtr<Runnable> runnable = new DataAvailableRunnable(mObserver);
-nsresult rv = NS_DispatchToMainThread(runnable);
+nsresult rv =
+mMainThreadEventTarget->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}

View file

@@ -21,6 +21,7 @@
class nsIConsoleReportCollector;
class nsIDocument;
+class nsIEventTarget;
class nsIOutputStream;
class nsILoadGroup;
class nsIPrincipal;
@@ -97,6 +98,7 @@ public:
FetchDriver(InternalRequest* aRequest,
nsIPrincipal* aPrincipal,
nsILoadGroup* aLoadGroup,
+nsIEventTarget* aMainThreadEventTarget,
bool aIsTrackingFetch);
nsresult Fetch(FetchSignal* aSignal,
@@ -127,6 +129,7 @@ private:
nsCOMPtr<nsIDocument> mDocument;
nsCOMPtr<nsIChannel> mChannel;
nsAutoPtr<SRICheckDataVerifier> mSRIDataVerifier;
+nsCOMPtr<nsIEventTarget> mMainThreadEventTarget;
SRIMetadata mSRIMetadata;
nsCString mWorkerScript;
bool mIsTrackingFetch;

View file

@@ -33,8 +33,7 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(Request)
NS_INTERFACE_MAP_END
Request::Request(nsIGlobalObject* aOwner, InternalRequest* aRequest)
-: FetchBody<Request>()
-, mOwner(aOwner)
+: FetchBody<Request>(aOwner)
, mRequest(aRequest)
{
MOZ_ASSERT(aRequest->Headers()->Guard() == HeadersGuardEnum::Immutable ||

View file

@@ -156,7 +156,6 @@ public:
private:
~Request();
-nsCOMPtr<nsIGlobalObject> mOwner;
RefPtr<InternalRequest> mRequest;
// Lazily created.
RefPtr<Headers> mHeaders;

View file

@@ -34,8 +34,7 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(Response)
NS_INTERFACE_MAP_END
Response::Response(nsIGlobalObject* aGlobal, InternalResponse* aInternalResponse)
-: FetchBody<Response>()
-, mOwner(aGlobal)
+: FetchBody<Response>(aGlobal)
, mInternalResponse(aInternalResponse)
{
MOZ_ASSERT(aInternalResponse->Headers()->Guard() == HeadersGuardEnum::Immutable ||

View file

@@ -137,7 +137,6 @@ public:
private:
~Response();
-nsCOMPtr<nsIGlobalObject> mOwner;
RefPtr<InternalResponse> mInternalResponse;
// Lazily created
RefPtr<Headers> mHeaders;

View file

@@ -5,10 +5,12 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "IPCBlobInputStreamChild.h"
+#include "IPCBlobInputStreamThread.h"
#include "mozilla/ipc/IPCStreamUtils.h"
-#include "WorkerPrivate.h"
#include "WorkerHolder.h"
+#include "WorkerPrivate.h"
+#include "WorkerRunnable.h"
namespace mozilla {
namespace dom {
@@ -49,7 +51,9 @@ public:
NS_IMETHOD
Run() override
{
-if (mActor->IsAlive()) {
+MOZ_ASSERT(mActor->State() != IPCBlobInputStreamChild::eActiveMigrating &&
+mActor->State() != IPCBlobInputStreamChild::eInactiveMigrating);
+if (mActor->State() == IPCBlobInputStreamChild::eActive) {
mActor->SendStreamNeeded();
}
return NS_OK;
@@ -88,21 +92,35 @@ private:
class IPCBlobInputStreamWorkerHolder final : public WorkerHolder
{
public:
-explicit IPCBlobInputStreamWorkerHolder(IPCBlobInputStreamChild* aActor)
-: mActor(aActor)
-{}
bool Notify(Status aStatus) override
{
-if (aStatus > Running) {
-mActor->Shutdown();
-// After this the WorkerHolder is gone.
-}
+// We must keep the worker alive until the migration is completed.
return true;
}
+};
+class ReleaseWorkerHolderRunnable final : public CancelableRunnable
+{
+public:
+explicit ReleaseWorkerHolderRunnable(UniquePtr<workers::WorkerHolder>&& aWorkerHolder)
+: mWorkerHolder(Move(aWorkerHolder))
+{}
+NS_IMETHOD
+Run() override
+{
+mWorkerHolder = nullptr;
+return NS_OK;
+}
+nsresult
+Cancel() override
+{
+return Run();
+}
private:
-RefPtr<IPCBlobInputStreamChild> mActor;
+UniquePtr<workers::WorkerHolder> mWorkerHolder;
};
} // anonymous
@@ -112,7 +130,7 @@ IPCBlobInputStreamChild::IPCBlobInputStreamChild(const nsID& aID,
: mMutex("IPCBlobInputStreamChild::mMutex")
, mID(aID)
, mSize(aSize)
-, mActorAlive(true)
+, mState(eActive)
, mOwningThread(NS_GetCurrentThread())
{
// If we are running in a worker, we need to send a Close() to the parent side
@@ -121,7 +139,7 @@ IPCBlobInputStreamChild::IPCBlobInputStreamChild(const nsID& aID,
WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate();
if (workerPrivate) {
UniquePtr<WorkerHolder> workerHolder(
-new IPCBlobInputStreamWorkerHolder(this));
+new IPCBlobInputStreamWorkerHolder());
if (workerHolder->HoldWorker(workerPrivate, Canceling)) {
mWorkerHolder.swap(workerHolder);
}
@@ -142,42 +160,76 @@ IPCBlobInputStreamChild::Shutdown()
mWorkerHolder = nullptr;
mPendingOperations.Clear();
-if (mActorAlive) {
+if (mState == eActive) {
SendClose();
-mActorAlive = false;
+mState = eInactive;
}
}
void
IPCBlobInputStreamChild::ActorDestroy(IProtocol::ActorDestroyReason aReason)
{
+bool migrating = false;
{
MutexAutoLock lock(mMutex);
-mActorAlive = false;
+migrating = mState == eActiveMigrating;
+mState = migrating ? eInactiveMigrating : eInactive;
+}
+if (migrating) {
+// We were waiting for this! Now we can migrate the actor in the correct
+// thread.
+RefPtr<IPCBlobInputStreamThread> thread =
+IPCBlobInputStreamThread::GetOrCreate();
+ResetManager();
+thread->MigrateActor(this);
+return;
}
// Let's cleanup the workerHolder and the pending operation queue.
Shutdown();
}
-bool
-IPCBlobInputStreamChild::IsAlive()
+IPCBlobInputStreamChild::ActorState
+IPCBlobInputStreamChild::State()
{
MutexAutoLock lock(mMutex);
-return mActorAlive;
+return mState;
}
already_AddRefed<nsIInputStream>
IPCBlobInputStreamChild::CreateStream()
{
-MutexAutoLock lock(mMutex);
-if (!mActorAlive) {
-return nullptr;
-}
+bool shouldMigrate = false;
RefPtr<IPCBlobInputStream> stream = new IPCBlobInputStream(this);
-mStreams.AppendElement(stream);
+{
+MutexAutoLock lock(mMutex);
+if (mState == eInactive) {
+return nullptr;
+}
+// The stream is active but maybe it is not running in the DOM-File thread.
+// We should migrate it there.
+if (mState == eActive &&
+!IPCBlobInputStreamThread::IsOnFileThread(mOwningThread)) {
+MOZ_ASSERT(mStreams.IsEmpty());
+shouldMigrate = true;
+mState = eActiveMigrating;
+}
+mStreams.AppendElement(stream);
+}
+// Send__delete__ will call ActorDestroy(). mMutex cannot be locked at this
+// time.
+if (shouldMigrate) {
+Send__delete__(this);
+}
return stream.forget();
}
@@ -192,7 +244,7 @@ IPCBlobInputStreamChild::ForgetStream(IPCBlobInputStream* aStream)
MutexAutoLock lock(mMutex);
mStreams.RemoveElement(aStream);
-if (!mStreams.IsEmpty() || !mActorAlive) {
+if (!mStreams.IsEmpty() || mState != eActive) {
return;
}
}
@@ -212,7 +264,7 @@ IPCBlobInputStreamChild::StreamNeeded(IPCBlobInputStream* aStream,
{
MutexAutoLock lock(mMutex);
-if (!mActorAlive) {
+if (mState == eInactive) {
return;
}
@@ -222,6 +274,13 @@ IPCBlobInputStreamChild::StreamNeeded(IPCBlobInputStream* aStream,
opt->mStream = aStream;
opt->mEventTarget = aEventTarget ? aEventTarget : NS_GetCurrentThread();
+if (mState == eActiveMigrating || mState == eInactiveMigrating) {
+// This operation will be continued when the migration is completed.
+return;
+}
+MOZ_ASSERT(mState == eActive);
if (mOwningThread == NS_GetCurrentThread()) {
SendStreamNeeded();
return;
@@ -242,7 +301,7 @@ IPCBlobInputStreamChild::RecvStreamReady(const OptionalIPCStream& aStream)
{
MutexAutoLock lock(mMutex);
MOZ_ASSERT(!mPendingOperations.IsEmpty());
-MOZ_ASSERT(mActorAlive);
+MOZ_ASSERT(mState == eActive);
pendingStream = mPendingOperations[0].mStream;
eventTarget = mPendingOperations[0].mEventTarget;
@@ -257,5 +316,36 @@ IPCBlobInputStreamChild::RecvStreamReady(const OptionalIPCStream& aStream)
return IPC_OK();
}
+void
+IPCBlobInputStreamChild::Migrated()
+{
+MutexAutoLock lock(mMutex);
+MOZ_ASSERT(mState == eInactiveMigrating);
+if (mWorkerHolder) {
+RefPtr<ReleaseWorkerHolderRunnable> runnable =
+new ReleaseWorkerHolderRunnable(Move(mWorkerHolder));
+mOwningThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
+}
+mOwningThread = NS_GetCurrentThread();
+MOZ_ASSERT(IPCBlobInputStreamThread::IsOnFileThread(mOwningThread));
+// Maybe we have no reasons to keep this actor alive.
+if (mStreams.IsEmpty()) {
+mState = eInactive;
+SendClose();
+return;
+}
+mState = eActive;
+// Let's processing the pending operations. We need a stream for each pending
+// operation.
+for (uint32_t i = 0; i < mPendingOperations.Length(); ++i) {
+SendStreamNeeded();
+}
+}
} // namespace dom
} // namespace mozilla

View file

@@ -26,6 +26,23 @@ class IPCBlobInputStreamChild final
: public mozilla::ipc::PIPCBlobInputStreamChild
{
public:
+enum ActorState
+{
+// The actor is connected via IPDL to the parent.
+eActive,
+// The actor is disconnected.
+eInactive,
+// The actor is waiting to be disconnected. Once it has been disconnected,
+// it will be reactivated on the DOM-File thread.
+eActiveMigrating,
+// The actor has been disconnected and it's waiting to be connected on the
+// DOM-File thread.
+eInactiveMigrating,
+};
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(IPCBlobInputStreamChild)
IPCBlobInputStreamChild(const nsID& aID, uint64_t aSize);
@@ -33,8 +50,8 @@ public:
void
ActorDestroy(IProtocol::ActorDestroyReason aReason) override;
-bool
-IsAlive();
+ActorState
+State();
already_AddRefed<nsIInputStream>
CreateStream();
@@ -64,6 +81,9 @@ public:
void
Shutdown();
+void
+Migrated();
private:
~IPCBlobInputStreamChild();
@@ -78,8 +98,7 @@ private:
const nsID mID;
const uint64_t mSize;
-// false when ActorDestroy() is called.
-bool mActorAlive;
+ActorState mState;
// This struct and the array are used for creating streams when needed.
struct PendingOperation

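The ActorState enum added above is a small state machine whose transitions are spread across Shutdown(), ActorDestroy(), CreateStream() and Migrated() in the .cpp hunk. As a reading aid, a hypothetical checker (not part of the patch) that encodes the transitions those methods perform:

// Illustration only: the transitions implied by the child-side code.
static bool
IsExpectedTransition(IPCBlobInputStreamChild::ActorState aFrom,
                     IPCBlobInputStreamChild::ActorState aTo)
{
  typedef IPCBlobInputStreamChild S;
  switch (aFrom) {
    case S::eActive:
      // CreateStream() off the DOM-File thread starts a migration;
      // Shutdown()/ActorDestroy() simply deactivate the actor.
      return aTo == S::eActiveMigrating || aTo == S::eInactive;
    case S::eActiveMigrating:
      // ActorDestroy() after the child-initiated __delete__.
      return aTo == S::eInactiveMigrating;
    case S::eInactiveMigrating:
      // Migrated() on the DOM-File thread: back to eActive, or straight to
      // eInactive if no IPCBlobInputStream is left to serve.
      return aTo == S::eActive || aTo == S::eInactive;
    case S::eInactive:
    default:
      return false;
  }
}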
View file

@@ -15,7 +15,7 @@ namespace dom {
template<typename M>
/* static */ IPCBlobInputStreamParent*
IPCBlobInputStreamParent::Create(nsIInputStream* aInputStream, uint64_t aSize,
-nsresult* aRv, M* aManager)
+uint64_t aChildID, nsresult* aRv, M* aManager)
{
MOZ_ASSERT(aInputStream);
MOZ_ASSERT(aRv);
@@ -26,11 +26,23 @@ IPCBlobInputStreamParent::Create(nsIInputStream* aInputStream, uint64_t aSize,
return nullptr;
}
-IPCBlobInputStreamStorage::Get()->AddStream(aInputStream, id);
+IPCBlobInputStreamStorage::Get()->AddStream(aInputStream, id, aChildID);
return new IPCBlobInputStreamParent(id, aSize, aManager);
}
+/* static */ IPCBlobInputStreamParent*
+IPCBlobInputStreamParent::Create(const nsID& aID, uint64_t aSize,
+PBackgroundParent* aManager)
+{
+IPCBlobInputStreamParent* actor =
+new IPCBlobInputStreamParent(aID, aSize, aManager);
+actor->mCallback = IPCBlobInputStreamStorage::Get()->TakeCallback(aID);
+return actor;
+}
IPCBlobInputStreamParent::IPCBlobInputStreamParent(const nsID& aID,
uint64_t aSize,
nsIContentParent* aManager)
@@ -38,6 +50,7 @@ IPCBlobInputStreamParent::IPCBlobInputStreamParent(const nsID& aID,
, mSize(aSize)
, mContentManager(aManager)
, mPBackgroundManager(nullptr)
+, mMigrating(false)
{}
IPCBlobInputStreamParent::IPCBlobInputStreamParent(const nsID& aID,
@@ -47,6 +60,7 @@ IPCBlobInputStreamParent::IPCBlobInputStreamParent(const nsID& aID,
, mSize(aSize)
, mContentManager(nullptr)
, mPBackgroundManager(aManager)
+, mMigrating(false)
{}
void
@@ -57,11 +71,23 @@ IPCBlobInputStreamParent::ActorDestroy(IProtocol::ActorDestroyReason aReason)
mContentManager = nullptr;
mPBackgroundManager = nullptr;
-IPCBlobInputStreamStorage::Get()->ForgetStream(mID);
RefPtr<IPCBlobInputStreamParentCallback> callback;
mCallback.swap(callback);
+RefPtr<IPCBlobInputStreamStorage> storage = IPCBlobInputStreamStorage::Get();
+if (mMigrating) {
+if (callback && storage) {
+// We need to assign this callback to the next parent.
+IPCBlobInputStreamStorage::Get()->StoreCallback(mID, callback);
+}
+return;
+}
+if (storage) {
+storage->ForgetStream(mID);
+}
if (callback) {
callback->ActorDestroyed(mID);
}
@@ -123,5 +149,21 @@ IPCBlobInputStreamParent::RecvClose()
return IPC_OK();
}
+mozilla::ipc::IPCResult
+IPCBlobInputStreamParent::Recv__delete__()
+{
+MOZ_ASSERT(mContentManager || mPBackgroundManager);
+mMigrating = true;
+return IPC_OK();
+}
+bool
+IPCBlobInputStreamParent::HasValidStream() const
+{
+nsCOMPtr<nsIInputStream> stream;
+IPCBlobInputStreamStorage::Get()->GetStream(mID, getter_AddRefs(stream));
+return !!stream;
+}
} // namespace dom
} // namespace mozilla

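Read together with the storage changes further down, the two hunks above describe how the parent side survives a child-initiated migration: the stream's UUID stays registered in IPCBlobInputStreamStorage, and the callback is parked there between the old actor and its replacement. A condensed sketch assembled from the code above (not a literal excerpt):

// Old actor, in ActorDestroy() while mMigrating is set: keep the stream
// registered under mID and park the callback for the next actor.
if (mMigrating) {
  if (callback && storage) {
    storage->StoreCallback(mID, callback);
  }
  return; // note: no ForgetStream(mID) on this path
}

// Replacement actor, created on PBackground for the DOM-File thread, picks
// the callback back up via the second Create() overload:
IPCBlobInputStreamParent* actor =
  new IPCBlobInputStreamParent(aID, aSize, aManager);
actor->mCallback = IPCBlobInputStreamStorage::Get()->TakeCallback(aID);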
View file

@@ -36,8 +36,12 @@ public:
// case the stream is a nsFileStream.
template<typename M>
static IPCBlobInputStreamParent*
-Create(nsIInputStream* aInputStream, uint64_t aSize, nsresult* aRv,
-M* aManager);
+Create(nsIInputStream* aInputStream, uint64_t aSize,
+uint64_t aChildID, nsresult* aRv, M* aManager);
+static IPCBlobInputStreamParent*
+Create(const nsID& aID, uint64_t aSize,
+mozilla::ipc::PBackgroundParent* aManager);
void
ActorDestroy(IProtocol::ActorDestroyReason aReason) override;
@@ -63,6 +67,12 @@ public:
mozilla::ipc::IPCResult
RecvClose() override;
+mozilla::ipc::IPCResult
+Recv__delete__() override;
+bool
+HasValidStream() const;
private:
IPCBlobInputStreamParent(const nsID& aID, uint64_t aSize,
nsIContentParent* aManager);
@@ -79,6 +89,8 @@ private:
mozilla::ipc::PBackgroundParent* mPBackgroundManager;
RefPtr<IPCBlobInputStreamParentCallback> mCallback;
+bool mMigrating;
};
} // namespace dom

View file

@@ -6,12 +6,16 @@
#include "IPCBlobInputStreamStorage.h"
-#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/dom/ContentParent.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPtr.h"
+#include "nsIPropertyBag2.h"
#include "nsStreamUtils.h"
namespace mozilla {
+using namespace hal;
namespace dom {
namespace {
@@ -19,6 +23,17 @@ StaticMutex gMutex;
StaticRefPtr<IPCBlobInputStreamStorage> gStorage;
}
+NS_INTERFACE_MAP_BEGIN(IPCBlobInputStreamStorage)
+NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIObserver)
+NS_INTERFACE_MAP_ENTRY(nsIObserver)
+NS_INTERFACE_MAP_END
+NS_IMPL_ADDREF(IPCBlobInputStreamStorage)
+NS_IMPL_RELEASE(IPCBlobInputStreamStorage)
+IPCBlobInputStreamStorage::IPCBlobInputStreamStorage()
+{}
IPCBlobInputStreamStorage::~IPCBlobInputStreamStorage()
{}
@@ -34,17 +49,66 @@ IPCBlobInputStreamStorage::Initialize()
MOZ_ASSERT(!gStorage);
gStorage = new IPCBlobInputStreamStorage();
-ClearOnShutdown(&gStorage);
+nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
+if (obs) {
+obs->AddObserver(gStorage, "xpcom-shutdown", false);
+obs->AddObserver(gStorage, "ipc:content-shutdown", false);
+}
+}
+NS_IMETHODIMP
+IPCBlobInputStreamStorage::Observe(nsISupports* aSubject, const char* aTopic,
+const char16_t* aData)
+{
+if (!strcmp(aTopic, "xpcom-shutdown")) {
+nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService();
+if (obs) {
+obs->RemoveObserver(this, "xpcom-shutdown");
+obs->RemoveObserver(this, "ipc:content-shutdown");
+}
+gStorage = nullptr;
+return NS_OK;
+}
+MOZ_ASSERT(!strcmp(aTopic, "ipc:content-shutdown"));
+nsCOMPtr<nsIPropertyBag2> props = do_QueryInterface(aSubject);
+if (NS_WARN_IF(!props)) {
+return NS_ERROR_FAILURE;
+}
+uint64_t childID = CONTENT_PROCESS_ID_UNKNOWN;
+props->GetPropertyAsUint64(NS_LITERAL_STRING("childID"), &childID);
+if (NS_WARN_IF(childID == CONTENT_PROCESS_ID_UNKNOWN)) {
+return NS_ERROR_FAILURE;
+}
+mozilla::StaticMutexAutoLock lock(gMutex);
+for (auto iter = mStorage.Iter(); !iter.Done(); iter.Next()) {
+if (iter.Data()->mChildID == childID) {
+iter.Remove();
+}
+}
+return NS_OK;
}
void
IPCBlobInputStreamStorage::AddStream(nsIInputStream* aInputStream,
-const nsID& aID)
+const nsID& aID,
+uint64_t aChildID)
{
MOZ_ASSERT(aInputStream);
+StreamData* data = new StreamData();
+data->mInputStream = aInputStream;
+data->mChildID = aChildID;
mozilla::StaticMutexAutoLock lock(gMutex);
-mStorage.Put(aID, aInputStream);
+mStorage.Put(aID, data);
}
void
@@ -59,8 +123,8 @@ IPCBlobInputStreamStorage::GetStream(const nsID& aID,
nsIInputStream** aInputStream)
{
mozilla::StaticMutexAutoLock lock(gMutex);
-nsCOMPtr<nsIInputStream> stream = mStorage.Get(aID);
-if (!stream) {
+StreamData* data = mStorage.Get(aID);
+if (!data) {
*aInputStream = nullptr;
return;
}
@@ -72,18 +136,46 @@ IPCBlobInputStreamStorage::GetStream(const nsID& aID,
nsCOMPtr<nsIInputStream> replacementStream;
nsresult rv =
-NS_CloneInputStream(stream, getter_AddRefs(clonedStream),
+NS_CloneInputStream(data->mInputStream, getter_AddRefs(clonedStream),
getter_AddRefs(replacementStream));
if (NS_WARN_IF(NS_FAILED(rv))) {
return;
}
if (replacementStream) {
-mStorage.Put(aID, replacementStream);
+data->mInputStream = replacementStream;
}
clonedStream.forget(aInputStream);
}
+void
+IPCBlobInputStreamStorage::StoreCallback(const nsID& aID,
+IPCBlobInputStreamParentCallback* aCallback)
+{
+MOZ_ASSERT(aCallback);
+mozilla::StaticMutexAutoLock lock(gMutex);
+StreamData* data = mStorage.Get(aID);
+if (data) {
+MOZ_ASSERT(!data->mCallback);
+data->mCallback = aCallback;
+}
+}
+already_AddRefed<IPCBlobInputStreamParentCallback>
+IPCBlobInputStreamStorage::TakeCallback(const nsID& aID)
+{
+mozilla::StaticMutexAutoLock lock(gMutex);
+StreamData* data = mStorage.Get(aID);
+if (!data) {
+return nullptr;
+}
+RefPtr<IPCBlobInputStreamParentCallback> callback;
+data->mCallback.swap(callback);
+return callback.forget();
+}
} // namespace dom
} // namespace mozilla

View file

@@ -8,8 +8,8 @@
#define mozilla_dom_ipc_IPCBlobInputStreamStorage_h
#include "mozilla/RefPtr.h"
-#include "nsInterfaceHashtable.h"
-#include "nsISupportsImpl.h"
+#include "nsClassHashtable.h"
+#include "nsIObserver.h"
class nsIInputStream;
struct nsID;
@@ -17,10 +17,13 @@ struct nsID;
namespace mozilla {
namespace dom {
-class IPCBlobInputStreamStorage final
+class IPCBlobInputStreamParentCallback;
+class IPCBlobInputStreamStorage final : public nsIObserver
{
public:
-NS_INLINE_DECL_THREADSAFE_REFCOUNTING(IPCBlobInputStreamStorage);
+NS_DECL_THREADSAFE_ISUPPORTS
+NS_DECL_NSIOBSERVER
// This initializes the singleton and it must be called on the main-thread.
static void
@@ -30,7 +33,7 @@ public:
Get();
void
-AddStream(nsIInputStream* aInputStream, const nsID& aID);
+AddStream(nsIInputStream* aInputStream, const nsID& aID, uint64_t aChildID);
void
ForgetStream(const nsID& aID);
@@ -38,10 +41,27 @@ public:
void
GetStream(const nsID& aID, nsIInputStream** aInputStream);
+void
+StoreCallback(const nsID& aID, IPCBlobInputStreamParentCallback* aCallback);
+already_AddRefed<IPCBlobInputStreamParentCallback>
+TakeCallback(const nsID& aID);
private:
+IPCBlobInputStreamStorage();
~IPCBlobInputStreamStorage();
-nsInterfaceHashtable<nsIDHashKey, nsIInputStream> mStorage;
+struct StreamData
+{
+nsCOMPtr<nsIInputStream> mInputStream;
+RefPtr<IPCBlobInputStreamParentCallback> mCallback;
+// This is the Process ID connected with this inputStream. We need to store
+// this information in order to delete it if the child crashes/shutdowns.
+uint64_t mChildID;
+};
+nsClassHashtable<nsIDHashKey, StreamData> mStorage;
};
} // namespace dom

View file

@@ -0,0 +1,203 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "IPCBlobInputStreamThread.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/ipc/BackgroundChild.h"
#include "mozilla/ipc/PBackgroundChild.h"
#include "nsIIPCBackgroundChildCreateCallback.h"
#include "nsXPCOMPrivate.h"
namespace mozilla {
using namespace ipc;
namespace dom {
namespace {
StaticMutex gIPCBlobThreadMutex;
StaticRefPtr<IPCBlobInputStreamThread> gIPCBlobThread;
bool gShutdownHasStarted = false;
class ThreadInitializeRunnable final : public Runnable
{
public:
NS_IMETHOD
Run() override
{
mozilla::StaticMutexAutoLock lock(gIPCBlobThreadMutex);
MOZ_ASSERT(gIPCBlobThread);
gIPCBlobThread->Initialize();
return NS_OK;
}
};
class MigrateActorRunnable final : public Runnable
, public nsIIPCBackgroundChildCreateCallback
{
public:
NS_DECL_ISUPPORTS_INHERITED
explicit MigrateActorRunnable(IPCBlobInputStreamChild* aActor)
: mActor(aActor)
{
MOZ_ASSERT(mActor);
}
NS_IMETHOD
Run() override
{
BackgroundChild::GetOrCreateForCurrentThread(this);
return NS_OK;
}
void
ActorFailed() override
{
// We cannot continue. We are probably shutting down.
}
void
ActorCreated(mozilla::ipc::PBackgroundChild* aActor) override
{
MOZ_ASSERT(mActor->State() == IPCBlobInputStreamChild::eInactiveMigrating);
if (aActor->SendPIPCBlobInputStreamConstructor(mActor, mActor->ID(),
mActor->Size())) {
// We need manually to increase the reference for this actor because the
// IPC allocator method is not triggered. The Release() is called by IPDL
// when the actor is deleted.
mActor.get()->AddRef();
mActor->Migrated();
}
}
private:
~MigrateActorRunnable() = default;
RefPtr<IPCBlobInputStreamChild> mActor;
};
NS_IMPL_ISUPPORTS_INHERITED(MigrateActorRunnable, Runnable,
nsIIPCBackgroundChildCreateCallback)
} // anonymous
NS_IMPL_ISUPPORTS(IPCBlobInputStreamThread, nsIObserver)
/* static */ bool
IPCBlobInputStreamThread::IsOnFileThread(nsIThread* aThread)
{
MOZ_ASSERT(aThread);
mozilla::StaticMutexAutoLock lock(gIPCBlobThreadMutex);
return gIPCBlobThread && aThread == gIPCBlobThread->mThread;
}
/* static */ IPCBlobInputStreamThread*
IPCBlobInputStreamThread::GetOrCreate()
{
mozilla::StaticMutexAutoLock lock(gIPCBlobThreadMutex);
if (gShutdownHasStarted) {
return nullptr;
}
if (!gIPCBlobThread) {
gIPCBlobThread = new IPCBlobInputStreamThread();
gIPCBlobThread->Initialize();
}
return gIPCBlobThread;
}
void
IPCBlobInputStreamThread::Initialize()
{
if (!NS_IsMainThread()) {
NS_DispatchToMainThread(new ThreadInitializeRunnable());
return;
}
nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
if (NS_WARN_IF(!obs)) {
return;
}
nsresult rv =
obs->AddObserver(this, NS_XPCOM_SHUTDOWN_THREADS_OBSERVER_ID, false);
if (NS_WARN_IF(NS_FAILED(rv))) {
return;
}
nsCOMPtr<nsIThread> thread;
rv = NS_NewNamedThread("DOM File", getter_AddRefs(thread));
if (NS_WARN_IF(NS_FAILED(rv))) {
return;
}
mThread = thread;
if (!mPendingActors.IsEmpty()) {
for (uint32_t i = 0; i < mPendingActors.Length(); ++i) {
MigrateActorInternal(mPendingActors[i]);
}
mPendingActors.Clear();
}
}
NS_IMETHODIMP
IPCBlobInputStreamThread::Observe(nsISupports* aSubject,
const char* aTopic,
const char16_t* aData)
{
MOZ_ASSERT(!strcmp(aTopic, NS_XPCOM_SHUTDOWN_THREADS_OBSERVER_ID));
mozilla::StaticMutexAutoLock lock(gIPCBlobThreadMutex);
if (mThread) {
mThread->Shutdown();
mThread = nullptr;
}
gShutdownHasStarted = true;
gIPCBlobThread = nullptr;
return NS_OK;
}
void
IPCBlobInputStreamThread::MigrateActor(IPCBlobInputStreamChild* aActor)
{
MOZ_ASSERT(aActor->State() == IPCBlobInputStreamChild::eInactiveMigrating);
mozilla::StaticMutexAutoLock lock(gIPCBlobThreadMutex);
if (gShutdownHasStarted) {
return;
}
if (!mThread) {
// The thread is not initialized yet.
mPendingActors.AppendElement(aActor);
return;
}
MigrateActorInternal(aActor);
}
void
IPCBlobInputStreamThread::MigrateActorInternal(IPCBlobInputStreamChild* aActor)
{
RefPtr<Runnable> runnable = new MigrateActorRunnable(aActor);
mThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
}
} // dom namespace
} // mozilla namespace

View file

@@ -0,0 +1,53 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_dom_IPCBlobInputStreamThread_h
#define mozilla_dom_IPCBlobInputStreamThread_h
#include "nsIObserverService.h"
class nsIThread;
namespace mozilla {
namespace dom {
class IPCBlobInputStreamChild;
class IPCBlobInputStreamThread final : public nsIObserver
{
public:
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSIOBSERVER
static bool
IsOnFileThread(nsIThread* aThread);
static IPCBlobInputStreamThread*
GetOrCreate();
void
MigrateActor(IPCBlobInputStreamChild* aActor);
void
Initialize();
private:
~IPCBlobInputStreamThread() = default;
void
MigrateActorInternal(IPCBlobInputStreamChild* aActor);
nsCOMPtr<nsIThread> mThread;
// This is populated if MigrateActor() is called before the initialization of
// the thread.
nsTArray<RefPtr<IPCBlobInputStreamChild>> mPendingActors;
};
} // dom namespace
} // mozilla namespace
#endif // mozilla_dom_IPCBlobInputStreamThread_h

View file

@@ -11,6 +11,7 @@
#include "IPCBlobInputStreamStorage.h"
#include "mozilla/dom/IPCBlob.h"
#include "mozilla/dom/nsIContentParent.h"
+#include "mozilla/ipc/BackgroundParent.h"
#include "mozilla/ipc/IPCStreamUtils.h"
#include "StreamBlobImpl.h"
#include "prtime.h"
@@ -79,14 +80,15 @@ Deserialize(const IPCBlob& aIPCBlob)
template<typename M>
nsresult
SerializeInputStreamParent(nsIInputStream* aInputStream, uint64_t aSize,
-IPCBlob& aIPCBlob, M* aManager)
+uint64_t aChildID, IPCBlob& aIPCBlob, M* aManager)
{
// Parent to Child we always send a IPCBlobInputStream.
MOZ_ASSERT(XRE_IsParentProcess());
nsresult rv;
IPCBlobInputStreamParent* parentActor =
-IPCBlobInputStreamParent::Create(aInputStream, aSize, &rv, aManager);
+IPCBlobInputStreamParent::Create(aInputStream, aSize, aChildID, &rv,
+aManager);
if (!parentActor) {
return rv;
}
@@ -117,32 +119,62 @@ SerializeInputStreamChild(nsIInputStream* aInputStream, IPCBlob& aIPCBlob,
nsresult
SerializeInputStream(nsIInputStream* aInputStream, uint64_t aSize,
-IPCBlob& aIPCBlob, nsIContentParent* aManager)
+uint64_t aChildID, IPCBlob& aIPCBlob,
+nsIContentParent* aManager)
{
-return SerializeInputStreamParent(aInputStream, aSize, aIPCBlob, aManager);
+return SerializeInputStreamParent(aInputStream, aSize, aChildID, aIPCBlob,
+aManager);
}
nsresult
SerializeInputStream(nsIInputStream* aInputStream, uint64_t aSize,
-IPCBlob& aIPCBlob, PBackgroundParent* aManager)
+uint64_t aChildID, IPCBlob& aIPCBlob,
+PBackgroundParent* aManager)
{
-return SerializeInputStreamParent(aInputStream, aSize, aIPCBlob, aManager);
+return SerializeInputStreamParent(aInputStream, aSize, aChildID, aIPCBlob,
+aManager);
}
nsresult
SerializeInputStream(nsIInputStream* aInputStream, uint64_t aSize,
-IPCBlob& aIPCBlob, nsIContentChild* aManager)
+uint64_t aChildID, IPCBlob& aIPCBlob,
+nsIContentChild* aManager)
{
return SerializeInputStreamChild(aInputStream, aIPCBlob, aManager);
}
nsresult
SerializeInputStream(nsIInputStream* aInputStream, uint64_t aSize,
-IPCBlob& aIPCBlob, PBackgroundChild* aManager)
+uint64_t aChildID, IPCBlob& aIPCBlob,
+PBackgroundChild* aManager)
{
return SerializeInputStreamChild(aInputStream, aIPCBlob, aManager);
}
+uint64_t
+ChildIDFromManager(nsIContentParent* aManager)
+{
+return aManager->ChildID();
+}
+uint64_t
+ChildIDFromManager(PBackgroundParent* aManager)
+{
+return BackgroundParent::GetChildID(aManager);
+}
+uint64_t
+ChildIDFromManager(nsIContentChild* aManager)
+{
+return 0;
+}
+uint64_t
+ChildIDFromManager(PBackgroundChild* aManager)
+{
+return 0;
+}
template<typename M>
nsresult
SerializeInternal(BlobImpl* aBlobImpl, M* aManager, IPCBlob& aIPCBlob)
@@ -194,7 +226,8 @@ SerializeInternal(BlobImpl* aBlobImpl, M* aManager, IPCBlob& aIPCBlob)
return rv.StealNSResult();
}
-rv = SerializeInputStream(inputStream, aIPCBlob.size(), aIPCBlob, aManager);
+rv = SerializeInputStream(inputStream, aIPCBlob.size(),
+ChildIDFromManager(aManager), aIPCBlob, aManager);
if (NS_WARN_IF(rv.Failed())) {
return rv.StealNSResult();
}

View file

@@ -129,6 +129,55 @@
* callback will be executed and, from that moment, any IPCBlobInputStream
* method will be forwarded to the 'real' stream ones. This means that the
* reading will be available.
+*
+* DOM-File Thread
+* ~~~~~~~~~~~~~~~
+*
+* IPCBlobInputStreamChild actor can be created in any thread (sort of) and
+* their top-level IPDL protocol is PBackground. These actors are wrapped by 1
+* or more IPCBlobInputStream objects in order to expose nsIInputStream
+* interface and be thread-safe.
+*
+* But IPDL actors are not thread-safe and any SendFoo() method must be executed
+* on the owning thread. This means that this thread must be kept alive for the
+* life-time of the IPCBlobInputStream.
+*
+* In doing this, there are 2 main issues:
+* a. if a remote Blob is created on a worker (because of a
+* BroadcastChannel/MessagePort for instance) and it sent to the main-thread
+* via PostMessage(), we have to keep that worker alive.
+* b. if the remote Blob is created on the main-thread, any SendFoo() has to be
+* executed on the main-thread. This is true also when the inputStream is
+* used on another thread (note that nsIInputStream could do I/O and usually
+* they are used on special I/O threads).
+*
+* In order to avoid this, IPCBlobInputStreamChild are 'migrated' to a DOM-File
+* thread. This is done in this way:
+*
+* 1. If IPCBlobInputStreamChild actor is not already owned by DOM-File thread,
+* it calls Send__delete__ in order to inform the parent side that we don't
+* need this IPC channel on the current thread.
+* 3. IPCBlobInputStreamParent::Recv__delete__ is called on the parent side and
+* the parent actor is deleted. Doing this we don't remove the UUID from
+* IPCBlobInputStreamStorage.
+* 4. When IPCBlobInputStreamChild::ActorDestroy() is called, we are sure that
+* the IPC channel is completely released. IPCBlobInputStreamThread is be
+* used to assign IPCBlobInputStreamChild actor to the DOM-File thread.
+* IPCBlobInputStreamThread::GetOrCreate() creates the DOM-File thread if it
+* doesn't exist yet and it initializes PBackground on it if needed.
+* 5. IPCBlobInputStreamChild is reused on the DOM-File thread for the creation
+* of a new IPCBlobInputStreamParent actor on the parent side. Doing this,
+* IPCBlobInputStreamChild will now be owned by the DOM-File thread.
+* 6. When the new IPCBlobInputStreamParent actor is created, it will receive
+* the same UUID of the previous parent actor. The nsIInputStream will be
+* retrieved from IPCBlobInputStreamStorage.
+* 7. In order to avoid leaks, IPCBlobInputStreamStorage will monitor child
+* processes and in case one of them dies, it will release the
+* nsIInputStream objects belonging to that process.
+*
+* If any API wants to retrieve a 'real inputStream when the migration is in
+* progress, that operation is stored in a pending queue and processed at the
+* end of the migration.
*/
namespace mozilla {

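The numbered walkthrough above corresponds to a fairly small amount of child-side code; a condensed sketch of the hand-off, pieced together from the IPCBlobInputStreamChild and IPCBlobInputStreamThread hunks earlier in this commit (not a literal excerpt):

// CreateStream() off the DOM-File thread asks for a tear-down (step 1).
if (mState == eActive &&
    !IPCBlobInputStreamThread::IsOnFileThread(mOwningThread)) {
  mState = eActiveMigrating;
  Send__delete__(this); // the parent marks itself as migrating
}

// Once the channel is really gone, hop to the DOM-File thread (step 4).
// In ActorDestroy():
if (migrating) {
  IPCBlobInputStreamThread::GetOrCreate()->MigrateActor(this);
}

// On the DOM-File thread, the same actor and UUID are rebound over
// PBackground, and Migrated() flips the state back to eActive (steps 5-6):
backgroundChild->SendPIPCBlobInputStreamConstructor(actor, actor->ID(),
                                                    actor->Size());
actor->Migrated();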
View file

@@ -20,10 +20,23 @@ protocol PIPCBlobInputStream
parent:
async StreamNeeded();
+// When this is called, the parent releases the inputStream and sends a
+// __delete__.
async Close();
child:
async StreamReady(OptionalIPCStream aStream);
+both:
+// __delete__ can be called by parent and by child for 2 reasons:
+// - parent->child: This happens after a Close(). The child wants to inform
+// the parent that no other messages will be dispatched and
+// that the channel can be interrupted.
+// - child->parent: before any operation, the child could start a migration
+// from the current thread to a dedicated DOM-File one. The
+// reason why a __delete__ is sent from child to parent is
+// because it doesn't require any additional runnables.
async __delete__();
};

View file

@@ -22,6 +22,7 @@ UNIFIED_SOURCES += [
'IPCBlobInputStreamChild.cpp',
'IPCBlobInputStreamParent.cpp',
'IPCBlobInputStreamStorage.cpp',
+'IPCBlobInputStreamThread.cpp',
'IPCBlobUtils.cpp',
'PendingIPCBlobChild.cpp',
'PendingIPCBlobParent.cpp',
@@ -38,6 +39,7 @@ LOCAL_INCLUDES += [
'/dom/file',
'/dom/ipc',
'/dom/workers',
+'/xpcom/build',
]
include('/ipc/chromium/chromium-config.mozbuild')

View file

@@ -9,32 +9,111 @@
<body>
<script type="text/javascript">
-function workerScript() {
-onmessage = e => {
-e.ports[0].onmessage = event => {
-let reader = new FileReader();
-reader.readAsText(event.data);
-reader.onloadend = () => {
-let status = reader.result == 'hello world';
-postMessage(status);
-}
-}
-}
-}
-let mc = new MessageChannel();
-mc.port1.postMessage(new Blob(['hello world']));
-let workerUrl = URL.createObjectURL(new Blob(["(", workerScript.toSource(), ")()"]));
-let worker = new Worker(workerUrl);
-worker.postMessage("", [mc.port2]);
-worker.onmessage = event => {
-ok(event.data, "All is done!");
-SimpleTest.finish();
-}
+function test_workerOwner() {
+info("test_workerOwner");
+function workerScript() {
+onmessage = e => {
+e.ports[0].onmessage = event => {
+let reader = new FileReader();
+reader.readAsText(event.data);
+reader.onloadend = () => {
+let status = reader.result == 'hello world';
+postMessage(status);
+}
+}
+}
+}
+let mc = new MessageChannel();
+mc.port1.postMessage(new Blob(['hello world']));
+let workerUrl = URL.createObjectURL(new Blob(["(", workerScript.toSource(), ")()"]));
+let worker = new Worker(workerUrl);
+worker.postMessage("", [mc.port2]);
+worker.onmessage = event => {
+ok(event.data, "All is done!");
+next();
+}
+}
+function test_workerToMainThread() {
+info("test_workerToMainThread");
+function workerScript() {
+onmessage = e => {
+e.ports[0].onmessage = event => {
+postMessage(event.data);
+}
+}
+}
+let mc = new MessageChannel();
+mc.port1.postMessage(new Blob(['hello world']));
+let workerUrl = URL.createObjectURL(new Blob(["(", workerScript.toSource(), ")()"]));
+let worker = new Worker(workerUrl);
+worker.postMessage("", [mc.port2]);
+worker.onmessage = event => {
+info("Blob received back, terminate the worker and force GC");
+worker.terminate();
+worker = null;
+SpecialPowers.forceGC();
+var fr = new FileReader();
+fr.readAsText(event.data);
+fr.onloadend = () => {
+is(fr.result, "hello world", "Data matches");
+next();
+}
+}
+}
+function test_workerOwnerPlusFileReaderSync() {
+info("test_workerOwnerPlusFileReaderSync");
+function workerScript() {
+onmessage = e => {
+e.ports[0].onmessage = event => {
+let reader = new FileReaderSync();
+let status = reader.readAsText(event.data) == 'hello world';
+postMessage(status);
+}
+}
+}
+let mc = new MessageChannel();
+mc.port1.postMessage(new Blob(['hello world']));
+let workerUrl = URL.createObjectURL(new Blob(["(", workerScript.toSource(), ")()"]));
+let worker = new Worker(workerUrl);
+worker.postMessage("", [mc.port2]);
+worker.onmessage = event => {
+ok(event.data, "All is done!");
+next();
+}
+}
+var tests = [
+test_workerOwner,
+test_workerToMainThread,
+test_workerOwnerPlusFileReaderSync,
+];
+function next() {
+if (!tests.length) {
+SimpleTest.finish();
+return;
+}
+var test = tests.shift();
+test();
+}
SimpleTest.waitForExplicitFinish();
+next();
</script>
</pre>

View file

@@ -767,6 +767,14 @@ DXGITextureHostD3D11::SetTextureSourceProvider(TextureSourceProvider* aProvider)
return;
}
+if (mDevice && (aProvider->GetD3D11Device() != mDevice)) {
+if (mTextureSource) {
+mTextureSource->Reset();
+}
+mTextureSource = nullptr;
+return;
+}
mProvider = aProvider;
mDevice = aProvider->GetD3D11Device();
@@ -1181,23 +1189,6 @@ DataTextureSourceD3D11::GetTileRect()
return IntRect(rect.x, rect.y, rect.width, rect.height);
}
-void
-DataTextureSourceD3D11::SetTextureSourceProvider(TextureSourceProvider* aProvider)
-{
-ID3D11Device* newDevice = aProvider ? aProvider->GetD3D11Device() : nullptr;
-if (!mDevice) {
-mDevice = newDevice;
-} else if (mDevice != newDevice) {
-// We do not support switching devices.
-Reset();
-mDevice = nullptr;
-}
-if (mNextSibling) {
-mNextSibling->SetTextureSourceProvider(aProvider);
-}
-}
CompositingRenderTargetD3D11::CompositingRenderTargetD3D11(ID3D11Texture2D* aTexture,
const gfx::IntPoint& aOrigin,
DXGI_FORMAT aFormatOverride)

View file

@@ -246,8 +246,6 @@ public:
   virtual gfx::SurfaceFormat GetFormat() const override { return mFormat; }
 
-  virtual void SetTextureSourceProvider(TextureSourceProvider* aProvider) override;
-
   // BigImageIterator
   virtual BigImageIterator* AsBigImageIterator() override { return mIsTiled ? this : nullptr; }

@@ -266,11 +264,10 @@ public:
     mCurrentTile = 0;
   }
 
+  void Reset();
+
 protected:
   gfx::IntRect GetTileRect(uint32_t aIndex) const;
 
-  void Reset();
-
   std::vector< RefPtr<ID3D11Texture2D> > mTileTextures;
   std::vector< RefPtr<ID3D11ShaderResourceView> > mTileSRVs;
   RefPtr<ID3D11Device> mDevice;

View file

@@ -212,6 +212,10 @@ private:
   static intptr_t
   GetRawContentParentForComparison(PBackgroundParent* aBackgroundActor);
 
+  // Forwarded from BackgroundParent.
+  static uint64_t
+  GetChildID(PBackgroundParent* aBackgroundActor);
+
   // Forwarded from BackgroundParent.
   static bool
   Alloc(ContentParent* aContent,

@@ -816,6 +820,13 @@ BackgroundParent::GetRawContentParentForComparison(
   return ParentImpl::GetRawContentParentForComparison(aBackgroundActor);
 }
 
+// static
+uint64_t
+BackgroundParent::GetChildID(PBackgroundParent* aBackgroundActor)
+{
+  return ParentImpl::GetChildID(aBackgroundActor);
+}
+
 // static
 bool
 BackgroundParent::Alloc(ContentParent* aContent,

@@ -968,6 +979,26 @@ ParentImpl::GetRawContentParentForComparison(
   return intptr_t(static_cast<nsIContentParent*>(actor->mContent.get()));
 }
 
+// static
+uint64_t
+ParentImpl::GetChildID(PBackgroundParent* aBackgroundActor)
+{
+  AssertIsOnBackgroundThread();
+  MOZ_ASSERT(aBackgroundActor);
+
+  auto actor = static_cast<ParentImpl*>(aBackgroundActor);
+  if (actor->mActorDestroyed) {
+    MOZ_ASSERT(false, "GetContentParent called after ActorDestroy was called!");
+    return 0;
+  }
+
+  if (actor->mContent) {
+    return actor->mContent->ChildID();
+  }
+
+  return 0;
+}
+
 // static
 bool
 ParentImpl::Alloc(ContentParent* aContent,

View file

@@ -64,6 +64,9 @@ public:
   static intptr_t
   GetRawContentParentForComparison(PBackgroundParent* aBackgroundActor);
 
+  static uint64_t
+  GetChildID(PBackgroundParent* aBackgroundActor);
+
 private:
   // Only called by ContentParent for cross-process actors.
   static bool

View file

@@ -262,7 +262,22 @@ PIPCBlobInputStreamParent*
 BackgroundParentImpl::AllocPIPCBlobInputStreamParent(const nsID& aID,
                                                      const uint64_t& aSize)
 {
-  MOZ_CRASH("PIPCBlobInputStreamParent actors should be manually constructed!");
+  AssertIsInMainProcess();
+  AssertIsOnBackgroundThread();
+
+  return mozilla::dom::IPCBlobInputStreamParent::Create(aID, aSize, this);
+}
+
+mozilla::ipc::IPCResult
+BackgroundParentImpl::RecvPIPCBlobInputStreamConstructor(PIPCBlobInputStreamParent* aActor,
+                                                         const nsID& aID,
+                                                         const uint64_t& aSize)
+{
+  if (!static_cast<mozilla::dom::IPCBlobInputStreamParent*>(aActor)->HasValidStream()) {
+    return IPC_FAIL_NO_REASON(this);
+  }
+
+  return IPC_OK();
 }
 
 bool
Просмотреть файл

@ -73,6 +73,11 @@ protected:
AllocPIPCBlobInputStreamParent(const nsID& aID, AllocPIPCBlobInputStreamParent(const nsID& aID,
const uint64_t& aSize) override; const uint64_t& aSize) override;
virtual mozilla::ipc::IPCResult
RecvPIPCBlobInputStreamConstructor(PIPCBlobInputStreamParent* aActor,
const nsID& aID,
const uint64_t& aSize) override;
virtual bool virtual bool
DeallocPIPCBlobInputStreamParent(PIPCBlobInputStreamParent* aActor) override; DeallocPIPCBlobInputStreamParent(PIPCBlobInputStreamParent* aActor) override;

View file

@@ -123,11 +123,13 @@ child:
   async PParentToChildStream();
 
-  async PIPCBlobInputStream(nsID aID, uint64_t aSize);
-
   async PPendingIPCBlob(IPCBlob blob);
 
 both:
+  // PIPCBlobInputStream is created on the parent side only if the child starts
+  // a migration.
+  async PIPCBlobInputStream(nsID aID, uint64_t aSize);
+
   async PFileDescriptorSet(FileDescriptor fd);
 };

View file

@@ -204,6 +204,7 @@ protected:
   friend class IToplevelProtocol;
 
   void SetId(int32_t aId) { mId = aId; }
+  void ResetManager() { mManager = nullptr; }
   void SetManager(IProtocol* aManager);
   void SetIPCChannel(MessageChannel* aChannel) { mChannel = aChannel; }

View file

@@ -277,61 +277,58 @@ DoTypeUpdateFallback(JSContext* cx, BaselineFrame* frame, ICUpdatedStub* stub, H
     FallbackICSpew(cx, stub->getChainFallback(), "TypeUpdate(%s)",
                    ICStub::KindString(stub->kind()));
 
+    MOZ_ASSERT(stub->isCacheIR_Updated());
+
     RootedScript script(cx, frame->script());
     RootedObject obj(cx, &objval.toObject());
-    RootedId id(cx);
 
-    switch (stub->kind()) {
-      case ICStub::CacheIR_Updated: {
-        id = stub->toCacheIR_Updated()->updateStubId();
-        MOZ_ASSERT(id != JSID_EMPTY);
+    RootedId id(cx, stub->toCacheIR_Updated()->updateStubId());
+    MOZ_ASSERT(id != JSID_EMPTY);
 
     // The group should match the object's group, except when the object is
     // an unboxed expando object: in that case, the group is the group of
     // the unboxed object.
     RootedObjectGroup group(cx, stub->toCacheIR_Updated()->updateStubGroup());
 #ifdef DEBUG
     if (obj->is<UnboxedExpandoObject>())
         MOZ_ASSERT(group->clasp() == &UnboxedPlainObject::class_);
     else
         MOZ_ASSERT(obj->group() == group);
 #endif
 
     // If we're storing null/undefined to a typed object property, check if
     // we want to include it in this property's type information.
+    bool addType = true;
     if (MOZ_UNLIKELY(obj->is<TypedObject>()) && value.isNullOrUndefined()) {
         StructTypeDescr* structDescr = &obj->as<TypedObject>().typeDescr().as<StructTypeDescr>();
         size_t fieldIndex;
         MOZ_ALWAYS_TRUE(structDescr->fieldIndex(id, &fieldIndex));
 
         TypeDescr* fieldDescr = &structDescr->fieldDescr(fieldIndex);
         ReferenceTypeDescr::Type type = fieldDescr->as<ReferenceTypeDescr>().type();
         if (type == ReferenceTypeDescr::TYPE_ANY) {
             // Ignore undefined values, which are included implicitly in type
             // information for this property.
             if (value.isUndefined())
-                break;
+                addType = false;
         } else {
             MOZ_ASSERT(type == ReferenceTypeDescr::TYPE_OBJECT);
 
             // Ignore null values being written here. Null is included
             // implicitly in type information for this property. Note that
             // non-object, non-null values are not possible here, these
             // should have been filtered out by the IR emitter.
             if (value.isNull())
-                break;
+                addType = false;
         }
     }
 
-        JSObject* maybeSingleton = obj->isSingleton() ? obj.get() : nullptr;
-        AddTypePropertyId(cx, group, maybeSingleton, id, value);
-        break;
-      }
-      default:
-        MOZ_CRASH("Invalid stub");
+    if (MOZ_LIKELY(addType)) {
+        JSObject* maybeSingleton = obj->isSingleton() ? obj.get() : nullptr;
+        AddTypePropertyId(cx, group, maybeSingleton, id, value);
     }
 
-    if (!stub->addUpdateStubForValue(cx, script /* = outerScript */, obj, id, value)) {
+    if (MOZ_UNLIKELY(!stub->addUpdateStubForValue(cx, script, obj, id, value))) {
         // The calling JIT code assumes this function is infallible (for
         // instance we may reallocate dynamic slots before calling this),
         // so ignore OOMs if we failed to attach a stub.

@@ -381,16 +378,8 @@ ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm)
     if (flags_ & TypeToFlag(JSVAL_TYPE_SYMBOL))
         masm.branchTestSymbol(Assembler::Equal, R0, &success);
 
-    // Currently, we will never generate primitive stub checks for object.  However,
-    // when we do get to the point where we want to collapse our monitor chains of
-    // objects and singletons down (when they get too long) to a generic "any object"
-    // in coordination with the typeset doing the same thing, this will need to
-    // be re-enabled.
-    /*
     if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
         masm.branchTestObject(Assembler::Equal, R0, &success);
-    */
-    MOZ_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
 
     if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
         masm.branchTestNull(Assembler::Equal, R0, &success);

@@ -451,6 +440,15 @@ ICTypeUpdate_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm)
     return true;
 }
 
+bool
+ICTypeUpdate_AnyValue::Compiler::generateStubCode(MacroAssembler& masm)
+{
+    // AnyValue always matches so return true.
+    masm.mov(ImmWord(1), R1.scratchReg());
+    EmitReturnFromIC(masm);
+    return true;
+}
+
 //
 // ToBool_Fallback
 //

View file

@@ -191,6 +191,30 @@ class ICTypeUpdate_ObjectGroup : public ICStub
     };
 };
 
+class ICTypeUpdate_AnyValue : public ICStub
+{
+    friend class ICStubSpace;
+
+    explicit ICTypeUpdate_AnyValue(JitCode* stubCode)
+      : ICStub(TypeUpdate_AnyValue, stubCode)
+    {}
+
+  public:
+    class Compiler : public ICStubCompiler {
+      protected:
+        MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
+
+      public:
+        explicit Compiler(JSContext* cx)
+          : ICStubCompiler(cx, TypeUpdate_AnyValue, Engine::Baseline)
+        {}
+
+        ICTypeUpdate_AnyValue* getStub(ICStubSpace* space) {
+            return newStub<ICTypeUpdate_AnyValue>(space, getStubCode());
+        }
+    };
+};
+
 // ToBool
 //      JSOP_IFNE

View file

@@ -24,6 +24,7 @@ namespace jit {
     _(TypeUpdate_SingleObject)     \
     _(TypeUpdate_ObjectGroup)      \
     _(TypeUpdate_PrimitiveSet)     \
+    _(TypeUpdate_AnyValue)         \
                                    \
     _(NewArray_Fallback)           \
     _(NewObject_Fallback)          \

View file

@@ -428,6 +428,22 @@ ICTypeMonitor_Fallback::resetMonitorStubChain(Zone* zone)
     }
 }
 
+void
+ICUpdatedStub::resetUpdateStubChain(Zone* zone)
+{
+    while (!firstUpdateStub_->isTypeUpdate_Fallback()) {
+        if (zone->needsIncrementalBarrier()) {
+            // We are removing edges from update stubs to gcthings (JitCode).
+            // Perform one final trace of all update stubs for incremental GC,
+            // as it must know about those edges.
+            firstUpdateStub_->trace(zone->barrierTracer());
+        }
+        firstUpdateStub_ = firstUpdateStub_->next();
+    }
+
+    numOptimizedStubs_ = 0;
+}
+
 ICMonitoredStub::ICMonitoredStub(Kind kind, JitCode* stubCode, ICStub* firstMonitorStub)
   : ICStub(kind, ICStub::Monitored, stubCode),
     firstMonitorStub_(firstMonitorStub)

@@ -2531,12 +2547,6 @@ bool
 ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, HandleObject obj,
                                      HandleId id, HandleValue val)
 {
-    if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS) {
-        // TODO: if the TypeSet becomes unknown or has the AnyObject type,
-        // replace stubs with a single stub to handle these.
-        return true;
-    }
-
     EnsureTrackPropertyTypes(cx, obj, id);
 
     // Make sure that undefined values are explicitly included in the property

@@ -2544,7 +2554,38 @@ ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, Ha
     if (val.isUndefined() && CanHaveEmptyPropertyTypesForOwnProperty(obj))
         AddTypePropertyId(cx, obj, id, val);
 
-    if (val.isPrimitive()) {
+    HeapTypeSet* types = nullptr;
+    if (!obj->group()->unknownProperties()) {
+        types = obj->group()->maybeGetProperty(id);
+        MOZ_ASSERT(types);
+    }
+
+    // Don't attach too many SingleObject/ObjectGroup stubs unless we can
+    // replace them with a single PrimitiveSet or AnyValue stub.
+    if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS &&
+        val.isObject() &&
+        (types && !types->unknownObject()))
+    {
+        return true;
+    }
+
+    if (!types || types->unknown()) {
+        // Attach a stub that always succeeds. We should not have a
+        // TypeUpdate_AnyValue stub yet.
+        MOZ_ASSERT(!hasTypeUpdateStub(TypeUpdate_AnyValue));
+
+        // Discard existing stubs.
+        resetUpdateStubChain(cx->zone());
+
+        ICTypeUpdate_AnyValue::Compiler compiler(cx);
+        ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript));
+        if (!stub)
+            return false;
+
+        JitSpew(JitSpew_BaselineIC, "  Added TypeUpdate stub %p for any value", stub);
+        addOptimizedUpdateStub(stub);
+    } else if (val.isPrimitive() || types->unknownObject()) {
         JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
 
         // Check for existing TypeUpdate stub.

@@ -2552,11 +2593,17 @@ ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, Ha
         for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
             if (iter->isTypeUpdate_PrimitiveSet()) {
                 existingStub = iter->toTypeUpdate_PrimitiveSet();
-                if (existingStub->containsType(type))
-                    return true;
+                MOZ_ASSERT(!existingStub->containsType(type));
             }
         }
 
+        if (val.isObject()) {
+            // Discard existing ObjectGroup/SingleObject stubs.
+            resetUpdateStubChain(cx->zone());
+            if (existingStub)
+                addOptimizedUpdateStub(existingStub);
+        }
+
         ICTypeUpdate_PrimitiveSet::Compiler compiler(cx, existingStub, type);
         ICStub* stub = existingStub ? compiler.updateStub()
                                     : compiler.getStub(compiler.getStubSpace(outerScript));

@@ -2573,14 +2620,13 @@ ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, Ha
     } else if (val.toObject().isSingleton()) {
         RootedObject obj(cx, &val.toObject());
 
-        // Check for existing TypeUpdate stub.
+#ifdef DEBUG
+        // We should not have a stub for this object.
         for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
-            if (iter->isTypeUpdate_SingleObject() &&
-                iter->toTypeUpdate_SingleObject()->object() == obj)
-            {
-                return true;
-            }
+            MOZ_ASSERT_IF(iter->isTypeUpdate_SingleObject(),
+                          iter->toTypeUpdate_SingleObject()->object() != obj);
         }
+#endif
 
         ICTypeUpdate_SingleObject::Compiler compiler(cx, obj);
         ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript));

@@ -2594,14 +2640,13 @@ ICUpdatedStub::addUpdateStubForValue(JSContext* cx, HandleScript outerScript, Ha
     } else {
         RootedObjectGroup group(cx, val.toObject().group());
 
-        // Check for existing TypeUpdate stub.
+#ifdef DEBUG
+        // We should not have a stub for this group.
         for (ICStubConstIterator iter(firstUpdateStub_); !iter.atEnd(); iter++) {
-            if (iter->isTypeUpdate_ObjectGroup() &&
-                iter->toTypeUpdate_ObjectGroup()->group() == group)
-            {
-                return true;
-            }
+            MOZ_ASSERT_IF(iter->isTypeUpdate_ObjectGroup(),
+                          iter->toTypeUpdate_ObjectGroup()->group() != group);
         }
+#endif
 
         ICTypeUpdate_ObjectGroup::Compiler compiler(cx, group);
         ICStub* stub = compiler.getStub(compiler.getStubSpace(outerScript));

View file

@@ -962,6 +962,8 @@ class ICUpdatedStub : public ICStub
         return firstUpdateStub_;
     }
 
+    void resetUpdateStubChain(Zone* zone);
+
     bool hasTypeUpdateStub(ICStub::Kind kind) {
         ICStub* stub = firstUpdateStub_;
         do {

View file

@@ -1620,6 +1620,8 @@ void
 AssemblerMIPSShared::as_sync(uint32_t stype)
 {
     MOZ_ASSERT(stype <= 31);
+    if (isLoongson())
+        stype = 0;
     writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
 }

View file

@@ -183,6 +183,10 @@ END_TEST(testGCHeapPostBarriers)
 
 BEGIN_TEST(testUnbarrieredEquality)
 {
+#ifdef JS_GC_ZEAL
+    AutoLeaveZeal nozeal(cx);
+#endif /* JS_GC_ZEAL */
+
     // Use ArrayBuffers because they have finalizers, which allows using them
     // in ObjectPtr without awkward conversations about nursery allocatability.
     JS::RootedObject robj(cx, JS_NewArrayBuffer(cx, 20));

View file

@@ -5,12 +5,7 @@ const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
 Cu.import("resource://gre/modules/Services.jsm");
 Cu.import("resource://gre/modules/NetUtil.jsm");
 Cu.import("resource://gre/modules/Timer.jsm");
+Cu.import("resource://testing-common/TestUtils.jsm");
-
-function promiseEvent(target, event) {
-  return new Promise(resolve => {
-    target.addEventListener(event, resolve, {capture: true, once: true});
-  });
-}
 
 let aps = Cc["@mozilla.org/addons/policy-service;1"].getService(Ci.nsIAddonPolicyService).wrappedJSObject;

@@ -63,8 +58,8 @@ add_task(function*() {
 
   webnavB.close();
 
-  // Wrappers are nuked asynchronously, so wait a tick.
-  yield new Promise(resolve => setTimeout(resolve, 0));
+  // Wrappers are destroyed asynchronously, so wait for that to happen.
+  yield TestUtils.topicObserved("inner-window-destroyed");
 
   // Check that it can't be accessed after he window has been closed.
   let result = getThing();

View file

@@ -3920,7 +3920,7 @@ ContainerState::SetupMaskLayerForCSSMask(Layer* aLayer,
   maskCtx->SetMatrix(gfxMatrix::Translation(-itemRect.TopLeft()));
   maskCtx->Multiply(gfxMatrix::Scaling(mParameters.mXScale, mParameters.mYScale));
 
-  aMaskItem->PaintMask(mBuilder, maskCtx);
+  bool isPaintFinished = aMaskItem->PaintMask(mBuilder, maskCtx);
 
   RefPtr<ImageContainer> imgContainer =
     imageData.CreateImageAndImageContainer();

@@ -3929,7 +3929,9 @@ ContainerState::SetupMaskLayerForCSSMask(Layer* aLayer,
   }
   maskLayer->SetContainer(imgContainer);
 
-  *oldUserData = Move(newUserData);
+  if (isPaintFinished) {
+    *oldUserData = Move(newUserData);
+  }
   aLayer->SetMaskLayer(maskLayer);
 }

View file

@@ -1969,23 +1969,32 @@ nsCSSRendering::CanBuildWebRenderDisplayItemsForStyleImageLayer(LayerManager* aM
     }
   }
 
+  // We only support painting gradients and image for a single style image layer
   const nsStyleImage* styleImage = &aBackgroundStyle->mImage.mLayers[aLayer].mImage;
+  if (styleImage->GetType() == eStyleImageType_Image) {
+    if (styleImage->GetCropRect()) {
+      return false;
+    }
 
-  // We only support image with image container.
-  if (!styleImage->IsEmpty() && styleImage->GetType() == eStyleImageType_Image) {
-    imgRequestProxy* requestProxy = styleImage->GetImageData();
-    if (requestProxy) {
-      nsCOMPtr<imgIContainer> srcImage;
-      requestProxy->GetImage(getter_AddRefs(srcImage));
-      if (srcImage && !srcImage->IsImageContainerAvailable(aManager, imgIContainer::FLAG_NONE)) {
-        return false;
-      }
+    imgRequestProxy* requestProxy = styleImage->GetImageData();
+    if (!requestProxy) {
+      return false;
     }
+
+    nsCOMPtr<imgIContainer> srcImage;
+    requestProxy->GetImage(getter_AddRefs(srcImage));
+    if (!srcImage || !srcImage->IsImageContainerAvailable(aManager, imgIContainer::FLAG_NONE)) {
+      return false;
+    }
+
+    return true;
   }
 
-  // We only support painting gradients and image for a single style image layer
-  return styleImage->GetType() == eStyleImageType_Gradient ||
-         styleImage->GetType() == eStyleImageType_Image;
+  if (styleImage->GetType() == eStyleImageType_Gradient) {
+    return true;
+  }
+
+  return false;
 }
 
 DrawResult

View file

@@ -3988,8 +3988,8 @@ nsDisplayImageContainer::ConfigureLayer(ImageLayer* aLayer,
     : IntSize(imageWidth, imageHeight);
 
   const int32_t factor = mFrame->PresContext()->AppUnitsPerDevPixel();
-  const LayoutDeviceRect destRect =
-    LayoutDeviceRect::FromAppUnits(GetDestRect(), factor);
+  const LayoutDeviceRect destRect(
+    LayoutDeviceIntRect::FromAppUnitsToNearest(GetDestRect(), factor));
 
   const LayoutDevicePoint p = destRect.TopLeft();
   Matrix transform = Matrix::Translation(p.x, p.y);

@@ -4045,8 +4045,8 @@ nsDisplayImageContainer::CanOptimizeToImageLayer(LayerManager* aManager,
   }
 
   const int32_t factor = mFrame->PresContext()->AppUnitsPerDevPixel();
-  const LayoutDeviceRect destRect =
-    LayoutDeviceRect::FromAppUnits(GetDestRect(), factor);
+  const LayoutDeviceRect destRect(
+    LayoutDeviceIntRect::FromAppUnitsToNearest(GetDestRect(), factor));
 
   // Calculate the scaling factor for the frame.
   const gfxSize scale = gfxSize(destRect.width / imageWidth,

@@ -8390,7 +8390,7 @@ nsDisplayMask::BuildLayer(nsDisplayListBuilder* aBuilder,
   return container.forget();
 }
 
-void
+bool
 nsDisplayMask::PaintMask(nsDisplayListBuilder* aBuilder,
                          gfxContext* aMaskContext)
 {

@@ -8409,6 +8409,8 @@ nsDisplayMask::PaintMask(nsDisplayListBuilder* aBuilder,
   nsSVGIntegrationUtils::PaintMask(params);
   nsDisplayMaskGeometry::UpdateDrawResult(this, imgParmas.result);
+
+  return imgParmas.result == mozilla::image::DrawResult::SUCCESS;
 }
 
 LayerState

View file

@@ -4435,9 +4435,10 @@ public:
                                    LayerManager* aManager);
 
   /*
-   * Paint mask onto aMaskContext in mFrame's coordinate space.
+   * Paint mask onto aMaskContext in mFrame's coordinate space and
+   * return whether the mask layer was painted successfully.
    */
-  void PaintMask(nsDisplayListBuilder* aBuilder, gfxContext* aMaskContext);
+  bool PaintMask(nsDisplayListBuilder* aBuilder, gfxContext* aMaskContext);
 
   const nsTArray<nsRect>& GetDestRects()
   {

View file

@@ -361,6 +361,15 @@ CSSStyleSheet::CSSStyleSheet(const CSSStyleSheet& aCopy,
     mRuleProcessors(nullptr)
 {
   mParent = aParentToUse;
+
+  if (mDirty) { // CSSOM's been there, force full copy now
+    NS_ASSERTION(mInner->mComplete, "Why have rules been accessed on an incomplete sheet?");
+    // FIXME: handle failure?
+    //
+    // NOTE: It's important to call this from the subclass, since it could
+    // access uninitialized members otherwise.
+    EnsureUniqueInner();
+  }
 }
 
 CSSStyleSheet::~CSSStyleSheet()

View file

@@ -87,6 +87,15 @@ ServoStyleSheet::ServoStyleSheet(const ServoStyleSheet& aCopy,
   : StyleSheet(aCopy, aOwnerRuleToUse, aDocumentToUse, aOwningNodeToUse)
 {
   mParent = aParentToUse;
+
+  if (mDirty) { // CSSOM's been there, force full copy now
+    NS_ASSERTION(mInner->mComplete, "Why have rules been accessed on an incomplete sheet?");
+    // FIXME: handle failure?
+    //
+    // NOTE: It's important to call this from the subclass, since this could
+    // access uninitialized members otherwise.
+    EnsureUniqueInner();
+  }
 }
 
 ServoStyleSheet::~ServoStyleSheet()

View file

@@ -54,12 +54,6 @@ StyleSheet::StyleSheet(const StyleSheet& aCopy,
   MOZ_ASSERT(mInner, "Should only copy StyleSheets with an mInner.");
   mInner->AddSheet(this);
 
-  if (mDirty) { // CSSOM's been there, force full copy now
-    NS_ASSERTION(mInner->mComplete, "Why have rules been accessed on an incomplete sheet?");
-    // FIXME: handle failure?
-    EnsureUniqueInner();
-  }
-
   if (aCopy.mMedia) {
     // XXX This is wrong; we should be keeping @import rules and
     // sheets in sync!

View file

@@ -5,4 +5,4 @@ Makefile.in build files for the Mozilla build system.
 
 The cubeb git repository is: git://github.com/kinetiknz/cubeb.git
 
-The git commit ID used was 087dc942a9a3bb5cbb88e3763cde7fe709db99e9 (2017-05-29 15:51:19 +1200)
+The git commit ID used was 3428c2b08d2668a026469f2e71a6f6aa95614aeb (2017-05-31 16:26:45 +1200)

View file

@@ -114,6 +114,7 @@ struct cubeb_stream {
   cubeb_data_callback data_callback = nullptr;
   cubeb_state_callback state_callback = nullptr;
   cubeb_device_changed_callback device_changed_callback = nullptr;
+  owned_critical_section device_changed_callback_lock;
   /* Stream creation parameters */
   cubeb_stream_params input_stream_params = { CUBEB_SAMPLE_FLOAT32NE, 0, 0, CUBEB_LAYOUT_UNDEFINED };
   cubeb_stream_params output_stream_params = { CUBEB_SAMPLE_FLOAT32NE, 0, 0, CUBEB_LAYOUT_UNDEFINED };

@@ -704,7 +705,7 @@ audiounit_property_listener_callback(AudioObjectID /* id */, UInt32 address_coun
     case kAudioDevicePropertyDeviceIsAlive:
       /* fall through */
     case kAudioDevicePropertyDataSource: {
-        auto_lock lock(stm->mutex);
+        auto_lock dev_cb_lock(stm->device_changed_callback_lock);
         if (stm->device_changed_callback) {
           stm->device_changed_callback(stm->user_ptr);
         }

@@ -2640,8 +2641,7 @@ audiounit_stream_stop(cubeb_stream * stm)
 static int
 audiounit_stream_get_position(cubeb_stream * stm, uint64_t * position)
 {
-  auto_lock lock(stm->mutex);
-
+  assert(stm);
   *position = stm->frames_played;
   return CUBEB_OK;
 }

@@ -2871,14 +2871,11 @@ int audiounit_stream_device_destroy(cubeb_stream * /* stream */,
 int audiounit_stream_register_device_changed_callback(cubeb_stream * stream,
                                                       cubeb_device_changed_callback device_changed_callback)
 {
+  auto_lock dev_cb_lock(stream->device_changed_callback_lock);
   /* Note: second register without unregister first causes 'nope' error.
    * Current implementation requires unregister before register a new cb. */
   assert(!stream->device_changed_callback);
-
-  auto_lock lock(stream->mutex);
-
   stream->device_changed_callback = device_changed_callback;
-
   return CUBEB_OK;
 }

View file

@@ -9,6 +9,15 @@ with Files("**"):
 
 include('/build/gyp.mozbuild')
 
+# Set gyp vars that libyuv needs when building under various analysis tools.
+gyp_vars_copy = gyp_vars.copy()
+if CONFIG['MOZ_VALGRIND']:
+    gyp_vars_copy.update(build_for_tool="memcheck")
+elif CONFIG['MOZ_ASAN']:
+    gyp_vars_copy.update(build_for_tool="asan")
+elif CONFIG['MOZ_TSAN']:
+    gyp_vars_copy.update(build_for_tool="tsan")
+
 libyuv_non_unified_sources = [
     'libyuv/source/convert.cc',
     'libyuv/source/convert_from.cc',

@@ -21,7 +30,7 @@ libyuv_non_unified_sources = [
 
 GYP_DIRS += ['libyuv']
 GYP_DIRS['libyuv'].input = 'libyuv/libyuv.gyp'
-GYP_DIRS['libyuv'].variables = gyp_vars
+GYP_DIRS['libyuv'].variables = gyp_vars_copy
 GYP_DIRS['libyuv'].sandbox_vars['FINAL_LIBRARY'] = 'xul'
 GYP_DIRS['libyuv'].non_unified_sources += libyuv_non_unified_sources

14
media/mtransport/third_party/moz.build vendored
View file

@@ -11,6 +11,16 @@ GYP_DIRS += [
     'nrappkit',
 ]
 
+# Set gyp vars that webrtc needs when building under various analysis tools.
+# Primarily this prevents webrtc from setting NVALGRIND and breaking builds.
+gyp_vars_copy = gyp_vars.copy()
+if CONFIG['MOZ_VALGRIND']:
+    gyp_vars_copy.update(build_for_tool="memcheck")
+elif CONFIG['MOZ_ASAN']:
+    gyp_vars_copy.update(build_for_tool="asan")
+elif CONFIG['MOZ_TSAN']:
+    gyp_vars_copy.update(build_for_tool="tsan")
+
 # These files cannot be built in unified mode because of name clashes on RCSSTRING
 nICEr_non_unified_sources = [
     'nICEr/src/crypto/nr_crypto.c',

@@ -62,14 +72,14 @@ nrappkit_non_unified_sources = [
 ]
 
 GYP_DIRS['nICEr'].input = 'nICEr/nicer.gyp'
-GYP_DIRS['nICEr'].variables = gyp_vars
+GYP_DIRS['nICEr'].variables = gyp_vars_copy
 # We allow warnings for third-party code that can be updated from upstream.
 GYP_DIRS['nICEr'].sandbox_vars['ALLOW_COMPILER_WARNINGS'] = True
 GYP_DIRS['nICEr'].sandbox_vars['FINAL_LIBRARY'] = 'xul'
 GYP_DIRS['nICEr'].non_unified_sources += nICEr_non_unified_sources
 
 GYP_DIRS['nrappkit'].input = 'nrappkit/nrappkit.gyp'
-GYP_DIRS['nrappkit'].variables = gyp_vars
+GYP_DIRS['nrappkit'].variables = gyp_vars_copy
 # We allow warnings for third-party code that can be updated from upstream.
 GYP_DIRS['nrappkit'].sandbox_vars['ALLOW_COMPILER_WARNINGS'] = True
 GYP_DIRS['nrappkit'].sandbox_vars['FINAL_LIBRARY'] = 'xul'

View file

@@ -2919,11 +2919,7 @@ pref("layout.css.control-characters.visible", true);
 pref("layout.css.column-span.enabled", false);
 
 // Is effect of xml:base disabled for style attribute?
-#ifdef RELEASE_OR_BETA
-pref("layout.css.style-attr-with-xml-base.disabled", false);
-#else
 pref("layout.css.style-attr-with-xml-base.disabled", true);
-#endif
 
 // pref for which side vertical scrollbars should be on
 // 0 = end-side in UI direction

Binary data
other-licenses/7zstub/firefox/7zSD.sfx

Binary file not shown.

View file

@@ -172,9 +172,13 @@ class TestMemoryUsage(MarionetteTestCase):
         checkpoint_file = "memory-report-%s-%d.json.gz" % (checkpointName, iteration)
         checkpoint_path = os.path.join(self._resultsDir, checkpoint_file)
-        # Escape the Windows directory separator \ to prevent it from
-        # being interpreted as an escape character.
-        checkpoint_path = checkpoint_path.replace('\\', '\\\\')
+        # On Windows, replace / with the Windows directory
+        # separator \ and escape it to prevent it from being
+        # interpreted as an escape character.
+        if sys.platform.startswith('win'):
+            checkpoint_path = (checkpoint_path.
+                               replace('\\', '\\\\').
+                               replace('/', '\\\\'))
 
         checkpoint_script = r"""
             const Cc = Components.classes;

View file

@@ -235,45 +235,14 @@ Running Tests In Other Browsers
 
 web-platform-tests is cross browser, and the runner is compatible with
 multiple browsers. Therefore it's possible to check the behaviour of
-tests in other browsers. This is somewhat more involved than running
-them in Firefox since extra dependencies may be required. For example
-to test in Chrome:
+tests in other browsers. By default Chrome, Edge and Servo are
+supported. In order to run the tests in these browsers use the
+`--product` argument to wptrunner:
 
-1. Download the chromedriver binary and place it somewhere sensible
-   e.g. `~/bin`
+    mach wpt --product chrome dom/historical.html
 
-2. In your gecko source tree activate the virtualenv created by mach,
-   since this has most dependencies already installed. This is typically
-   in objdir/_virtualenv and is activated via e.g.
-
-       source objdir/_virtualenv/bin/activate
-
-3. Install the extra requirements:
-
-       cd testing/web-platform/harness
-       pip install -r requirements_chrome.txt
-
-4. Edit the config file `testing/web-platform/wptrunner.ini` so that
-   Chrome support is enabled by changing the section that reads:
-
-       [products]
-       firefox =
-
-   to read
-
-       [products]
-       firefox =
-       chrome =
-
-   (alternatively create a new config file elsewhere and use the
-   `--config` option to `runtests.py` to point wptrunner at this config
-   file).
-
-5. Run `runtests.py` using the location of chromedriver as
-   the binary:
-
-       cd testing/web-platform
-       python runtests.py --product=chrome --binary=~/bin/chromedriver --log-mach=-
-
-By default this will use the same test checkout and metadata as are in
-the Gecko tree, so it's easy to compare behaviour relative to Firefox.
+By default these browsers run without expectation metadata, but it can
+be added in the `testing/web-platform/products/<product>`
+directory. To run with the same metadata as for Firefox (so that
+differences are reported as unexpected results), pass `--meta
+testing/web-platform/meta` to the mach command.
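The commands below are a worked version of the workflow described in the updated documentation above. They are a sketch only, assuming a built mozilla-central tree where `mach` is on the path and the relevant browser binaries are discoverable; only the `--product` and `--meta` options named in the text are used.

    # Run a single test in Chrome with the default (empty) expectation metadata
    mach wpt --product chrome dom/historical.html

    # Re-run against the Firefox expectation data so that any behavioural
    # differences from Firefox are reported as unexpected results
    mach wpt --product chrome --meta testing/web-platform/meta dom/historical.html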

7
testing/web-platform/harness/.gitignore vendored
View file

@ -1,7 +0,0 @@
*.py[co]
*~
*#
\#*
_virtualenv
test/test.cfg
test/metadata/MANIFEST.json

View file

@ -1,20 +0,0 @@
language: python
python: 2.7
sudo: false
cache:
directories:
- $HOME/.cache/pip
env:
- TOXENV="{py27,pypy}-base"
- TOXENV="{py27,pypy}-chrome"
- TOXENV="{py27,pypy}-firefox"
- TOXENV="{py27,pypy}-servo"
install:
- pip install -U tox
script:
- tox

View file

@ -1,17 +0,0 @@
exclude MANIFEST.in
include requirements.txt
include wptrunner/browsers/b2g_setup/*
include wptrunner.default.ini
include wptrunner/testharness_runner.html
include wptrunner/testharnessreport.js
include wptrunner/testharnessreport-servo.js
include wptrunner/testharnessreport-servodriver.js
include wptrunner/executors/testharness_marionette.js
include wptrunner/executors/testharness_servodriver.js
include wptrunner/executors/testharness_webdriver.js
include wptrunner/executors/reftest.js
include wptrunner/executors/reftest-wait.js
include wptrunner/executors/reftest-wait_servodriver.js
include wptrunner/executors/reftest-wait_webdriver.js
include wptrunner/config.json
include wptrunner/browsers/server-locations.txt

View file

@ -1,242 +0,0 @@
wptrunner: A web-platform-tests harness
=======================================
wptrunner is a harness for running the W3C `web-platform-tests testsuite`_.
.. contents::
Installation
~~~~~~~~~~~~
wptrunner is expected to be installed into a virtualenv using pip. For
development, it can be installed using the `-e` option::
pip install -e ./
Running the Tests
~~~~~~~~~~~~~~~~~
After installation, the command ``wptrunner`` should be available to run
the tests.
The ``wptrunner`` command takes multiple options, of which the
following are most significant:
``--product`` (defaults to `firefox`)
The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
``--binary`` (required if product is `firefox` or `servo`)
The path to a binary file for the product (browser) to test against.
``--webdriver-binary`` (required if product is `chrome`)
The path to a `driver` binary; e.g., a `chromedriver` binary.
``--certutil-binary`` (required if product is `firefox` [#]_)
The path to a `certutil` binary (for tests that must be run over https).
``--metadata`` (required)
The path to a directory containing test metadata. [#]_
``--tests`` (required)
The path to a directory containing a web-platform-tests checkout.
``--prefs-root`` (required only when testing a Firefox binary)
The path to a directory containing Firefox test-harness preferences. [#]_
``--config`` (should default to `wptrunner.default.ini`)
The path to the config (ini) file.
.. [#] The ``--certutil-binary`` option is required when the product is
``firefox`` unless ``--ssl-type=none`` is specified.
.. [#] The ``--metadata`` path is to a directory that contains:
* a ``MANIFEST.json`` file (instructions on generating this file are
available in the `detailed documentation
<http://wptrunner.readthedocs.org/en/latest/usage.html#installing-wptrunner>`_);
and
* (optionally) any expectation files (see below)
.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
There are also a variety of other options available; use ``--help`` to
list them.
-------------------------------
Example: How to start wptrunner
-------------------------------
To test a Firefox Nightly build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
--certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
--prefs-root=~/mozilla-central/testing/profiles
And to test a Chromium build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
--webdriver-binary=/usr/local/bin/chromedriver --product=chrome
-------------------------------------
Example: How to run a subset of tests
-------------------------------------
To restrict a test run just to tests in a particular web-platform-tests
subdirectory, specify the directory name in the positional arguments after
the options; for example, run just the tests in the `dom` subdirectory::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=/path/to/firefox --certutil-binary=/path/to/certutil \
--prefs-root=/path/to/testing/profiles \
dom
Output
~~~~~~
By default wptrunner just dumps its entire output as raw JSON messages
to stdout. This is convenient for piping into other tools, but not ideal
for humans reading the output.
As an alternative, you can use the ``--log-mach`` option, which provides
output in a reasonable format for humans. The option requires a value:
either the path for a file to write the `mach`-formatted output to, or
"`-`" (a hyphen) to write the `mach`-formatted output to stdout.
When using ``--log-mach``, output of the full raw JSON log is still
available, from the ``--log-raw`` option. So to output the full raw JSON
log to a file and a human-readable summary to stdout, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=/path/to/firefox --certutil-binary=/path/to/certutil \
--prefs-root=/path/to/testing/profiles \
--log-raw=output.log --log-mach=-
Expectation Data
~~~~~~~~~~~~~~~~
wptrunner is designed to be used in an environment where it is not
just necessary to know which tests passed, but to compare the results
between runs. For this reason it is possible to store the results of a
previous run in a set of ini-like "expectation files". This format is
documented below. To generate the expectation files use `wptrunner` with
the `--log-raw=/path/to/log/file` option. This can then be used as
input to the `wptupdate` tool.
Expectation File Format
~~~~~~~~~~~~~~~~~~~~~~~
Metadata about tests, notably including their expected results, is
stored in a modified ini-like format that is designed to be human
editable, but also to be machine updatable.
Each test file that requires metadata to be specified (because it has
a non-default expectation or because it is disabled, for example) has
a corresponding expectation file in the `metadata` directory. For
example a test file `html/test1.html` containing a failing test would
have an expectation file called `html/test1.html.ini` in the
`metadata` directory.
An example of an expectation file is::
example_default_key: example_value
[filename.html]
type: testharness
[subtest1]
expected: FAIL
[subtest2]
expected:
if platform == 'win': TIMEOUT
if platform == 'osx': ERROR
FAIL
[filename.html?query=something]
type: testharness
disabled: bug12345
The file consists of two elements, key-value pairs and
sections.
Sections are delimited by headings enclosed in square brackets. Any
closing square bracket in the heading itself my be escaped with a
backslash. Each section may then contain any number of key-value pairs
followed by any number of subsections. So that it is clear which data
belongs to each section without the use of end-section markers, the
data for each section (i.e. the key-value pairs and subsections) must
be indented using spaces. Indentation need only be consistent, but
using two spaces per level is recommended.
In a test expectation file, each resource provided by the file has a
single section, with the section heading being the part after the last
`/` in the test url. Tests that have subsections may have subsections
for those subtests in which the heading is the name of the subtest.
Simple key-value pairs are of the form::
key: value
Note that unlike ini files, only `:` is a valid seperator; `=` will
not work as expected. Key-value pairs may also have conditional
values of the form::
key:
if condition1: value1
if condition2: value2
default
In this case each conditional is evaluated in turn and the value is
that on the right hand side of the first matching conditional. In the
case that no condition matches, the unconditional default is used. If
no condition matches and no default is provided it is equivalent to
the key not being present. Conditionals use a simple python-like expression
language e.g.::
if debug and (platform == "linux" or platform == "osx"): FAIL
For test expectations the avaliable variables are those in the
`run_info` which for desktop are `version`, `os`, `bits`, `processor`,
`debug` and `product`.
Key-value pairs specified at the top level of the file before any
sections are special as they provide defaults for the rest of the file
e.g.::
key1: value1
[section 1]
key2: value2
[section 2]
key1: value3
In this case, inside section 1, `key1` would have the value `value1`
and `key2` the value `value2` whereas in section 2 `key1` would have
the value `value3` and `key2` would be undefined.
The web-platform-test harness knows about several keys:
`expected`
Must evaluate to a possible test status indicating the expected
result of the test. The implicit default is PASS or OK when the
field isn't present.
`disabled`
Any value indicates that the test is disabled.
`type`
The test type e.g. `testharness`, `reftest`, or `wdspec`.
`reftype`
The type of comparison for reftests; either `==` or `!=`.
`refurl`
The reference url for reftests.
.. _`web-platform-tests testsuite`: https://github.com/w3c/web-platform-tests

View file

@ -1,177 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/wptrunner.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/wptrunner.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/wptrunner"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/wptrunner"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

File diff suppressed because one or more lines are too long

Before
Width:  |  Height:  |  Size: 19 KiB

View file

@ -1,267 +0,0 @@
# -*- coding: utf-8 -*-
#
# wptrunner documentation build configuration file, created by
# sphinx-quickstart on Mon May 19 18:14:20 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wptrunner'
copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wptrunnerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'wptrunner.tex', u'wptrunner Documentation',
u'James Graham', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wptrunner', u'wptrunner Documentation',
[u'James Graham'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wptrunner', u'wptrunner Documentation',
u'James Graham', 'wptrunner', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'mozlog': ('http://mozbase.readthedocs.org/en/latest/', None)}


@ -1,106 +0,0 @@
wptrunner Design
================
The design of wptrunner is intended to meet the following
requirements:
* Possible to run tests from W3C web-platform-tests.
* Tests should be run as fast as possible. In particular it should
not be necessary to restart the browser between tests, or similar.
* As far as possible, the tests should run in a "normal" browser and
browsing context. In particular many tests assume that they are
running in a top-level browsing context, so we must avoid the use
of an ``iframe`` test container.
* It must be possible to deal with all kinds of behaviour of the
browser under test, for example, crashing, hanging, etc.
* It should be possible to add support for new platforms and browsers
with minimal code changes.
* It must be possible to run tests in parallel to further improve
performance.
* Test output must be in a machine readable form.
Architecture
------------
In order to meet the above requirements, wptrunner is designed to
push as much of the test scheduling as possible into the harness. This
allows the harness to monitor the state of the browser and perform
appropriate action if it gets into an unwanted state e.g. kill the
browser if it appears to be hung.
The harness will typically communicate with the browser via some remote
control protocol such as WebDriver. However for browsers where no such
protocol is supported, other implementation strategies are possible,
typically at the expense of speed.
The overall architecture of wptrunner is shown in the diagram below:
.. image:: architecture.svg
The main entry point to the code is :py:func:`run_tests` in
``wptrunner.py``. This is responsible for setting up the test
environment, loading the list of tests to be executed, and invoking
the remainder of the code to actually execute some tests.
The test environment is encapsulated in the
:py:class:`TestEnvironment` class. This defers to code in
``web-platform-tests`` which actually starts the required servers to
run the tests.
The set of tests to run is defined by the
:py:class:`TestLoader`. This is constructed with a
:py:class:`TestFilter` (not shown), which takes any filter arguments
from the command line to restrict the set of tests that will be
run. The :py:class:`TestLoader` reads both the ``web-platform-tests``
JSON manifest and the expectation data stored in ini files and
produces a :py:class:`multiprocessing.Queue` of tests to run, and
their expected results.
Actually running the tests happens through the
:py:class:`ManagerGroup` object. This takes the :py:class:`Queue` of
tests to be run and starts a :py:class:`testrunner.TestRunnerManager` for each
instance of the browser under test that will be started. These
:py:class:`TestRunnerManager` instances are each started in their own
thread.
A :py:class:`TestRunnerManager` coordinates starting the product under
test, and outputting results from the test. In the case that the test
has timed out or the browser has crashed, it has to restart the
browser to ensure the test run can continue. The functionality for
initialising the browser under test, and probing its state
(e.g. whether the process is still alive) is implemented through a
:py:class:`Browser` object. An implementation of this class must be
provided for each product that is supported.
The functionality for actually running the tests is provided by a
:py:class:`TestRunner` object. :py:class:`TestRunner` instances are
run in their own child process created with the
:py:mod:`multiprocessing` module. This allows them to run concurrently
and to be killed and restarted as required. Communication between the
:py:class:`TestRunnerManager` and the :py:class:`TestRunner` is
provided by a pair of queues, one for sending messages in each
direction. In particular test results are sent from the
:py:class:`TestRunner` to the :py:class:`TestRunnerManager` using one
of these queues.
The :py:class:`TestRunner` object is generic in that the same
:py:class:`TestRunner` is used regardless of the product under
test. However the details of how to run the test may vary greatly with
the product since different products support different remote control
protocols (or none at all). These protocol-specific parts are placed
in the :py:class:`Executor` object. There is typically a different
:py:class:`Executor` class for each combination of control protocol
and test type. The :py:class:`TestRunner` is responsible for pulling
each test off the :py:class:`Queue` of tests and passing it down to
the :py:class:`Executor`.
The executor often requires access to details of the particular
browser instance that it is testing so that it knows e.g. which port
to connect to in order to send commands to the browser. These details are
encapsulated in the :py:class:`ExecutorBrowser` class.
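To make the manager/runner split above concrete, here is a minimal
Python sketch of the command-and-result queue pattern it describes;
the function names and the sentinel convention are invented for
illustration and are not the actual wptrunner classes::

    import multiprocessing

    def runner_process(command_queue, result_queue):
        # Child process: pull tests off the queue, "run" them and report
        # results back to the manager. A real executor would drive the
        # browser over its control protocol here.
        while True:
            test = command_queue.get()
            if test is None:  # sentinel: no more tests
                break
            result_queue.put((test, "PASS"))

    def manager(tests):
        # Parent side: owns the runner process (and, in wptrunner, the
        # browser), restarting it if things hang or crash (omitted here).
        command_queue = multiprocessing.Queue()
        result_queue = multiprocessing.Queue()
        runner = multiprocessing.Process(target=runner_process,
                                         args=(command_queue, result_queue))
        runner.start()
        for test in tests:
            command_queue.put(test)
        command_queue.put(None)
        for _ in tests:
            print(result_queue.get())
        runner.join()

    if __name__ == "__main__":
        manager(["/dom/historical.html", "/html/semantics/test.html"])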


@ -1,248 +0,0 @@
Expectation Data
================
Introduction
------------
For use in continuous integration systems, and other scenarios where
regression tracking is required, wptrunner supports storing and
loading the expected result of each test in a test run. Typically
these expected results will initially be generated by running the
testsuite in a baseline build. They may then be edited by humans as
new features are added to the product that change the expected
results. The expected results may also vary for a single product
depending on the platform on which it is run. Therefore, the raw
structured log data is not a suitable format for storing these
files. Instead something is required that is:
* Human readable
* Human editable
* Machine readable / writable
* Capable of storing test id / result pairs
* Suitable for storing in a version control system (i.e. text-based)
The need for different results per platform means either having
multiple expectation files for each platform, or having a way to
express conditional values within a certain file. The former would be
rather cumbersome for humans updating the expectation files, so the
latter approach has been adopted, leading to the requirement:
* Capable of storing result values that are conditional on the platform.
There are few extant formats that meet these requirements, so
wptrunner uses a bespoke ``expectation manifest`` format, which is
closely based on the standard ``ini`` format.
Directory Layout
----------------
Expectation manifest files must be stored under the ``metadata``
directory passed to the test runner. The directory layout follows that
of web-platform-tests with each test path having a corresponding
manifest file. Tests that differ only by query string, or reftests
with the same test path but different ref paths share the same
manifest file. The file name is taken from the last /-separated part
of the path, suffixed with ``.ini``.
As an optimisation, tests which produce only default results
(i.e. ``PASS`` or ``OK``) don't require a corresponding manifest file.
For example a test with url::
/spec/section/file.html?query=param
would have an expectation file ::
metadata/spec/section/file.html.ini
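A tiny sketch of that URL-to-manifest mapping (the helper name is made
up for illustration; the real layout handling lives in wptrunner's
manifest code)::

    import posixpath

    def expectation_path(metadata_root, test_url):
        # Drop any query string, then mirror the test path under the
        # metadata directory with an ``.ini`` suffix.
        path = test_url.split("?", 1)[0]
        return posixpath.join(metadata_root, path.lstrip("/")) + ".ini"

    assert (expectation_path("metadata", "/spec/section/file.html?query=param")
            == "metadata/spec/section/file.html.ini")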
.. _wptupdate-label:
Generating Expectation Files
----------------------------
wptrunner provides the tool ``wptupdate`` to generate expectation
files from the results of a set of baseline test runs. The basic
syntax for this is::
wptupdate [options] [logfile]...
Each ``logfile`` is a structured log file from a previous run. These
can be generated from wptrunner using the ``--log-raw`` option
e.g. ``--log-raw=structured.log``. The default behaviour is to update
all the test data for the particular combination of hardware and OS
used in the run corresponding to the log data, whilst leaving any
other expectations untouched.
wptupdate takes several useful options:
``--sync``
Pull the latest version of web-platform-tests from the
upstream specified in the config file. If this is specified in
combination with logfiles, it is assumed that the results in the log
files apply to the post-update tests.
``--no-check-clean``
Don't attempt to check if the working directory is clean before
doing the update (assuming that the working directory is a git or
mercurial tree).
``--patch``
Create a git commit, or an mq patch, with the changes made by wptupdate.
``--ignore-existing``
Overwrite all the expectation data for any tests that have a result
in the passed log files, not just data for the same platform.
Examples
~~~~~~~~
Update the local copy of web-platform-tests without changing the
expectation data and commit (or create a mq patch for) the result::
wptupdate --patch --sync
Update all the expectations from a set of cross-platform test runs::
wptupdate --no-check-clean --patch osx.log linux.log windows.log
Add expectation data for some new tests that are expected to be
platform-independent::
wptupdate --no-check-clean --patch --ignore-existing tests.log
Manifest Format
---------------
The format of the manifest files is based on the ini format. Files are
divided into sections, each (apart from the root section) having a
heading enclosed in square braces. Within each section are key-value
pairs. There are several notable differences from standard .ini files,
however:
* Sections may be hierarchically nested, with significant whitespace
indicating nesting depth.
* Only ``:`` is valid as a key/value separator
A simple example of a manifest file is::
root_key: root_value
[section]
section_key: section_value
[subsection]
subsection_key: subsection_value
[another_section]
another_key: another_value
Conditional Values
~~~~~~~~~~~~~~~~~~
In order to support values that depend on some external data, the
right hand side of a key/value pair can take a set of conditionals
rather than a plain value. These values are placed on a new line
following the key, with significant indentation. Conditional values
are prefixed with ``if`` and terminated with a colon, for example::
key:
if cond1: value1
if cond2: value2
value3
In this example, the value associated with ``key`` is determined by
first evaluating ``cond1`` against external data. If that is true,
``key`` is assigned the value ``value1``, otherwise ``cond2`` is
evaluated in the same way. If both ``cond1`` and ``cond2`` are false,
the unconditional ``value3`` is used.
Conditions themselves use a Python-like expression syntax. Operands
can either be variables, corresponding to data passed in, numbers
(integer or floating point; exponential notation is not supported) or
quote-delimited strings. Equality is tested using ``==`` and
inequality by ``!=``. The operators ``and``, ``or`` and ``not`` are
used in the expected way. Parentheses can also be used for
grouping. For example::
key:
if (a == 2 or a == 3) and b == "abc": value1
if a == 1 or b != "abc": value2
value3
Here ``a`` and ``b`` are variables, the value of which will be
supplied when the manifest is used.
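Conceptually, evaluation walks the conditions in order and falls back
to the unconditional value. A minimal Python sketch of that behaviour,
using plain callables in place of the real condition parser::

    def evaluate(conditions, default, properties):
        # ``conditions`` is an ordered list of (predicate, value) pairs;
        # the first predicate that is true for ``properties`` wins,
        # otherwise the unconditional default applies.
        for predicate, value in conditions:
            if predicate(properties):
                return value
        return default

    expected = evaluate(
        [(lambda p: p["os"] == "osx", "FAIL"),
         (lambda p: p["os"] == "windows" and p["version"] == "XP", "FAIL")],
        "PASS",
        {"os": "linux", "version": "16.04", "debug": False})
    assert expected == "PASS"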
Expectation Manifests
---------------------
When used for expectation data, manifests have the following format:
* A section per test URL described by the manifest, with the section
heading being the part of the test URL following the last ``/`` in
the path (this allows multiple tests in a single manifest file with
the same path part of the URL, but different query parts).
* A subsection per subtest, with the heading being the title of the
subtest.
* A key ``type`` indicating the test type. This takes the values
``testharness`` and ``reftest``.
* For reftests, keys ``reftype`` indicating the reference type
(``==`` or ``!=``) and ``refurl`` indicating the URL of the
reference.
* A key ``expected`` giving the expectation value of each (sub)test.
* A key ``disabled`` which can be set to any value to indicate that
the (sub)test is disabled and should either not be run (for tests)
or that its results should be ignored (subtests).
* A key ``restart-after`` which can be set to any value to indicate that
the runner should restart the browser after running this test (e.g. to
clear out unwanted state).
* Variables ``debug``, ``os``, ``version``, ``processor`` and
``bits`` that describe the configuration of the browser under
test. ``debug`` is a boolean indicating whether a build is a debug
build. ``os`` is a string indicating the operating system, and
``version`` a string indicating the particular version of that
operating system. ``processor`` is a string indicating the
processor architecture and ``bits`` an integer indicating the
number of bits. This information is typically provided by
:py:mod:`mozinfo`.
* Top level keys are taken as defaults for the whole file. So, for
example, a top level key with ``expected: FAIL`` would indicate
that all tests and subtests in the file are expected to fail,
unless they have an ``expected`` key of their own.
A simple example manifest might look like::
[test.html?variant=basic]
type: testharness
[Test something unsupported]
expected: FAIL
[test.html?variant=broken]
expected: ERROR
[test.html?variant=unstable]
disabled: http://test.bugs.example.org/bugs/12345
A more complex manifest with conditional properties might be::
[canvas_test.html]
expected:
if os == "osx": FAIL
if os == "windows" and version == "XP": FAIL
PASS
Note that ``PASS`` in the above works, but is unnecessary; ``PASS``
(or ``OK``) is always the default expectation for (sub)tests.


@ -1,24 +0,0 @@
.. wptrunner documentation master file, created by
sphinx-quickstart on Mon May 19 18:14:20 2014.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to wptrunner's documentation!
=====================================
Contents:
.. toctree::
:maxdepth: 2
usage
expectation
design
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@ -1,242 +0,0 @@
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\wptrunner.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\wptrunner.ghc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end


@ -1,238 +0,0 @@
Getting Started
===============
Installing wptrunner
--------------------
The easiest way to install wptrunner is into a virtualenv, using pip::
virtualenv wptrunner
cd wptrunner
source bin/activate
pip install wptrunner
This will install the base dependencies for wptrunner, but not any
extra dependencies required to test against specific browsers. In
order to do this you must use the extra requirements files in
``$VIRTUAL_ENV/requirements/requirements_browser.txt``. For example,
in order to test against Firefox you would have to run::
pip install -r requirements/requirements_firefox.txt
If you intend to work on the code, the ``-e`` option to pip should be
used in combination with a source checkout i.e. inside a virtual
environment created as above::
git clone https://github.com/w3c/wptrunner.git
cd wptrunner
pip install -e ./
In addition to the dependencies installed by pip, wptrunner requires
a copy of the web-platform-tests repository. This can be located
anywhere on the filesystem, but the easiest option is to put it
under the same parent directory as the wptrunner checkout::
git clone https://github.com/w3c/web-platform-tests.git
It is also necessary to generate a web-platform-tests ``MANIFEST.json``
file. It's recommended to also put that under the same parent directory as
the wptrunner checkout, in a directory named ``meta``::
mkdir meta
cd web-platform-tests
python manifest --path ../meta/MANIFEST.json
The ``MANIFEST.json`` file needs to be regenerated each time the
web-platform-tests checkout is updated. To aid with the update process
there is a tool called ``wptupdate``, which is described in
:ref:`wptupdate-label`.
Running the Tests
-----------------
A test run is started using the ``wptrunner`` command. The command
takes multiple options, of which the following are most significant:
``--product`` (defaults to `firefox`)
The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
``--binary`` (required if product is `firefox` or `servo`)
The path to a binary file for the product (browser) to test against.
``--webdriver-binary`` (required if product is `chrome`)
The path to a `*driver` binary; e.g., a `chromedriver` binary.
``--certutil-binary`` (required if product is `firefox` [#]_)
The path to a `certutil` binary (for tests that must be run over https).
``--metadata`` (required only when not `using default paths`_)
The path to a directory containing test metadata. [#]_
``--tests`` (required only when not `using default paths`_)
The path to a directory containing a web-platform-tests checkout.
``--prefs-root`` (required only when testing a Firefox binary)
The path to a directory containing Firefox test-harness preferences. [#]_
``--config`` (should default to `wptrunner.default.ini`)
The path to the config (ini) file.
.. [#] The ``--certutil-binary`` option is required when the product is
``firefox`` unless ``--ssl-type=none`` is specified.
.. [#] The ``--metadata`` path is to a directory that contains:
* a ``MANIFEST.json`` file (the web-platform-tests documentation has
instructions on generating this file)
* (optionally) any expectation files (see :ref:`wptupdate-label`)
.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
There are also a variety of other command-line options available; use
``--help`` to list them.
The following examples show how to start wptrunner with various options.
------------------
Starting wptrunner
------------------
The examples below assume the following directory layout,
though no specific folder structure is required::
~/testtwf/wptrunner # wptrunner checkout
~/testtwf/web-platform-tests # web-platform-tests checkout
~/testtwf/meta # metadata
To test a Firefox Nightly build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
--binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
--certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
--prefs-root=~/mozilla-central/testing/profiles
And to test a Chromium build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
--binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
--webdriver-binary=/usr/local/bin/chromedriver --product=chrome
--------------------
Running test subsets
--------------------
To restrict a test run just to tests in a particular web-platform-tests
subdirectory, specify the directory name in the positional arguments after
the options; for example, run just the tests in the `dom` subdirectory::
wptrunner --metadata=~/testtwf/meta --tests=~/testtwf/web-platform-tests/ \
--binary=/path/to/firefox --certutil-binary=/path/to/certutil \
--prefs-root=/path/to/testing/profiles \
dom
-------------------
Running in parallel
-------------------
To speed up the testing process, use the ``--processes`` option to have
wptrunner run multiple browser instances in parallel. For example, to
have wptrunner attempt to run tests with six browser instances
in parallel, specify ``--processes=6``. But note that behaviour in this
mode is necessarily less deterministic than with ``--processes=1`` (the
default), so there may be more noise in the test results.
-------------------
Using default paths
-------------------
The (otherwise-required) ``--tests`` and ``--metadata`` command-line
options/flags may be omitted if any configuration file is found that
contains a section specifying the ``tests`` and ``metadata`` keys.
See the `Configuration File`_ section for more information about
configuration files, including information about their expected
locations.
The content of the ``wptrunner.default.ini`` default configuration file
makes wptrunner look for tests (that is, a web-platform-tests checkout)
as a subdirectory of the current directory named ``tests``, and for
metadata files in a subdirectory of the current directory named ``meta``.
Output
------
wptrunner uses the :py:mod:`mozlog` package for output. This
structures events such as test results or log messages as JSON objects
that can then be fed to other tools for interpretation. More details
about the message format are given in the
:py:mod:`mozlog` documentation.
By default the raw JSON messages are dumped to stdout. This is
convenient for piping into other tools, but not ideal for humans
reading the output. :py:mod:`mozlog` comes with several other
formatters, which are accessible through command line options. The
general format of these options is ``--log-name=dest``, where ``name``
is the name of the format and ``dest`` is a path to a destination
file, or ``-`` for stdout. The raw JSON data is written by the ``raw``
formatter, so the default setup corresponds to ``--log-raw=-``.
A reasonable output format for humans is provided as ``mach``. So in
order to output the full raw log to a file and a human-readable
summary to stdout, one might pass the options::
--log-raw=output.log --log-mach=-
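Because each line of the raw log is a standalone JSON object, a quick
summary of results can be produced with nothing more than the standard
library; a minimal sketch, assuming the ``output.log`` file name from
the example above::

    import json
    from collections import Counter

    statuses = Counter()
    with open("output.log") as log:
        for line in log:
            data = json.loads(line)
            if data.get("action") == "test_end":
                statuses[data.get("status")] += 1
    print(dict(statuses))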
Configuration File
------------------
wptrunner uses a ``.ini`` file to control some configuration
sections. The file has three sections; ``[products]``,
``[manifest:default]`` and ``[web-platform-tests]``.
``[products]`` is used to
define the set of available products. By default this section is empty
which means that all the products distributed with wptrunner are
enabled (although their dependencies may not be installed). The set
of enabled products can be set by using the product name as the
key. For built in products the value is empty. It is also possible to
provide the path to a script implementing the browser functionality
e.g.::
[products]
chrome =
netscape4 = path/to/netscape.py
``[manifest:default]`` specifies the default paths for the tests and metadata,
relative to the config file. For example::
[manifest:default]
tests = ~/testtwf/web-platform-tests
metadata = ~/testtwf/meta
``[web-platform-tests]`` is used to set the properties of the upstream
repository when updating the paths. ``remote_url`` specifies the git
url to pull from; ``branch`` the branch to sync against and
``sync_path`` the local path, relative to the configuration file, to
use when checking out the tests e.g.::
[web-platform-tests]
remote_url = https://github.com/w3c/web-platform-tests.git
branch = master
sync_path = sync
A configuration file must contain all the above fields; falling back
to the default values for unspecified fields is not yet supported.
The ``wptrunner`` and ``wptupdate`` commands will use configuration
files in the following order:
* Any path supplied with a ``--config`` flag to the command.
* A file called ``wptrunner.ini`` in the current directory
* The default configuration file (``wptrunner.default.ini`` in the
source directory)
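A minimal sketch of that lookup order (the helper function is
illustrative and not part of wptrunner)::

    import os

    def pick_config(cli_config=None,
                    default_config="wptrunner.default.ini"):
        # Mirror the order described above: --config flag, then
        # ./wptrunner.ini, then the default config in the source tree.
        candidates = []
        if cli_config:
            candidates.append(cli_config)
        candidates.append(os.path.join(os.curdir, "wptrunner.ini"))
        candidates.append(default_config)
        for path in candidates:
            if os.path.exists(path):
                return path
        raise IOError("no wptrunner configuration file found")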


@ -1,4 +0,0 @@
html5lib >= 0.99
mozinfo >= 0.7
mozlog >= 3.3
mozdebug >= 0.1


@ -1,2 +0,0 @@
mozprocess >= 0.19
selenium >= 2.41.0


@ -1,6 +0,0 @@
marionette_driver >= 0.4
mozprofile >= 0.21
mozprocess >= 0.19
mozcrash >= 0.13
mozrunner >= 6.7
mozleak >= 0.1


@ -1 +0,0 @@
mozprocess >= 0.19


@ -1,73 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import sys
import textwrap
from setuptools import setup, find_packages
here = os.path.split(__file__)[0]
PACKAGE_NAME = 'wptrunner'
PACKAGE_VERSION = '1.14'
# Dependencies
with open(os.path.join(here, "requirements.txt")) as f:
deps = f.read().splitlines()
# Browser-specific requirements
requirements_files = glob.glob("requirements_*.txt")
profile_dest = None
dest_exists = False
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Harness for running the W3C web-platform-tests against various products",
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
license='MPL 2.0',
packages=find_packages(exclude=["tests", "metadata", "prefs"]),
entry_points={
'console_scripts': [
'wptrunner = wptrunner.wptrunner:main',
'wptupdate = wptrunner.update:main',
]
},
zip_safe=False,
platforms=['Any'],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: OS Independent'],
package_data={"wptrunner": ["executors/testharness_marionette.js",
"executors/testharness_webdriver.js",
"executors/reftest.js",
"executors/reftest-wait.js",
"testharnessreport.js",
"testharness_runner.html",
"config.json",
"wptrunner.default.ini",
"browsers/server-locations.txt",
"browsers/b2g_setup/*",
"prefs/*"]},
include_package_data=True,
data_files=[("requirements", requirements_files)],
install_requires=deps
)
if "install" in sys.argv:
path = os.path.relpath(os.path.join(sys.prefix, "requirements"), os.curdir)
print textwrap.fill("""In order to use with one of the built-in browser
products, you will need to install the extra dependencies. These are provided
as requirements_[name].txt in the %s directory and can be installed using
e.g.""" % path, 80)
print """
pip install -r %s/requirements_firefox.txt
""" % path


@ -1,3 +0,0 @@
[reftest_and_fail.html]
type: reftest
expected: FAIL


@ -1,3 +0,0 @@
[reftest_cycle_fail.html]
type: reftest
expected: FAIL


@ -1,3 +0,0 @@
[reftest_match_fail.html]
type: reftest
expected: FAIL


@ -1,3 +0,0 @@
[reftest_mismatch_fail.html]
type: reftest
expected: FAIL


@ -1,3 +0,0 @@
[reftest_ref_timeout.html]
type: reftest
expected: TIMEOUT


@ -1,3 +0,0 @@
[reftest_timeout.html]
type: reftest
expected: TIMEOUT


@ -1,2 +0,0 @@
prefs: ["browser.display.foreground_color:#FF0000",
"browser.display.background_color:#000000"]


@ -1,2 +0,0 @@
[test_pref_reset.html]
prefs: [@Reset]


@ -1,3 +0,0 @@
[test_pref_set.html]
prefs: ["browser.display.foreground_color:#00FF00",
"browser.display.background_color:#000000"]


@ -1 +0,0 @@
disabled: true


@ -1,2 +0,0 @@
[testharness_1.html]
disabled: @False


@ -1,4 +0,0 @@
[testharness_0.html]
type: testharness
[Test that should fail]
expected: FAIL


@ -1,3 +0,0 @@
[testharness_error.html]
type: testharness
expected: ERROR


@ -1,3 +0,0 @@
[testharness_timeout.html]
type: testharness
expected: TIMEOUT


@ -1,20 +0,0 @@
[general]
tests=/path/to/web-platform-tests/
metadata=/path/to/web-platform-tests/
ssl-type=none
# [firefox]
# binary=/path/to/firefox
# prefs-root=/path/to/gecko-src/testing/profiles/
# [servo]
# binary=/path/to/servo-src/target/release/servo
# exclude=testharness # Because it needs a special testharness.js
# [servodriver]
# binary=/path/to/servo-src/target/release/servo
# exclude=testharness # Because it needs a special testharness.js
# [chrome]
# binary=/path/to/chrome
# webdriver-binary=/path/to/chromedriver


@ -1,166 +0,0 @@
import ConfigParser
import argparse
import json
import os
import sys
import tempfile
import threading
import time
from StringIO import StringIO
from mozlog import structuredlog, reader
from mozlog.handlers import BaseHandler, StreamHandler, StatusHandler
from mozlog.formatters import MachFormatter
from wptrunner import wptcommandline, wptrunner
here = os.path.abspath(os.path.dirname(__file__))
def setup_wptrunner_logging(logger):
structuredlog.set_default_logger(logger)
wptrunner.logger = logger
wptrunner.wptlogging.setup_stdlib_logger()
class ResultHandler(BaseHandler):
def __init__(self, verbose=False, logger=None):
self.inner = StreamHandler(sys.stdout, MachFormatter())
BaseHandler.__init__(self, self.inner)
self.product = None
self.verbose = verbose
self.logger = logger
self.register_message_handlers("wptrunner-test", {"set-product": self.set_product})
def set_product(self, product):
self.product = product
def __call__(self, data):
if self.product is not None and data["action"] in ["suite_start", "suite_end"]:
# Hack: mozlog sets some internal state to prevent multiple suite_start or
# suite_end messages. We actually want that here (one from the metaharness
# and one from the individual test type harness), so override that internal
# state (a better solution might be to not share loggers, but this works well
# enough)
self.logger._state.suite_started = True
return
if (not self.verbose and
(data["action"] == "process_output" or
data["action"] == "log" and data["level"] not in ["error", "critical"])):
return
if "test" in data:
data = data.copy()
data["test"] = "%s: %s" % (self.product, data["test"])
return self.inner(data)
def test_settings():
return {
"include": "_test",
"manifest-update": "",
"no-capture-stdio": ""
}
def read_config():
parser = ConfigParser.ConfigParser()
parser.read("test.cfg")
rv = {"general":{},
"products":{}}
rv["general"].update(dict(parser.items("general")))
# This only allows one product per whatever for now
for product in parser.sections():
if product != "general":
dest = rv["products"][product] = {}
for key, value in parser.items(product):
rv["products"][product][key] = value
return rv
def run_tests(product, kwargs):
kwargs["test_paths"]["/_test/"] = {"tests_path": os.path.join(here, "testdata"),
"metadata_path": os.path.join(here, "metadata")}
wptrunner.run_tests(**kwargs)
def settings_to_argv(settings):
rv = []
for name, value in settings.iteritems():
key = "--%s" % name
if not value:
rv.append(key)
elif isinstance(value, list):
for item in value:
rv.extend([key, item])
else:
rv.extend([key, value])
return rv
def set_from_args(settings, args):
if args.test:
settings["include"] = args.test
if args.tags:
settings["tags"] = args.tags
def run(config, args):
logger = structuredlog.StructuredLogger("web-platform-tests")
logger.add_handler(ResultHandler(logger=logger, verbose=args.verbose))
setup_wptrunner_logging(logger)
parser = wptcommandline.create_parser()
logger.suite_start(tests=[])
for product, product_settings in config["products"].iteritems():
if args.product and product not in args.product:
continue
settings = test_settings()
settings.update(config["general"])
settings.update(product_settings)
settings["product"] = product
set_from_args(settings, args)
kwargs = vars(parser.parse_args(settings_to_argv(settings)))
wptcommandline.check_args(kwargs)
logger.send_message("wptrunner-test", "set-product", product)
run_tests(product, kwargs)
logger.send_message("wptrunner-test", "set-product", None)
logger.suite_end()
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", default=False,
help="verbose log output")
parser.add_argument("--product", action="append",
help="Specific product to include in test run")
parser.add_argument("--pdb", action="store_true",
help="Invoke pdb on uncaught exception")
parser.add_argument("--tag", action="append", dest="tags",
help="tags to select tests")
parser.add_argument("test", nargs="*",
help="Specific tests to include in test run")
return parser
def main():
config = read_config()
args = get_parser().parse_args()
try:
run(config, args)
except Exception:
if args.pdb:
import pdb, traceback
print traceback.format_exc()
pdb.post_mortem()
else:
raise
if __name__ == "__main__":
main()


@ -1,4 +0,0 @@
<link rel=match href=green.html>
<style>
:root {background-color:green}
</style>


@ -1,3 +0,0 @@
<style>
:root {background-color:green}
</style>


@ -1,3 +0,0 @@
<style>
:root {background-color:red}
</style>


@ -1,9 +0,0 @@
<link rel=match href=green.html>
<style>
:root {background-color:red}
</style>
<script>
if (window.location.protocol === "https:") {
document.documentElement.style.backgroundColor = "green";
}
</script>


@ -1,5 +0,0 @@
<title>Reftest chain that should fail</title>
<link rel=match href=reftest_and_fail_0-ref.html>
<style>
:root {background-color:green}
</style>


@ -1,5 +0,0 @@
<title>Reftest chain that should fail</title>
<link rel=match href=red.html>
<style>
:root {background-color:green}
</style>


@ -1,5 +0,0 @@
<title>Reftest with cycle, all match</title>
<link rel=match href=reftest_cycle_0-ref.html>
<style>
:root {background-color:green}
</style>


@ -1,5 +0,0 @@
<title>OR match that should pass</title>
<link rel=match href=reftest_cycle_1-ref.html>
<style>
:root {background-color:green}
</style>


@ -1,5 +0,0 @@
<title>Reftest with cycle, all match</title>
<link rel=match href=reftest_cycle.html>
<style>
:root {background-color:green}
</style>

Some files were not shown because too many files changed in this diff.