Merge m-c to autoland, a=merge

MozReview-Commit-ID: B7oWq7qfpJ0
Wes Kocher 2017-07-24 18:13:05 -07:00
Parents: 827e22d063 2673406be5
Commit: a1eadef812
75 changed files: 22072 additions and 24650 deletions

View file

@ -1024,9 +1024,10 @@ function formatDate(datestr, unknown) {
if (!date.valueOf())
return unknown;
const dtOptions = { year: "numeric", month: "long", day: "numeric",
hour: "numeric", minute: "numeric", second: "numeric" };
return date.toLocaleString(undefined, dtOptions);
const dateTimeFormatter = Services.intl.createDateTimeFormat(undefined, {
dateStyle: "long", timeStyle: "long"
});
return dateTimeFormatter.format(date);
}
function doCopy() {

View file

@ -497,9 +497,10 @@ var gCookiesWindow = {
formatExpiresString(aExpires) {
if (aExpires) {
var date = new Date(1000 * aExpires);
const dtOptions = { year: "numeric", month: "long", day: "numeric",
hour: "numeric", minute: "numeric", second: "numeric" };
return date.toLocaleString(undefined, dtOptions);
const dateTimeFormatter = Services.intl.createDateTimeFormat(undefined, {
dateStyle: "long", timeStyle: "long"
});
return dateTimeFormatter.format(date);
}
return this._bundle.getString("expireAtEndOfSession");
},
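Both hunks above make the same substitution: the hand-rolled toLocaleString() options object is replaced by Services.intl.createDateTimeFormat with dateStyle/timeStyle. A minimal sketch of the new pattern, assuming chrome-privileged code where Services.intl is available (formatTimestamp and msSinceEpoch are illustrative names, not part of the patch):

// Sketch of the formatter usage introduced above.
function formatTimestamp(msSinceEpoch) {
  const date = new Date(msSinceEpoch);
  // undefined locale = application locale; the "long" styles roughly match
  // the year/month/day + hour/minute/second options being removed.
  const formatter = Services.intl.createDateTimeFormat(undefined, {
    dateStyle: "long",
    timeStyle: "long",
  });
  return formatter.format(date);
}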

View file

@ -10,12 +10,24 @@ const URL = "http://mochi.test:8888/browser/" +
const OUTER_VALUE = "outer-value-" + RAND;
function getEstimateChars() {
let snap;
if (gMultiProcessBrowser) {
snap = Services.telemetry.histogramSnapshots.content["FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS"];
} else {
snap = Services.telemetry.histogramSnapshots.parent["FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS"];
}
if (!snap) {
return 0;
}
return snap.counts[4];
}
// Test that we record the size of messages.
add_task(async function test_telemetry() {
Services.telemetry.canRecordExtended = true;
let suffix = gMultiProcessBrowser ? "#content" : "";
let histogram = Services.telemetry.getHistogramById("FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS" + suffix);
let snap1 = histogram.snapshot();
let prev = getEstimateChars()
let tab = BrowserTestUtils.addTab(gBrowser, URL);
let browser = tab.linkedBrowser;
@ -27,7 +39,7 @@ add_task(async function test_telemetry() {
// There is no good way to make sure that the parent received the histogram entries from the child processes.
// Let's stick to the ugly, spinning the event loop until we have a good approach (Bug 1357509).
await BrowserTestUtils.waitForCondition(() => {
return histogram.snapshot().counts[4] > snap1.counts[4];
return getEstimateChars() > prev;
});
Assert.ok(true);
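The helper above replaces the old "#content"-suffixed histogram id with a read of Services.telemetry.histogramSnapshots, which is keyed by process. A small sketch of that access pattern, with the guard for probes the child process has not reported yet (readBucketCount is an illustrative name):

// Read one bucket count from the per-process histogram snapshot; the id is
// absent from the snapshot until at least one sample has been accumulated,
// so fall back to 0 in that case.
function readBucketCount(id, bucketIndex, isMultiProcess) {
  const snapshots = isMultiProcess
    ? Services.telemetry.histogramSnapshots.content
    : Services.telemetry.histogramSnapshots.parent;
  const snap = snapshots[id];
  return snap ? snap.counts[bucketIndex] : 0;
}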

View file

@ -23,7 +23,6 @@ const {
const promise = require("promise");
const defer = require("devtools/shared/defer");
const { Task } = require("devtools/shared/task");
const { Class } = require("sdk/core/heritage");
const events = require("sdk/event/core");
const object = require("sdk/util/object");
const nodeConstants = require("devtools/shared/dom-node-constants.js");
@ -36,42 +35,42 @@ const HIDDEN_CLASS = "__fx-devtools-hide-shortcut__";
* Convenience API for building a list of attribute modifications
* for the `modifyAttributes` request.
*/
const AttributeModificationList = Class({
initialize: function (node) {
class AttributeModificationList {
constructor(node) {
this.node = node;
this.modifications = [];
},
}
apply: function () {
apply() {
let ret = this.node.modifyAttributes(this.modifications);
return ret;
},
}
destroy: function () {
destroy() {
this.node = null;
this.modification = null;
},
}
setAttributeNS: function (ns, name, value) {
setAttributeNS(ns, name, value) {
this.modifications.push({
attributeNamespace: ns,
attributeName: name,
newValue: value
});
},
}
setAttribute: function (name, value) {
setAttribute(name, value) {
this.setAttributeNS(undefined, name, value);
},
}
removeAttributeNS: function (ns, name) {
removeAttributeNS(ns, name) {
this.setAttributeNS(ns, name, undefined);
},
}
removeAttribute: function (name) {
removeAttribute(name) {
this.setAttributeNS(undefined, name, undefined);
}
});
}
/**
* Client side of the node actor.
@ -348,7 +347,7 @@ const NodeFront = FrontClassWithSpec(nodeSpec, {
* Return a new AttributeModificationList for this node.
*/
startModifyingAttributes: function () {
return AttributeModificationList(this);
return new AttributeModificationList(this);
},
_cacheAttributes: function () {
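This file and the styles front further down drop sdk/core/heritage in favor of plain ES6 classes. The mechanical translation is the same in both: initialize becomes constructor, comma-separated members become method syntax, and call sites gain new. A small before/after sketch of the shape (Foo and its members are placeholders, not APIs from this patch):

// Before (SDK heritage style, removed here):
//   const Foo = Class({
//     initialize: function (bar) { this.bar = bar; },
//     apply: function () { return this.bar; },
//   });
//   let f = Foo(42);        // callable without `new`
//
// After (plain ES6 class):
class Foo {
  constructor(bar) {
    this.bar = bar;
  }
  apply() {
    return this.bar;
  }
}
let f = new Foo(42);         // `new` is now required, hence the change to
                             // startModifyingAttributes() above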

View file

@ -16,7 +16,6 @@ const {
} = require("devtools/shared/specs/styles");
const promise = require("promise");
const { Task } = require("devtools/shared/task");
const { Class } = require("sdk/core/heritage");
const { RuleRewriter } = require("devtools/shared/css/parsing-utils");
/**
@ -295,15 +294,15 @@ exports.StyleRuleFront = StyleRuleFront;
* This lets the inspector use (mostly) the same code, regardless of
* whether the server implements setRuleText.
*/
var RuleModificationList = Class({
class RuleModificationList {
/**
* Initialize a RuleModificationList.
* @param {StyleRuleFront} rule the associated rule
*/
initialize: function (rule) {
constructor(rule) {
this.rule = rule;
this.modifications = [];
},
}
/**
* Apply the modifications in this object to the associated rule.
@ -311,9 +310,9 @@ var RuleModificationList = Class({
* @return {Promise} A promise which will be resolved when the modifications
* are complete; @see StyleRuleActor.modifyProperties.
*/
apply: function () {
apply() {
return this.rule.modifyProperties(this.modifications);
},
}
/**
* Add a "set" entry to the modification list.
@ -328,14 +327,14 @@ var RuleModificationList = Class({
* @param {String} priority the property's priority, either the empty
* string or "important"
*/
setProperty: function (index, name, value, priority) {
setProperty(index, name, value, priority) {
this.modifications.push({
type: "set",
name: name,
value: value,
priority: priority
});
},
}
/**
* Add a "remove" entry to the modification list.
@ -347,12 +346,12 @@ var RuleModificationList = Class({
* on an element's style.
* @param {String} name the name of the property to remove
*/
removeProperty: function (index, name) {
removeProperty(index, name) {
this.modifications.push({
type: "remove",
name: name
});
},
}
/**
* Rename a property. This implementation acts like
@ -370,9 +369,9 @@ var RuleModificationList = Class({
* code also defined the interface implemented by @see RuleRewriter.
* @param {String} newName new name of the property
*/
renameProperty: function (index, name) {
renameProperty(index, name) {
this.removeProperty(index, name);
},
}
/**
* Enable or disable a property. This implementation acts like
@ -388,11 +387,11 @@ var RuleModificationList = Class({
* @param {Boolean} isEnabled true if the property should be enabled;
* false if it should be disabled
*/
setPropertyEnabled: function (index, name, isEnabled) {
setPropertyEnabled(index, name, isEnabled) {
if (!isEnabled) {
this.removeProperty(index, name);
}
},
}
/**
* Create a new property. This implementation does nothing, because
@ -415,7 +414,7 @@ var RuleModificationList = Class({
* @param {Boolean} enabled True if the new property should be
* enabled, false if disabled
*/
createProperty: function () {
createProperty() {
// Nothing.
},
});
}
}

View file

@ -108,15 +108,25 @@ function waitForPageLoad(browser) {
function grabHistogramsFromContent(use_counter_middlefix, page_before = null) {
let telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);
let suffix = Services.appinfo.browserTabsRemoteAutostart ? "#content" : "";
let gather = () => [
telemetry.getHistogramById("USE_COUNTER2_" + use_counter_middlefix + "_PAGE" + suffix).snapshot().sum,
telemetry.getHistogramById("USE_COUNTER2_" + use_counter_middlefix + "_DOCUMENT" + suffix).snapshot().sum,
telemetry.getHistogramById("CONTENT_DOCUMENTS_DESTROYED" + suffix).snapshot().sum,
telemetry.getHistogramById("TOP_LEVEL_CONTENT_DOCUMENTS_DESTROYED" + suffix).snapshot().sum,
];
let gather = () => {
let snapshots;
if (Services.appinfo.browserTabsRemoteAutostart) {
snapshots = telemetry.histogramSnapshots.content;
} else {
snapshots = telemetry.histogramSnapshots.parent;
}
let checkGet = (probe) => {
return snapshots[probe] ? snapshots[probe].sum : 0;
};
return [
checkGet("USE_COUNTER2_" + use_counter_middlefix + "_PAGE"),
checkGet("USE_COUNTER2_" + use_counter_middlefix + "_DOCUMENT"),
checkGet("CONTENT_DOCUMENTS_DESTROYED"),
checkGet("TOP_LEVEL_CONTENT_DOCUMENTS_DESTROYED"),
];
};
return BrowserTestUtils.waitForCondition(() => {
return page_before != telemetry.getHistogramById("USE_COUNTER2_" + use_counter_middlefix + "_PAGE" + suffix).snapshot().sum;
return page_before != gather()[0];
}).then(gather, gather);
}

View file

@ -46,10 +46,12 @@ function testWyciwyg() {
}
function runTest() {
iframe = document.createElement('iframe');
iframe.setAttribute('mozbrowser', 'true');
document.body.appendChild(iframe);
testWyciwyg();
SpecialPowers.pushPrefEnv({set: [["network.http.rcwn.enabled", false]]}, _=>{
iframe = document.createElement('iframe');
iframe.setAttribute('mozbrowser', 'true');
document.body.appendChild(iframe);
testWyciwyg();
});
}
addEventListener('testready', runTest);
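The only change here is wrapping the test body in SpecialPowers.pushPrefEnv so that race-cache-with-network (network.http.rcwn.enabled) is disabled for the duration of the test; the harness restores the previous value afterwards. The callback form used above looks like this in isolation (the body comment is illustrative):

// Apply a test-scoped pref, then run the dependent setup in the callback.
SpecialPowers.pushPrefEnv(
  { set: [["network.http.rcwn.enabled", false]] },
  () => {
    // ... create the mozbrowser iframe and kick off the test here ...
  }
);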

View file

@ -5232,7 +5232,10 @@ CanvasRenderingContext2D::DrawImage(const CanvasImageSource& aImage,
}
{
gl->MakeCurrent();
if (!gl->MakeCurrent()) {
aError.Throw(NS_ERROR_NOT_AVAILABLE);
return;
}
GLuint videoTexture = 0;
gl->fGenTextures(1, &videoTexture);
// skiaGL expect upload on drawing, and uses texture 0 for texturing,

View file

@ -887,7 +887,10 @@ TexUnpackSurface::TexOrSubImage(bool isSubImage, bool needsRespec, const char* f
////
const auto& gl = webgl->gl;
MOZ_ALWAYS_TRUE( gl->MakeCurrent() );
if (!gl->MakeCurrent()) {
*out_error = LOCAL_GL_CONTEXT_LOST;
return true;
}
gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, dstAlignment);
if (webgl->IsWebGL2()) {

View file

@ -13,10 +13,7 @@ add_task(async function test_memory_distribution() {
Services.telemetry.canRecordExtended = true;
registerCleanupFunction(() => Services.telemetry.canRecordExtended = canRecordExtended);
// Note the #content suffix after the id. This is the only way this API lets us fetch the
// histogram entries reported by a content process.
let histogram = Services.telemetry.getKeyedHistogramById("FX_TAB_REMOTE_NAVIGATION_DELAY_MS#content");
histogram.clear();
Services.telemetry.snapshotSubsessionKeyedHistograms(true /*clear*/);
// Open a remote page in a new tab to trigger the WebNavigation:LoadURI.
let tab1 = await BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.com");
@ -32,11 +29,11 @@ add_task(async function test_memory_distribution() {
// There is no good way to make sure that the parent received the histogram entries from the child processes.
// Let's stick to the ugly, spinning the event loop until we have a good approach (Bug 1357509).
await BrowserTestUtils.waitForCondition(() => {
let s = histogram.snapshot();
return "WebNavigation:LoadURI" in s && "SessionStore:restoreTabContent" in s;
let s = Services.telemetry.snapshotSubsessionKeyedHistograms().content["FX_TAB_REMOTE_NAVIGATION_DELAY_MS"];
return s && "WebNavigation:LoadURI" in s && "SessionStore:restoreTabContent" in s;
});
let s = histogram.snapshot();
let s = Services.telemetry.snapshotSubsessionKeyedHistograms().content["FX_TAB_REMOTE_NAVIGATION_DELAY_MS"];
let restoreTabSnapshot = s["SessionStore:restoreTabContent"];
ok(restoreTabSnapshot.sum > 0, "Zero delay for the restoreTabContent case is unlikely.");
ok(restoreTabSnapshot.sum < 10000, "More than 10 seconds delay for the restoreTabContent case is unlikely.");
@ -45,7 +42,7 @@ add_task(async function test_memory_distribution() {
ok(loadURISnapshot.sum > 0, "Zero delay for the LoadURI case is unlikely.");
ok(loadURISnapshot.sum < 10000, "More than 10 seconds delay for the LoadURI case is unlikely.");
histogram.clear();
Services.telemetry.snapshotSubsessionKeyedHistograms(true /*clear*/);
await BrowserTestUtils.removeTab(tab2);
await BrowserTestUtils.removeTab(tab1);
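Rather than addressing the content-process keyed histogram through the "FX_TAB_REMOTE_NAVIGATION_DELAY_MS#content" id, the test now pulls the whole subsession keyed snapshot, indexes it by process and histogram id, and clears it through the same API. A sketch of the read-and-clear pattern used above (getKeyedContentSnapshot is an illustrative helper):

// Returns undefined until the content process has reported at least one
// sample for this keyed histogram.
function getKeyedContentSnapshot(id) {
  return Services.telemetry.snapshotSubsessionKeyedHistograms().content[id];
}

const snap = getKeyedContentSnapshot("FX_TAB_REMOTE_NAVIGATION_DELAY_MS");
if (snap && "WebNavigation:LoadURI" in snap) {
  // Each key holds an ordinary histogram snapshot (.sum, .counts, ...).
}

// Passing true clears the subsession data, replacing histogram.clear().
Services.telemetry.snapshotSubsessionKeyedHistograms(true /* clear */);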

View file

@ -15,26 +15,25 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=489415
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1018299">Test for MediaRecorder Principal Handling</a>
</div>
<video id="v1" preload="metadata"></video>
<video id="v2" preload="metadata"></video>
<video id="v1" preload="auto"></video>
<video id="v2" preload="auto"></video>
<pre id="test">
<script type="text/javascript">
SimpleTest.waitForExplicitFinish();
let throwOutside = e => setTimeout(() => { throw e; });
var pushPrefs = (...p) => SpecialPowers.pushPrefEnv({set: p});
var throwOutside = e => setTimeout(() => { throw e; });
// Generate a random key. The first load with that key will return
// data, the second and subsequent loads with that key will return a redirect
// to a different origin ('localhost:8888' will be redirected to 'example.org',
// and 'example.org' will be redirected to 'localhost:8888').
// and 'example.org' will be redirected to 'localhost:8888'). We rely on the
// fact that Ogg will do a seek to the end of the resource, triggering a new
// load with the same key which will return a same-origin resource.
// Loading data from two different origins should be detected by the media
// cache and result in a null principal so that the MediaRecorder usages below
// fail.
// This test relies on that preloading the metadata then loading a sufficiently
// long video will result in two separate requests to load the resource. This
// ends up relying on the impl of MediaCache and friends and we should probably
// replace this test with a more robust gtest or the like.
let key = Math.floor(Math.random()*100000000);
let interval;
@ -43,26 +42,31 @@ function testPrincipals(resource) {
todo(false, "No types supported");
return;
}
// First test: Load file from same-origin first, then get redirected to
// another origin before attempting to record stream.
let video = document.getElementById("v1");
video.src =
"http://mochi.test:8888/tests/dom/media/test/dynamic_redirect.sjs?key=v1_" +
key + "&res=" + resource.name;
video.load();
// To limit readahead, avoid racing with playback and "catching up" mode.
return new Promise(resolve => video.onloadeddata = resolve).then(() => {
video.play();
interval = setInterval(() => info("video.currentTime = "+ video.currentTime), 1000);
// Reduce cache size and cache-ahead to make HTMLMediaElement do partial requests.
return pushPrefs(['media.cache_readahead_limit', 2],
['media.cache_size', 192])
.then(() => {
// First test: Load file from same-origin first, then get redirected to
// another origin before attempting to record stream.
let video = document.getElementById("v1");
video.src =
"http://mochi.test:8888/tests/dom/media/test/dynamic_redirect.sjs?key=v1_" +
key + "&res=" + resource.name;
video.load();
// To limit readahead, avoid racing with playback and "catching up" mode.
return new Promise(resolve => video.oncanplaythrough = resolve).then(() => {
video.play();
interval = setInterval(() => info("video.currentTime = "+ video.currentTime), 1000);
let msg = "mediaRecorder.start() must throw SecurityError";
return new Promise(resolve => video.onplaying = resolve)
.then(() => waitUntil(() => video.currentTime > resource.duration / 5))
// Test failure of the next step only, so "catch-bypass" any errors above.
.then(() => Promise.resolve()
.then(() => new MediaRecorder(video.mozCaptureStreamUntilEnded()).start())
.then(() => ok(false, msg), e => is(e.name, "SecurityError", msg)), 0)
.then(() => clearInterval(interval));
let msg = "mediaRecorder.start() must throw SecurityError";
return new Promise(resolve => video.onplaying = resolve)
.then(() => waitUntil(() => video.currentTime > resource.duration / 2))
// Test failure of the next step only, so "catch-bypass" any errors above.
.then(() => Promise.resolve()
.then(() => new MediaRecorder(video.mozCaptureStreamUntilEnded()).start())
.then(() => ok(false, msg), e => is(e.name, "SecurityError", msg)), 0)
.then(() => clearInterval(interval));
});
})
.then(() => {
// Second test: Load file from same-origin first, but record ASAP, before
@ -78,7 +82,7 @@ function testPrincipals(resource) {
let msgNoThrow = "mediaRecorder.start() should not throw here";
let msgSecErr = "mediaRecorder.onerror must fire SecurityError";
let msgOnStop = "mediaRecorder.onstop must also have fired";
return new Promise(resolve => video.onloadeddata = resolve).then(() => {
return new Promise(resolve => video.onloadedmetadata = resolve).then(() => {
rec = new MediaRecorder(video.mozCaptureStreamUntilEnded());
rec.ondataavailable = e => data.push(e.data);
rec.start();
@ -97,15 +101,15 @@ function testPrincipals(resource) {
});
}
testPrincipals({ name:"pixel_aspect_ratio.mp4", type:"video/mp4", duration:28 })
testPrincipals(getPlayableVideo(gSeekTests))
.catch(e => throwOutside(e))
.then(() => SimpleTest.finish())
.catch(e => throwOutside(e));
let stop = stream => stream.getTracks().forEach(track => track.stop());
let wait = ms => new Promise(resolve => setTimeout(resolve, ms));
let waitUntil = f => new Promise(resolve => {
let ival = setInterval(() => f() && resolve(clearInterval(ival)), 100);
var stop = stream => stream.getTracks().forEach(track => track.stop());
var wait = ms => new Promise(resolve => setTimeout(resolve, ms));
var waitUntil = f => new Promise(resolve => {
var ival = setInterval(() => f() && resolve(clearInterval(ival)), 100);
});
</script>

View file

@ -23,6 +23,8 @@ interface HTMLIFrameElement : HTMLElement {
// attribute boolean seamless;
[CEReactions, SetterThrows, Pure]
attribute boolean allowFullscreen;
[CEReactions, SetterThrows, Pure]
attribute boolean allowPaymentRequest;
[CEReactions, SetterThrows, Pure]
attribute DOMString width;
[CEReactions, SetterThrows, Pure]
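The WebIDL addition exposes allowPaymentRequest on HTMLIFrameElement alongside allowFullscreen; it reflects the allowpaymentrequest content attribute, which grants an embedded browsing context permission to use the Payment Request API. A hedged usage sketch from page script (the URL is illustrative):

// Let a cross-origin iframe construct PaymentRequest objects; without the
// attribute the embedded document is expected to be denied.
const iframe = document.createElement("iframe");
iframe.allowPaymentRequest = true;             // reflects allowpaymentrequest
iframe.src = "https://shop.example/checkout";  // illustrative URL
document.body.appendChild(iframe);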

View file

@ -3745,7 +3745,7 @@ XMLHttpRequestMainThread::HandleProgressTimerCallback()
}
if (InUploadPhase()) {
if (mUpload && !mUploadComplete) {
if (mUpload && !mUploadComplete && mFlagHadUploadListenersOnSend) {
DispatchProgressEvent(mUpload, ProgressEventType::progress,
mUploadTransferred, mUploadTotal);
}

View file

@ -535,7 +535,9 @@ GLContext::InitWithPrefixImpl(const char* prefix, bool trygl)
////////////////
MakeCurrent();
if (!MakeCurrent()) {
return false;
}
const std::string versionStr = (const char*)fGetString(LOCAL_GL_VERSION);
if (versionStr.find("OpenGL ES") == 0) {
@ -2449,8 +2451,9 @@ GLContext::FlushIfHeavyGLCallsSinceLastFlush()
if (!mHeavyGLCallsSinceLastFlush) {
return;
}
MakeCurrent();
fFlush();
if (MakeCurrent()) {
fFlush();
}
}
/*static*/ bool
@ -2509,7 +2512,7 @@ SplitByChar(const nsACString& str, const char delim, std::vector<nsCString>* con
out->push_back(nsCString(substr));
}
void
bool
GLContext::Readback(SharedSurface* src, gfx::DataSourceSurface* dest)
{
MOZ_ASSERT(src && dest);
@ -2517,7 +2520,9 @@ GLContext::Readback(SharedSurface* src, gfx::DataSourceSurface* dest)
MOZ_ASSERT(dest->GetFormat() == (src->mHasAlpha ? SurfaceFormat::B8G8R8A8
: SurfaceFormat::B8G8R8X8));
MakeCurrent();
if (!MakeCurrent()) {
return false;
}
SharedSurface* prev = GetLockedSurface();
@ -2596,6 +2601,8 @@ GLContext::Readback(SharedSurface* src, gfx::DataSourceSurface* dest)
if (prev)
prev->LockProd();
}
return true;
}
// Do whatever tear-down is necessary after drawing to our offscreen FBO,
@ -2897,7 +2904,9 @@ GLContext::InitOffscreen(const gfx::IntSize& size, const SurfaceCaps& caps)
if (!CreateScreenBuffer(size, caps))
return false;
MakeCurrent();
if (!MakeCurrent()) {
return false;
}
fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, 0);
fScissor(0, 0, size.width, size.height);
fViewport(0, 0, size.width, size.height);

View file

@ -3675,7 +3675,7 @@ public:
void FlushIfHeavyGLCallsSinceLastFlush();
static bool ShouldSpew();
static bool ShouldDumpExts();
void Readback(SharedSurface* src, gfx::DataSourceSurface* dest);
bool Readback(SharedSurface* src, gfx::DataSourceSurface* dest);
////

View file

@ -143,7 +143,10 @@ ShareableCanvasLayer::UpdateTarget(DrawTarget* aDestTarget)
if (destSize == readSize && destFormat == format) {
RefPtr<DataSourceSurface> data =
Factory::CreateWrappingDataSourceSurface(destData, destStride, destSize, destFormat);
mGLContext->Readback(frontbuffer, data);
if (!mGLContext->Readback(frontbuffer, data)) {
aDestTarget->ReleaseBits(destData);
return false;
}
if (needsPremult) {
gfxUtils::PremultiplyDataSurface(data, data);
}
@ -161,7 +164,9 @@ ShareableCanvasLayer::UpdateTarget(DrawTarget* aDestTarget)
}
// Readback handles Flush/MarkDirty.
mGLContext->Readback(frontbuffer, resultSurf);
if (!mGLContext->Readback(frontbuffer, resultSurf)) {
return false;
}
if (needsPremult) {
gfxUtils::PremultiplyDataSurface(resultSurf, resultSurf);
}

View file

@ -68,7 +68,10 @@ BasicCanvasLayer::UpdateSurface()
}
// Readback handles Flush/MarkDirty.
mGLContext->Readback(frontbuffer, resultSurf);
if (!mGLContext->Readback(frontbuffer, resultSurf)) {
NS_WARNING("Failed to read back canvas surface.");
return nullptr;
}
if (needsPremult) {
gfxUtils::PremultiplyDataSurface(resultSurf, resultSurf);
}

View file

@ -364,10 +364,16 @@ CrossProcessCompositorBridgeParent::DidComposite(
{
sIndirectLayerTreesLock->AssertCurrentThreadOwns();
if (LayerTransactionParent *layerTree = sIndirectLayerTrees[aId].mLayerTree) {
Unused << SendDidComposite(aId, layerTree->GetPendingTransactionId(), aCompositeStart, aCompositeEnd);
layerTree->SetPendingTransactionId(0);
uint64_t transactionId = layerTree->GetPendingTransactionId();
if (transactionId) {
Unused << SendDidComposite(aId, transactionId, aCompositeStart, aCompositeEnd);
layerTree->SetPendingTransactionId(0);
}
} else if (WebRenderBridgeParent* wrbridge = sIndirectLayerTrees[aId].mWrBridge) {
Unused << SendDidComposite(aId, wrbridge->FlushPendingTransactionIds(), aCompositeStart, aCompositeEnd);
uint64_t transactionId = wrbridge->FlushPendingTransactionIds();
if (transactionId) {
Unused << SendDidComposite(aId, transactionId, aCompositeStart, aCompositeEnd);
}
}
}

View file

@ -420,10 +420,10 @@ WebRenderBridgeParent::HandleDPEnd(const gfx::IntSize& aSize,
// to early-return from RecvDPEnd without doing so.
AutoWebRenderBridgeParentAsyncMessageSender autoAsyncMessageSender(this, &aToDestroy);
++mWrEpoch; // Update webrender epoch
ProcessWebRenderCommands(aSize, aCommands, wr::NewEpoch(mWrEpoch),
uint32_t wrEpoch = GetNextWrEpoch();
ProcessWebRenderCommands(aSize, aCommands, wr::NewEpoch(wrEpoch),
aContentSize, dl, dlDesc, aIdNameSpace);
HoldPendingTransactionId(mWrEpoch, aTransactionId);
HoldPendingTransactionId(wrEpoch, aTransactionId);
mScrollData = aScrollData;
UpdateAPZ();
@ -846,8 +846,7 @@ WebRenderBridgeParent::RecvClearCachedResources()
mCompositorBridge->ObserveLayerUpdate(GetLayersId(), GetChildLayerObserverEpoch(), false);
// Clear resources
++mWrEpoch; // Update webrender epoch
mApi->ClearRootDisplayList(wr::NewEpoch(mWrEpoch), mPipelineId);
mApi->ClearRootDisplayList(wr::NewEpoch(GetNextWrEpoch()), mPipelineId);
// Schedule composition to clean up Pipeline
mCompositorScheduler->ScheduleComposition();
DeleteOldImages();
@ -896,7 +895,7 @@ WebRenderBridgeParent::UpdateWebRender(CompositorVsyncScheduler* aScheduler,
mCompositableHolder = aHolder;
mAnimStorage = aAnimStorage;
++mWrEpoch; // Update webrender epoch
Unused << GetNextWrEpoch(); // Update webrender epoch
// Register pipeline to updated CompositableHolder.
mCompositableHolder->AddPipeline(mPipelineId);
}
@ -1180,12 +1179,16 @@ WebRenderBridgeParent::FlushTransactionIdsForEpoch(const wr::Epoch& aEpoch)
{
uint64_t id = 0;
while (!mPendingTransactionIds.empty()) {
id = mPendingTransactionIds.front().mId;
if (mPendingTransactionIds.front().mEpoch == aEpoch) {
mPendingTransactionIds.pop();
int64_t diff =
static_cast<int64_t>(aEpoch.mHandle) - static_cast<int64_t>(mPendingTransactionIds.front().mEpoch.mHandle);
if (diff < 0) {
break;
}
id = mPendingTransactionIds.front().mId;
mPendingTransactionIds.pop();
if (diff == 0) {
break;
}
}
return id;
}
@ -1270,8 +1273,8 @@ WebRenderBridgeParent::ClearResources()
return;
}
++mWrEpoch; // Update webrender epoch
mApi->ClearRootDisplayList(wr::NewEpoch(mWrEpoch), mPipelineId);
uint32_t wrEpoch = GetNextWrEpoch();
mApi->ClearRootDisplayList(wr::NewEpoch(wrEpoch), mPipelineId);
// Schedule composition to clean up Pipeline
mCompositorScheduler->ScheduleComposition();
// XXX webrender does not hava a way to delete a group of resources/keys,
@ -1298,7 +1301,7 @@ WebRenderBridgeParent::ClearResources()
}
mAsyncCompositables.Clear();
mCompositableHolder->RemovePipeline(mPipelineId, wr::NewEpoch(mWrEpoch));
mCompositableHolder->RemovePipeline(mPipelineId, wr::NewEpoch(wrEpoch));
for (std::unordered_set<uint64_t>::iterator iter = mActiveAnimations.begin(); iter != mActiveAnimations.end(); iter++) {
mAnimStorage->ClearById(*iter);
@ -1418,5 +1421,12 @@ WebRenderBridgeParent::GetTextureFactoryIdentifier()
mApi->GetUseANGLE());
}
uint32_t
WebRenderBridgeParent::GetNextWrEpoch()
{
MOZ_RELEASE_ASSERT(mWrEpoch != UINT32_MAX);
return ++mWrEpoch;
}
} // namespace layers
} // namespace mozilla

View file

@ -252,6 +252,8 @@ private:
// id of this bridge, and may return null if the APZC wasn't found.
already_AddRefed<AsyncPanZoomController> GetTargetAPZC(const FrameMetrics::ViewID& aId);
uint32_t GetNextWrEpoch();
private:
struct PendingTransactionId {
PendingTransactionId(wr::Epoch aEpoch, uint64_t aId)

View file

@ -81,8 +81,7 @@ typedef Histogram::Count Count;
// static
const size_t Histogram::kBucketCount_MAX = 16384u;
Histogram* Histogram::FactoryGet(const std::string& name,
Sample minimum,
Histogram* Histogram::FactoryGet(Sample minimum,
Sample maximum,
size_t bucket_count,
Flags flags) {
@ -94,29 +93,20 @@ Histogram* Histogram::FactoryGet(const std::string& name,
if (maximum > kSampleType_MAX - 1)
maximum = kSampleType_MAX - 1;
if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
// Extra variable is not needed... but this keeps this section basically
// identical to other derived classes in this file (and compiler will
// optimize away the extra variable.
Histogram* tentative_histogram =
new Histogram(name, minimum, maximum, bucket_count);
tentative_histogram->InitializeBucketRange();
tentative_histogram->SetFlags(flags);
histogram =
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
histogram = new Histogram(minimum, maximum, bucket_count);
histogram->InitializeBucketRange();
histogram->SetFlags(flags);
DCHECK_EQ(HISTOGRAM, histogram->histogram_type());
DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
return histogram;
}
Histogram* Histogram::FactoryTimeGet(const std::string& name,
TimeDelta minimum,
Histogram* Histogram::FactoryTimeGet(TimeDelta minimum,
TimeDelta maximum,
size_t bucket_count,
Flags flags) {
return FactoryGet(name, minimum.InMilliseconds(), maximum.InMilliseconds(),
return FactoryGet(minimum.InMilliseconds(), maximum.InMilliseconds(),
bucket_count, flags);
}
@ -160,80 +150,6 @@ void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) {
DCHECK(false);
}
// The following methods provide a graphical histogram display.
void Histogram::WriteHTMLGraph(std::string* output) const {
// TBD(jar) Write a nice HTML bar chart, with divs an mouse-overs etc.
output->append("<PRE>");
WriteAscii(true, "<br>", output);
output->append("</PRE>");
}
void Histogram::WriteAscii(bool graph_it, const std::string& newline,
std::string* output) const {
// Get local (stack) copies of all effectively volatile class data so that we
// are consistent across our output activities.
SampleSet snapshot;
SnapshotSample(&snapshot);
Count sample_count = snapshot.TotalCount();
WriteAsciiHeader(snapshot, sample_count, output);
output->append(newline);
// Prepare to normalize graphical rendering of bucket contents.
double max_size = 0;
if (graph_it)
max_size = GetPeakBucketSize(snapshot);
// Calculate space needed to print bucket range numbers. Leave room to print
// nearly the largest bucket range without sliding over the histogram.
size_t largest_non_empty_bucket = bucket_count() - 1;
while (0 == snapshot.counts(largest_non_empty_bucket)) {
if (0 == largest_non_empty_bucket)
break; // All buckets are empty.
--largest_non_empty_bucket;
}
// Calculate largest print width needed for any of our bucket range displays.
size_t print_width = 1;
for (size_t i = 0; i < bucket_count(); ++i) {
if (snapshot.counts(i)) {
size_t width = GetAsciiBucketRange(i).size() + 1;
if (width > print_width)
print_width = width;
}
}
int64_t remaining = sample_count;
int64_t past = 0;
// Output the actual histogram graph.
for (size_t i = 0; i < bucket_count(); ++i) {
Count current = snapshot.counts(i);
if (!current && !PrintEmptyBucket(i))
continue;
remaining -= current;
std::string range = GetAsciiBucketRange(i);
output->append(range);
for (size_t j = 0; range.size() + j < print_width + 1; ++j)
output->push_back(' ');
if (0 == current &&
i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) {
while (i < bucket_count() - 1 && 0 == snapshot.counts(i + 1))
++i;
output->append("... ");
output->append(newline);
continue; // No reason to plot emptiness.
}
double current_size = GetBucketSize(current, i);
if (graph_it)
WriteAsciiBucketGraph(current_size, max_size, output);
WriteAsciiBucketContext(past, current, remaining, i, output);
output->append(newline);
past += current;
}
DCHECK_EQ(sample_count, past);
}
//------------------------------------------------------------------------------
// Methods for the validating a sample and a related histogram.
//------------------------------------------------------------------------------
@ -270,12 +186,10 @@ Histogram::FindCorruption(const SampleSet& snapshot) const
// then we may try to use 2 or 3 for this slop value.
const int kCommonRaceBasedCountMismatch = 1;
if (delta > 0) {
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
if (delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_HIGH_ERROR;
} else {
DCHECK_GT(0, delta);
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
if (-delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_LOW_ERROR;
}
@ -337,41 +251,29 @@ Histogram::SampleSet::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf)
return aMallocSizeOf(&counts_[0]);
}
Histogram::Histogram(const std::string& name, Sample minimum,
Sample maximum, size_t bucket_count)
Histogram::Histogram(Sample minimum, Sample maximum, size_t bucket_count)
: sample_(),
histogram_name_(name),
declared_min_(minimum),
declared_max_(maximum),
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
range_checksum_(0),
recording_enabled_(true) {
range_checksum_(0) {
Initialize();
}
Histogram::Histogram(const std::string& name, TimeDelta minimum,
TimeDelta maximum, size_t bucket_count)
Histogram::Histogram(TimeDelta minimum, TimeDelta maximum, size_t bucket_count)
: sample_(),
histogram_name_(name),
declared_min_(static_cast<int> (minimum.InMilliseconds())),
declared_max_(static_cast<int> (maximum.InMilliseconds())),
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
range_checksum_(0),
recording_enabled_(true) {
range_checksum_(0) {
Initialize();
}
Histogram::~Histogram() {
if (StatisticsRecorder::dump_on_exit()) {
std::string output;
WriteAscii(true, "\n", &output);
CHROMIUM_LOG(INFO) << output;
}
// Just to make sure most derived class did this properly...
DCHECK(ValidateBucketRanges());
}
@ -560,57 +462,6 @@ double Histogram::GetPeakBucketSize(const SampleSet& snapshot) const {
return max;
}
void Histogram::WriteAsciiHeader(const SampleSet& snapshot,
Count sample_count,
std::string* output) const {
StringAppendF(output,
"Histogram: %s recorded %d samples",
histogram_name().c_str(),
sample_count);
int64_t snapshot_sum = snapshot.sum();
if (0 == sample_count) {
DCHECK_EQ(snapshot_sum, 0);
} else {
double average = static_cast<float>(snapshot_sum) / sample_count;
StringAppendF(output, ", average = %.1f", average);
}
if (flags_ & ~kHexRangePrintingFlag)
StringAppendF(output, " (flags = 0x%x)", flags_ & ~kHexRangePrintingFlag);
}
void Histogram::WriteAsciiBucketContext(const int64_t past,
const Count current,
const int64_t remaining,
const size_t i,
std::string* output) const {
double scaled_sum = (past + current + remaining) / 100.0;
WriteAsciiBucketValue(current, scaled_sum, output);
if (0 < i) {
double percentage = past / scaled_sum;
StringAppendF(output, " {%3.1f%%}", percentage);
}
}
void Histogram::WriteAsciiBucketValue(Count current, double scaled_sum,
std::string* output) const {
StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
}
void Histogram::WriteAsciiBucketGraph(double current_size, double max_size,
std::string* output) const {
const int k_line_length = 72; // Maximal horizontal width of graph.
int x_count = static_cast<int>(k_line_length * (current_size / max_size)
+ 0.5);
int x_remainder = k_line_length - x_count;
while (0 < x_count--)
output->append("-");
output->append("O");
while (0 < x_remainder--)
output->append(" ");
}
//------------------------------------------------------------------------------
// Methods for the Histogram::SampleSet class
//------------------------------------------------------------------------------
@ -665,8 +516,7 @@ void Histogram::SampleSet::Add(const SampleSet& other) {
LinearHistogram::~LinearHistogram() {
}
Histogram* LinearHistogram::FactoryGet(const std::string& name,
Sample minimum,
Histogram* LinearHistogram::FactoryGet(Sample minimum,
Sample maximum,
size_t bucket_count,
Flags flags) {
@ -677,26 +527,22 @@ Histogram* LinearHistogram::FactoryGet(const std::string& name,
if (maximum > kSampleType_MAX - 1)
maximum = kSampleType_MAX - 1;
if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
LinearHistogram* tentative_histogram =
new LinearHistogram(name, minimum, maximum, bucket_count);
tentative_histogram->InitializeBucketRange();
tentative_histogram->SetFlags(flags);
histogram =
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
LinearHistogram* linear_histogram =
new LinearHistogram(minimum, maximum, bucket_count);
linear_histogram->InitializeBucketRange();
linear_histogram->SetFlags(flags);
histogram = linear_histogram;
DCHECK_EQ(LINEAR_HISTOGRAM, histogram->histogram_type());
DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
return histogram;
}
Histogram* LinearHistogram::FactoryTimeGet(const std::string& name,
TimeDelta minimum,
Histogram* LinearHistogram::FactoryTimeGet(TimeDelta minimum,
TimeDelta maximum,
size_t bucket_count,
Flags flags) {
return FactoryGet(name, minimum.InMilliseconds(), maximum.InMilliseconds(),
return FactoryGet(minimum.InMilliseconds(), maximum.InMilliseconds(),
bucket_count, flags);
}
@ -715,18 +561,16 @@ void LinearHistogram::SetRangeDescriptions(
}
}
LinearHistogram::LinearHistogram(const std::string& name,
Sample minimum,
LinearHistogram::LinearHistogram(Sample minimum,
Sample maximum,
size_t bucket_count)
: Histogram(name, minimum >= 1 ? minimum : 1, maximum, bucket_count) {
: Histogram(minimum >= 1 ? minimum : 1, maximum, bucket_count) {
}
LinearHistogram::LinearHistogram(const std::string& name,
TimeDelta minimum,
LinearHistogram::LinearHistogram(TimeDelta minimum,
TimeDelta maximum,
size_t bucket_count)
: Histogram(name, minimum >= TimeDelta::FromMilliseconds(1) ?
: Histogram(minimum >= TimeDelta::FromMilliseconds(1) ?
minimum : TimeDelta::FromMilliseconds(1),
maximum, bucket_count) {
}
@ -769,16 +613,13 @@ bool LinearHistogram::PrintEmptyBucket(size_t index) const {
// This section provides implementation for BooleanHistogram.
//------------------------------------------------------------------------------
Histogram* BooleanHistogram::FactoryGet(const std::string& name, Flags flags) {
Histogram* BooleanHistogram::FactoryGet(Flags flags) {
Histogram* histogram(NULL);
if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
BooleanHistogram* tentative_histogram = new BooleanHistogram(name);
tentative_histogram->InitializeBucketRange();
tentative_histogram->SetFlags(flags);
histogram =
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
BooleanHistogram* tentative_histogram = new BooleanHistogram();
tentative_histogram->InitializeBucketRange();
tentative_histogram->SetFlags(flags);
histogram = tentative_histogram;
DCHECK_EQ(BOOLEAN_HISTOGRAM, histogram->histogram_type());
return histogram;
@ -792,8 +633,8 @@ void BooleanHistogram::AddBoolean(bool value) {
Add(value ? 1 : 0);
}
BooleanHistogram::BooleanHistogram(const std::string& name)
: LinearHistogram(name, 1, 2, 3) {
BooleanHistogram::BooleanHistogram()
: LinearHistogram(1, 2, 3) {
}
void
@ -809,24 +650,22 @@ BooleanHistogram::Accumulate(Sample value, Count count, size_t index)
//------------------------------------------------------------------------------
Histogram *
FlagHistogram::FactoryGet(const std::string &name, Flags flags)
FlagHistogram::FactoryGet(Flags flags)
{
Histogram *h(nullptr);
if (!StatisticsRecorder::FindHistogram(name, &h)) {
FlagHistogram *fh = new FlagHistogram(name);
fh->InitializeBucketRange();
fh->SetFlags(flags);
size_t zero_index = fh->BucketIndex(0);
fh->LinearHistogram::Accumulate(0, 1, zero_index);
h = StatisticsRecorder::RegisterOrDeleteDuplicate(fh);
}
FlagHistogram *fh = new FlagHistogram();
fh->InitializeBucketRange();
fh->SetFlags(flags);
size_t zero_index = fh->BucketIndex(0);
fh->LinearHistogram::Accumulate(0, 1, zero_index);
h = fh;
return h;
}
FlagHistogram::FlagHistogram(const std::string &name)
: BooleanHistogram(name), mSwitched(false) {
FlagHistogram::FlagHistogram()
: BooleanHistogram(), mSwitched(false) {
}
Histogram::ClassType
@ -889,22 +728,20 @@ FlagHistogram::Clear() {
//------------------------------------------------------------------------------
Histogram *
CountHistogram::FactoryGet(const std::string &name, Flags flags)
CountHistogram::FactoryGet(Flags flags)
{
Histogram *h(nullptr);
if (!StatisticsRecorder::FindHistogram(name, &h)) {
CountHistogram *fh = new CountHistogram(name);
fh->InitializeBucketRange();
fh->SetFlags(flags);
h = StatisticsRecorder::RegisterOrDeleteDuplicate(fh);
}
CountHistogram *fh = new CountHistogram();
fh->InitializeBucketRange();
fh->SetFlags(flags);
h = fh;
return h;
}
CountHistogram::CountHistogram(const std::string &name)
: LinearHistogram(name, 1, 2, 3) {
CountHistogram::CountHistogram()
: LinearHistogram(1, 2, 3) {
}
Histogram::ClassType
@ -942,8 +779,7 @@ CountHistogram::AddSampleSet(const SampleSet& sample) {
// CustomHistogram:
//------------------------------------------------------------------------------
Histogram* CustomHistogram::FactoryGet(const std::string& name,
const std::vector<Sample>& custom_ranges,
Histogram* CustomHistogram::FactoryGet(const std::vector<Sample>& custom_ranges,
Flags flags) {
Histogram* histogram(NULL);
@ -960,13 +796,10 @@ Histogram* CustomHistogram::FactoryGet(const std::string& name,
DCHECK_LT(ranges.back(), kSampleType_MAX);
if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
CustomHistogram* tentative_histogram = new CustomHistogram(name, ranges);
tentative_histogram->InitializedCustomBucketRange(ranges);
tentative_histogram->SetFlags(flags);
histogram =
StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
}
CustomHistogram* custom_histogram = new CustomHistogram(ranges);
custom_histogram->InitializedCustomBucketRange(ranges);
custom_histogram->SetFlags(flags);
histogram = custom_histogram;
DCHECK_EQ(histogram->histogram_type(), CUSTOM_HISTOGRAM);
DCHECK(histogram->HasConstructorArguments(ranges[1], ranges.back(),
@ -978,9 +811,8 @@ Histogram::ClassType CustomHistogram::histogram_type() const {
return CUSTOM_HISTOGRAM;
}
CustomHistogram::CustomHistogram(const std::string& name,
const std::vector<Sample>& custom_ranges)
: Histogram(name, custom_ranges[1], custom_ranges.back(),
CustomHistogram::CustomHistogram(const std::vector<Sample>& custom_ranges)
: Histogram(custom_ranges[1], custom_ranges.back(),
custom_ranges.size()) {
DCHECK_GT(custom_ranges.size(), 1u);
DCHECK_EQ(custom_ranges[0], 0);
@ -1000,177 +832,4 @@ double CustomHistogram::GetBucketSize(Count current, size_t i) const {
return 1;
}
//------------------------------------------------------------------------------
// The next section handles global (central) support for all histograms, as well
// as startup/teardown of this service.
//------------------------------------------------------------------------------
// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
DCHECK(!histograms_);
if (lock_ == NULL) {
// This will leak on purpose. It's the only way to make sure we won't race
// against the static uninitialization of the module while one of our
// static methods relying on the lock get called at an inappropriate time
// during the termination phase. Since it's a static data member, we will
// leak one per process, which would be similar to the instance allocated
// during static initialization and released only on process termination.
lock_ = new base::Lock;
}
base::AutoLock auto_lock(*lock_);
histograms_ = new HistogramMap;
}
StatisticsRecorder::~StatisticsRecorder() {
DCHECK(histograms_ && lock_);
if (dump_on_exit_) {
std::string output;
WriteGraph("", &output);
CHROMIUM_LOG(INFO) << output;
}
// Clean up.
HistogramMap* histograms = NULL;
{
base::AutoLock auto_lock(*lock_);
histograms = histograms_;
histograms_ = NULL;
for (HistogramMap::iterator it = histograms->begin();
histograms->end() != it;
++it) {
// No other clients permanently hold Histogram references, so we
// have the only one and it is safe to delete it.
delete it->second;
}
}
delete histograms;
// We don't delete lock_ on purpose to avoid having to properly protect
// against it going away after we checked for NULL in the static methods.
}
// static
bool StatisticsRecorder::IsActive() {
if (lock_ == NULL)
return false;
base::AutoLock auto_lock(*lock_);
return NULL != histograms_;
}
Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) {
DCHECK(histogram->HasValidRangeChecksum());
if (lock_ == NULL)
return histogram;
base::AutoLock auto_lock(*lock_);
if (!histograms_)
return histogram;
const std::string name = histogram->histogram_name();
HistogramMap::iterator it = histograms_->find(name);
// Avoid overwriting a previous registration.
if (histograms_->end() == it) {
(*histograms_)[name] = histogram;
} else {
delete histogram; // We already have one by this name.
histogram = it->second;
}
return histogram;
}
// static
void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
std::string* output) {
if (!IsActive())
return;
output->append("<html><head><title>About Histograms");
if (!query.empty())
output->append(" - " + query);
output->append("</title>"
// We'd like the following no-cache... but it doesn't work.
// "<META HTTP-EQUIV=\"Pragma\" CONTENT=\"no-cache\">"
"</head><body>");
Histograms snapshot;
GetSnapshot(query, &snapshot);
for (Histograms::iterator it = snapshot.begin();
it != snapshot.end();
++it) {
(*it)->WriteHTMLGraph(output);
output->append("<br><hr><br>");
}
output->append("</body></html>");
}
// static
void StatisticsRecorder::WriteGraph(const std::string& query,
std::string* output) {
if (!IsActive())
return;
if (query.length())
StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
else
output->append("Collections of all histograms\n");
Histograms snapshot;
GetSnapshot(query, &snapshot);
for (Histograms::iterator it = snapshot.begin();
it != snapshot.end();
++it) {
(*it)->WriteAscii(true, "\n", output);
output->append("\n");
}
}
// static
void StatisticsRecorder::GetHistograms(Histograms* output) {
if (lock_ == NULL)
return;
base::AutoLock auto_lock(*lock_);
if (!histograms_)
return;
for (HistogramMap::iterator it = histograms_->begin();
histograms_->end() != it;
++it) {
DCHECK_EQ(it->first, it->second->histogram_name());
output->push_back(it->second);
}
}
bool StatisticsRecorder::FindHistogram(const std::string& name,
Histogram** histogram) {
if (lock_ == NULL)
return false;
base::AutoLock auto_lock(*lock_);
if (!histograms_)
return false;
HistogramMap::iterator it = histograms_->find(name);
if (histograms_->end() == it)
return false;
*histogram = it->second;
return true;
}
// private static
void StatisticsRecorder::GetSnapshot(const std::string& query,
Histograms* snapshot) {
if (lock_ == NULL)
return;
base::AutoLock auto_lock(*lock_);
if (!histograms_)
return;
for (HistogramMap::iterator it = histograms_->begin();
histograms_->end() != it;
++it) {
if (it->first.find(query) != std::string::npos)
snapshot->push_back(it->second);
}
}
// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;
// static
bool StatisticsRecorder::dump_on_exit_ = false;
} // namespace base

View file

@ -55,206 +55,6 @@
namespace base {
//------------------------------------------------------------------------------
// Provide easy general purpose histogram in a macro, just like stats counters.
// The first four macros use 50 buckets.
#define HISTOGRAM_TIMES(name, sample) HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromSeconds(10), 50)
#define HISTOGRAM_COUNTS(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000000, 50)
#define HISTOGRAM_COUNTS_100(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 100, 50)
#define HISTOGRAM_COUNTS_10000(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 10000, 50)
#define HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::Histogram::FactoryGet(name, min, max, bucket_count, \
base::Histogram::kNoFlags); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->Add(sample); \
} while (0)
#define HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
// For folks that need real specific times, use this to select a precise range
// of times you want plotted, and the number of buckets you want used.
#define HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
base::Histogram::kNoFlags); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->AddTime(sample); \
} while (0)
// DO NOT USE THIS. It is being phased out, in favor of HISTOGRAM_CUSTOM_TIMES.
#define HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
base::Histogram::kNoFlags); \
DCHECK_EQ(name, counter->histogram_name()); \
if ((sample) < (max)) counter->AddTime(sample); \
} while (0)
// Support histograming of an enumerated value. The samples should always be
// less than boundary_value.
#define HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
boundary_value + 1, base::Histogram::kNoFlags); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->Add(sample); \
} while (0)
#define HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::CustomHistogram::FactoryGet(name, custom_ranges, \
base::Histogram::kNoFlags); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->Add(sample); \
} while (0)
//------------------------------------------------------------------------------
// Define Debug vs non-debug flavors of macros.
#ifndef NDEBUG
#define DHISTOGRAM_TIMES(name, sample) HISTOGRAM_TIMES(name, sample)
#define DHISTOGRAM_COUNTS(name, sample) HISTOGRAM_COUNTS(name, sample)
#define DHISTOGRAM_PERCENTAGE(name, under_one_hundred) HISTOGRAM_PERCENTAGE(\
name, under_one_hundred)
#define DHISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count)
#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count)
#define DHISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)
#define DHISTOGRAM_ENUMERATION(name, sample, boundary_value) \
HISTOGRAM_ENUMERATION(name, sample, boundary_value)
#define DHISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges)
#else // NDEBUG
#define DHISTOGRAM_TIMES(name, sample) do {} while (0)
#define DHISTOGRAM_COUNTS(name, sample) do {} while (0)
#define DHISTOGRAM_PERCENTAGE(name, under_one_hundred) do {} while (0)
#define DHISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
do {} while (0)
#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
do {} while (0)
#define DHISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
do {} while (0)
#define DHISTOGRAM_ENUMERATION(name, sample, boundary_value) do {} while (0)
#define DHISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
do {} while (0)
#endif // NDEBUG
//------------------------------------------------------------------------------
// The following macros provide typical usage scenarios for callers that wish
// to record histogram data, and have the data submitted/uploaded via UMA.
// Not all systems support such UMA, but if they do, the following macros
// should work with the service.
#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromSeconds(10), 50)
#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(10), \
base::TimeDelta::FromMinutes(3), 50)
// Use this macro when times can routinely be much longer than 10 seconds.
#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromHours(1), 50)
#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
base::Histogram::kUmaTargetedHistogramFlag); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->AddTime(sample); \
} while (0)
// DO NOT USE THIS. It is being phased out, in favor of HISTOGRAM_CUSTOM_TIMES.
#define UMA_HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
base::Histogram::kUmaTargetedHistogramFlag); \
DCHECK_EQ(name, counter->histogram_name()); \
if ((sample) < (max)) counter->AddTime(sample); \
} while (0)
#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000000, 50)
#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 100, 50)
#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 10000, 50)
#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::Histogram::FactoryGet(name, min, max, bucket_count, \
base::Histogram::kUmaTargetedHistogramFlag); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->Add(sample); \
} while (0)
#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1000, 500000, 50)
#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000, 50)
#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
#define UMA_HISTOGRAM_BOOLEAN(name, sample) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::BooleanHistogram::FactoryGet(name, \
base::Histogram::kUmaTargetedHistogramFlag); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->AddBoolean(sample); \
} while (0)
#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
boundary_value + 1, base::Histogram::kUmaTargetedHistogramFlag); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->Add(sample); \
} while (0)
#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) do { \
static base::Histogram* counter(NULL); \
if (!counter) \
counter = base::CustomHistogram::FactoryGet(name, custom_ranges, \
base::Histogram::kUmaTargetedHistogramFlag); \
DCHECK_EQ(name, counter->histogram_name()); \
counter->Add(sample); \
} while (0)
//------------------------------------------------------------------------------
class BooleanHistogram;
@ -373,26 +173,20 @@ class Histogram {
//----------------------------------------------------------------------------
// minimum should start from 1. 0 is invalid as a minimum. 0 is an implicit
// default underflow bucket.
static Histogram* FactoryGet(const std::string& name,
Sample minimum,
static Histogram* FactoryGet(Sample minimum,
Sample maximum,
size_t bucket_count,
Flags flags);
static Histogram* FactoryTimeGet(const std::string& name,
base::TimeDelta minimum,
static Histogram* FactoryTimeGet(base::TimeDelta minimum,
base::TimeDelta maximum,
size_t bucket_count,
Flags flags);
virtual ~Histogram();
void Add(int value);
void Subtract(int value);
// TODO: Currently recording_enabled_ is not used by any Histogram class, but
// rather examined only by the telemetry code (via IsRecordingEnabled).
// Move handling to Histogram's Add() etc after simplifying Histogram.
void SetRecordingEnabled(bool aEnabled) { recording_enabled_ = aEnabled; };
bool IsRecordingEnabled() const { return recording_enabled_; };
// This method is an interface, used only by BooleanHistogram.
virtual void AddBoolean(bool value);
@ -408,11 +202,6 @@ class Histogram {
// This method is an interface, used only by LinearHistogram.
virtual void SetRangeDescriptions(const DescriptionPair descriptions[]);
// The following methods provide graphical histogram displays.
void WriteHTMLGraph(std::string* output) const;
void WriteAscii(bool graph_it, const std::string& newline,
std::string* output) const;
// Support generic flagging of Histograms.
// 0x1 Currently used to mark this histogram to be recorded by UMA..
// 0x8000 means print ranges in hex.
@ -431,7 +220,6 @@ class Histogram {
// Accessors for factory constuction, serialization and testing.
//----------------------------------------------------------------------------
virtual ClassType histogram_type() const;
const std::string& histogram_name() const { return histogram_name_; }
Sample declared_min() const { return declared_min_; }
Sample declared_max() const { return declared_max_; }
virtual Sample ranges(size_t i) const;
@ -453,12 +241,8 @@ class Histogram {
bool HasValidRangeChecksum() const;
protected:
Histogram(const std::string& name, Sample minimum,
Sample maximum, size_t bucket_count);
Histogram(const std::string& name, TimeDelta minimum,
TimeDelta maximum, size_t bucket_count);
virtual ~Histogram();
Histogram(Sample minimum, Sample maximum, size_t bucket_count);
Histogram(TimeDelta minimum, TimeDelta maximum, size_t bucket_count);
// Initialize ranges_ mapping.
void InitializeBucketRange();
@ -504,8 +288,6 @@ class Histogram {
SampleSet sample_;
private:
friend class StatisticsRecorder; // To allow it to delete duplicates.
// Post constructor initialization.
void Initialize();
@ -518,34 +300,12 @@ class Histogram {
// Find out how large the (graphically) the largest bucket will appear to be.
double GetPeakBucketSize(const SampleSet& snapshot) const;
// Write a common header message describing this histogram.
void WriteAsciiHeader(const SampleSet& snapshot,
Count sample_count, std::string* output) const;
// Write information about previous, current, and next buckets.
// Information such as cumulative percentage, etc.
void WriteAsciiBucketContext(const int64_t past, const Count current,
const int64_t remaining, const size_t i,
std::string* output) const;
// Write textual description of the bucket contents (relative to histogram).
// Output is the count in the buckets, as well as the percentage.
void WriteAsciiBucketValue(Count current, double scaled_sum,
std::string* output) const;
// Produce actual graph (set of blank vs non blank char's) for a bucket.
void WriteAsciiBucketGraph(double current_size, double max_size,
std::string* output) const;
//----------------------------------------------------------------------------
// Table for generating Crc32 values.
static const uint32_t kCrcTable[256];
//----------------------------------------------------------------------------
// Invariant values set at/near construction time
// ASCII version of original name given to the constructor. All identically
// named instances will be coalesced cross-project.
const std::string histogram_name_;
Sample declared_min_; // Less than this goes into counts_[0]
Sample declared_max_; // Over this goes into counts_[bucket_count_ - 1].
size_t bucket_count_; // Dimension of counts_[].
@ -564,9 +324,6 @@ class Histogram {
// have been corrupted.
uint32_t range_checksum_;
// When false, new samples are completely ignored.
mozilla::Atomic<bool, mozilla::Relaxed> recording_enabled_;
DISALLOW_COPY_AND_ASSIGN(Histogram);
};
@ -580,13 +337,11 @@ class LinearHistogram : public Histogram {
/* minimum should start from 1. 0 as a minimum is invalid. 0 is an implicit
default underflow bucket. */
static Histogram* FactoryGet(const std::string& name,
Sample minimum,
static Histogram* FactoryGet(Sample minimum,
Sample maximum,
size_t bucket_count,
Flags flags);
static Histogram* FactoryTimeGet(const std::string& name,
TimeDelta minimum,
static Histogram* FactoryTimeGet(TimeDelta minimum,
TimeDelta maximum,
size_t bucket_count,
Flags flags);
@ -601,11 +356,9 @@ class LinearHistogram : public Histogram {
virtual void SetRangeDescriptions(const DescriptionPair descriptions[]);
protected:
LinearHistogram(const std::string& name, Sample minimum,
Sample maximum, size_t bucket_count);
LinearHistogram(Sample minimum, Sample maximum, size_t bucket_count);
LinearHistogram(const std::string& name, TimeDelta minimum,
TimeDelta maximum, size_t bucket_count);
LinearHistogram(TimeDelta minimum, TimeDelta maximum, size_t bucket_count);
// Initialize ranges_ mapping.
void InitializeBucketRange();
@ -634,7 +387,7 @@ class LinearHistogram : public Histogram {
// BooleanHistogram is a histogram for booleans.
class BooleanHistogram : public LinearHistogram {
public:
static Histogram* FactoryGet(const std::string& name, Flags flags);
static Histogram* FactoryGet(Flags flags);
virtual ClassType histogram_type() const;
@ -643,7 +396,7 @@ class BooleanHistogram : public LinearHistogram {
virtual void Accumulate(Sample value, Count count, size_t index);
protected:
explicit BooleanHistogram(const std::string& name);
explicit BooleanHistogram();
DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
};
@ -654,7 +407,7 @@ class BooleanHistogram : public LinearHistogram {
class FlagHistogram : public BooleanHistogram
{
public:
static Histogram *FactoryGet(const std::string &name, Flags flags);
static Histogram *FactoryGet(Flags flags);
virtual ClassType histogram_type() const;
@ -665,7 +418,7 @@ public:
virtual void Clear();
private:
explicit FlagHistogram(const std::string &name);
explicit FlagHistogram();
bool mSwitched;
DISALLOW_COPY_AND_ASSIGN(FlagHistogram);
@ -675,7 +428,7 @@ private:
class CountHistogram : public LinearHistogram
{
public:
static Histogram *FactoryGet(const std::string &name, Flags flags);
static Histogram *FactoryGet(Flags flags);
virtual ClassType histogram_type() const;
@ -684,7 +437,7 @@ public:
virtual void AddSampleSet(const SampleSet& sample);
private:
explicit CountHistogram(const std::string &name);
explicit CountHistogram();
DISALLOW_COPY_AND_ASSIGN(CountHistogram);
};
@ -695,16 +448,14 @@ private:
class CustomHistogram : public Histogram {
public:
static Histogram* FactoryGet(const std::string& name,
const std::vector<Sample>& custom_ranges,
static Histogram* FactoryGet(const std::vector<Sample>& custom_ranges,
Flags flags);
// Overridden from Histogram:
virtual ClassType histogram_type() const;
protected:
CustomHistogram(const std::string& name,
const std::vector<Sample>& custom_ranges);
explicit CustomHistogram(const std::vector<Sample>& custom_ranges);
// Initialize ranges_ mapping.
void InitializedCustomBucketRange(const std::vector<Sample>& custom_ranges);
@ -713,68 +464,6 @@ class CustomHistogram : public Histogram {
DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
};
//------------------------------------------------------------------------------
// StatisticsRecorder handles all histograms in the system. It provides a
// general place for histograms to register, and supports a global API for
// accessing (i.e., dumping, or graphing) the data in all the histograms.
class StatisticsRecorder {
public:
typedef std::vector<Histogram*> Histograms;
StatisticsRecorder();
~StatisticsRecorder();
// Find out if histograms can now be registered into our list.
static bool IsActive();
// Register, or add a new histogram to the collection of statistics. If an
// identically named histogram is already registered, then the argument
// |histogram| will deleted. The returned value is always the registered
// histogram (either the argument, or the pre-existing registered histogram).
static Histogram* RegisterOrDeleteDuplicate(Histogram* histogram);
// Methods for printing histograms. Only histograms which have query as
// a substring are written to output (an empty string will process all
// registered histograms).
static void WriteHTMLGraph(const std::string& query, std::string* output);
static void WriteGraph(const std::string& query, std::string* output);
// Method for extracting histograms which were marked for use by UMA.
static void GetHistograms(Histograms* output);
// Find a histogram by name. It matches the exact name. This method is thread
// safe. If a matching histogram is not found, then the |histogram| is
// not changed.
static bool FindHistogram(const std::string& query, Histogram** histogram);
static bool dump_on_exit() { return dump_on_exit_; }
static void set_dump_on_exit(bool enable) { dump_on_exit_ = enable; }
// GetSnapshot copies some of the pointers to registered histograms into the
// caller supplied vector (Histograms). Only histograms with names matching
// query are returned. The query must be a substring of histogram name for its
// pointer to be copied.
static void GetSnapshot(const std::string& query, Histograms* snapshot);
private:
// We keep all registered histograms in a map, from name to histogram.
typedef std::map<std::string, Histogram*> HistogramMap;
static HistogramMap* histograms_;
// lock protects access to the above map.
static Lock* lock_;
// Dump all known histograms to log.
static bool dump_on_exit_;
DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_H_
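For orientation, a minimal C++ sketch of the factory API as declared above, now that this patch drops the histogram name from the factory methods; kNoFlags is an assumed member of the Flags enum, which is not shown in this hunk:

// Sketch only: create an exponential histogram and record one sample.
// The minimum must be >= 1; 0 is the implicit underflow bucket.
#include "base/histogram.h"

void SketchRecordSample(int value)
{
  base::Histogram* h =
      base::Histogram::FactoryGet(/* minimum */ 1,
                                  /* maximum */ 10000,
                                  /* bucket_count */ 50,
                                  base::Histogram::kNoFlags);  // assumed flag name
  h->Add(value);
}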

View file

@ -161,8 +161,6 @@ void MessagePumpForUI::PumpOutPendingPaintMessages() {
if (state_->should_quit) // Handle WM_QUIT.
break;
}
// Histogram what was really being used, to help to adjust kMaxPeekCount.
DHISTOGRAM_COUNTS("Loop.PumpOutPendingPaintMessages Peeks", peek_count);
}
//-----------------------------------------------------------------------------

View file

@ -29,3 +29,16 @@ segment_capacity = 12288
[PMessagePort::ReceiveData]
segment_capacity = 12288
#------------------------------------------------------------
# Small-size messages.
#------------------------------------------------------------
[PCompositorBridge::DidComposite]
segment_capacity = 128
[PBrowser::RealMouseMoveEvent]
segment_capacity = 192
[PCompositorBridge::PTextureConstructor]
segment_capacity = 192
[PLayerTransaction::InitReadLocks]
segment_capacity = 256
[PHttpBackgroundChannel::OnStopRequest]
segment_capacity = 192

View file

@ -804,9 +804,16 @@ class GCRuntime
void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
bool triggerGCForTooMuchMalloc() {
if (!triggerGC(JS::gcreason::TOO_MUCH_MALLOC))
return false;
// Even though this method may be called off the main thread it is safe
// to access mallocCounter here since triggerGC() will return false in
// that case.
stats().recordTrigger(mallocCounter.bytes(), mallocCounter.maxBytes());
return triggerGC(JS::gcreason::TOO_MUCH_MALLOC);
return true;
}
int32_t getMallocBytes() const { return mallocCounter.bytes(); }
size_t maxMallocBytesAllocated() const { return mallocCounter.maxBytes(); }
bool isTooMuchMalloc() const { return mallocCounter.isTooMuchMalloc(); }
@ -930,7 +937,8 @@ class GCRuntime
void startTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked);
void joinTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked);
private:
// Delete an empty zone group after its contents have been merged.
void deleteEmptyZoneGroup(ZoneGroup* group);
private:
enum IncrementalResult
@ -1089,7 +1097,10 @@ class GCRuntime
UnprotectedData<ZoneGroup*> systemZoneGroup;
// List of all zone groups (protected by the GC lock).
ActiveThreadOrGCTaskData<ZoneGroupVector> groups;
private:
ActiveThreadOrGCTaskData<ZoneGroupVector> groups_;
public:
ZoneGroupVector& groups() { return groups_.ref(); }
// The unique atoms zone, which has no zone group.
WriteOnceData<Zone*> atomsZone;

View file

@ -73,6 +73,8 @@ JS::Zone::Zone(JSRuntime* rt, ZoneGroup* group)
Zone::~Zone()
{
MOZ_ASSERT(compartments_.ref().empty());
JSRuntime* rt = runtimeFromAnyThread();
if (this == rt->gc.systemZone)
rt->gc.systemZone = nullptr;
@ -87,7 +89,8 @@ Zone::~Zone()
#endif
}
bool Zone::init(bool isSystemArg)
bool
Zone::init(bool isSystemArg)
{
isSystem = isSystemArg;
return uniqueIds().init() &&
@ -376,6 +379,21 @@ Zone::addTypeDescrObject(JSContext* cx, HandleObject obj)
return true;
}
void
Zone::deleteEmptyCompartment(JSCompartment* comp)
{
MOZ_ASSERT(comp->zone() == this);
MOZ_ASSERT(arenas.checkEmptyArenaLists());
for (auto& i : compartments()) {
if (i == comp) {
compartments().erase(&i);
comp->destroy(runtimeFromActiveCooperatingThread()->defaultFreeOp());
return;
}
}
MOZ_CRASH("Compartment not found");
}
ZoneList::ZoneList()
: head(nullptr), tail(nullptr)
{}

View file

@ -161,6 +161,7 @@ struct Zone : public JS::shadow::Zone,
explicit Zone(JSRuntime* rt, js::ZoneGroup* group);
~Zone();
MOZ_MUST_USE bool init(bool isSystem);
void destroy(js::FreeOp *fop);
private:
js::ZoneGroup* const group_;
@ -622,6 +623,9 @@ struct Zone : public JS::shadow::Zone,
keepShapeTables_ = b;
}
// Delete an empty compartment after its contents have been merged.
void deleteEmptyCompartment(JSCompartment* comp);
private:
js::ZoneGroupData<js::jit::JitZone*> jitZone_;
@ -654,8 +658,8 @@ class ZoneGroupsIter
public:
explicit ZoneGroupsIter(JSRuntime* rt) : iterMarker(&rt->gc) {
it = rt->gc.groups.ref().begin();
end = rt->gc.groups.ref().end();
it = rt->gc.groups().begin();
end = rt->gc.groups().end();
if (!done() && (*it)->usedByHelperThread)
next();

View file

@ -130,4 +130,20 @@ ZoneGroup::ionLazyLinkListAdd(jit::IonBuilder* builder)
ionLazyLinkListSize_++;
}
void
ZoneGroup::deleteEmptyZone(Zone* zone)
{
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
MOZ_ASSERT(zone->group() == this);
MOZ_ASSERT(zone->compartments().empty());
for (auto& i : zones()) {
if (i == zone) {
zones().erase(&i);
zone->destroy(runtime->defaultFreeOp());
return;
}
}
MOZ_CRASH("Zone not found");
}
} // namespace js

View file

@ -76,6 +76,9 @@ class ZoneGroup
// See the useExclusiveLocking field above.
void setUseExclusiveLocking() { useExclusiveLocking = true; }
// Delete an empty zone after its contents have been merged.
void deleteEmptyZone(Zone* zone);
#ifdef DEBUG
private:
// The number of possible bailing places encounters before forcefully bailing

View file

@ -0,0 +1,6 @@
if (helperThreadCount() === 0)
quit();
var fe = "vv";
for (i = 0; i < 24; i++) fe += fe;
offThreadCompileScript(fe, {});

View file

@ -863,6 +863,7 @@ struct JSCompartment
~JSCompartment();
MOZ_MUST_USE bool init(JSContext* maybecx);
void destroy(js::FreeOp* fop);
MOZ_MUST_USE inline bool wrap(JSContext* cx, JS::MutableHandleValue vp);

View file

@ -1220,11 +1220,12 @@ GCRuntime::finish()
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
js_delete(comp.get());
zone->compartments().clear();
js_delete(zone.get());
}
}
groups.ref().clear();
groups().clear();
FreeChunkPool(rt, fullChunks_.ref());
FreeChunkPool(rt, availableChunks_.ref());
@ -3483,6 +3484,25 @@ JS::Zone::sweepUniqueIds(js::FreeOp* fop)
uniqueIds().sweep();
}
void
JSCompartment::destroy(FreeOp* fop)
{
JSRuntime* rt = fop->runtime();
if (auto callback = rt->destroyCompartmentCallback)
callback(fop, this);
if (principals())
JS_DropPrincipals(TlsContext.get(), principals());
fop->delete_(this);
rt->gc.stats().sweptCompartment();
}
void
Zone::destroy(FreeOp* fop)
{
fop->delete_(this);
fop->runtime()->gc.stats().sweptZone();
}
/*
* It's simpler if we preserve the invariant that every zone has at least one
* compartment. If we know we're deleting the entire zone, then
@ -3495,8 +3515,9 @@ JS::Zone::sweepUniqueIds(js::FreeOp* fop)
void
Zone::sweepCompartments(FreeOp* fop, bool keepAtleastOne, bool destroyingRuntime)
{
JSRuntime* rt = runtimeFromActiveCooperatingThread();
JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
MOZ_ASSERT(!compartments().empty());
mozilla::DebugOnly<JSRuntime*> rt = runtimeFromActiveCooperatingThread();
JSCompartment** read = compartments().begin();
JSCompartment** end = compartments().end();
@ -3512,12 +3533,7 @@ Zone::sweepCompartments(FreeOp* fop, bool keepAtleastOne, bool destroyingRuntime
*/
bool dontDelete = read == end && !foundOne && keepAtleastOne;
if ((!comp->marked && !dontDelete) || destroyingRuntime) {
if (callback)
callback(fop, comp);
if (comp->principals())
JS_DropPrincipals(TlsContext.get(), comp->principals());
js_delete(comp);
rt->gc.stats().sweptCompartment();
comp->destroy(fop);
} else {
*write++ = comp;
foundOne = true;
@ -3530,6 +3546,8 @@ Zone::sweepCompartments(FreeOp* fop, bool keepAtleastOne, bool destroyingRuntime
void
GCRuntime::sweepZones(FreeOp* fop, ZoneGroup* group, bool destroyingRuntime)
{
MOZ_ASSERT(!group->zones().empty());
Zone** read = group->zones().begin();
Zone** end = group->zones().end();
Zone** write = read;
@ -3558,8 +3576,7 @@ GCRuntime::sweepZones(FreeOp* fop, ZoneGroup* group, bool destroyingRuntime)
zone->sweepCompartments(fop, false, destroyingRuntime);
MOZ_ASSERT(zone->compartments().empty());
MOZ_ASSERT_IF(arenasEmptyAtShutdown, zone->typeDescrObjects().empty());
fop->delete_(zone);
stats().sweptZone();
zone->destroy(fop);
continue;
}
zone->sweepCompartments(fop, true, destroyingRuntime);
@ -3580,8 +3597,8 @@ GCRuntime::sweepZoneGroups(FreeOp* fop, bool destroyingRuntime)
assertBackgroundSweepingFinished();
ZoneGroup** read = groups.ref().begin();
ZoneGroup** end = groups.ref().end();
ZoneGroup** read = groups().begin();
ZoneGroup** end = groups().end();
ZoneGroup** write = read;
while (read < end) {
@ -3595,7 +3612,7 @@ GCRuntime::sweepZoneGroups(FreeOp* fop, bool destroyingRuntime)
*write++ = group;
}
}
groups.ref().shrinkTo(write - groups.ref().begin());
groups().shrinkTo(write - groups().begin());
}
#ifdef DEBUG
@ -7367,7 +7384,7 @@ js::NewCompartment(JSContext* cx, JSPrincipals* principals,
}
if (groupHolder) {
if (!rt->gc.groups.ref().append(group)) {
if (!rt->gc.groups().append(group)) {
ReportOutOfMemory(cx);
return nullptr;
}
@ -7397,6 +7414,10 @@ gc::MergeCompartments(JSCompartment* source, JSCompartment* target)
MOZ_ASSERT(source->creationOptions().addonIdOrNull() ==
target->creationOptions().addonIdOrNull());
MOZ_ASSERT(!source->hasBeenEntered());
MOZ_ASSERT(source->zone()->compartments().length() == 1);
MOZ_ASSERT(source->zone()->group()->zones().length() == 1);
JSContext* cx = source->runtimeFromActiveCooperatingThread()->activeContextFromOwnThread();
MOZ_ASSERT(!source->zone()->wasGCStarted());
@ -7489,6 +7510,32 @@ gc::MergeCompartments(JSCompartment* source, JSCompartment* target)
source->scriptNameMap->clear();
}
// The source compartment is now completely empty, and is the only
// compartment in its zone, which is the only zone in its group. Delete
// compartment, zone and group without waiting for this to be cleaned up by
// a full GC.
Zone* sourceZone = source->zone();
ZoneGroup* sourceGroup = sourceZone->group();
sourceZone->deleteEmptyCompartment(source);
sourceGroup->deleteEmptyZone(sourceZone);
cx->runtime()->gc.deleteEmptyZoneGroup(sourceGroup);
}
void
GCRuntime::deleteEmptyZoneGroup(ZoneGroup* group)
{
MOZ_ASSERT(group->zones().empty());
MOZ_ASSERT(groups().length() > 1);
for (auto& i : groups()) {
if (i == group) {
groups().erase(&i);
js_delete(group);
return;
}
}
MOZ_CRASH("ZoneGroup not found");
}
void

View file

@ -1598,9 +1598,10 @@ GlobalHelperThreadState::mergeParseTaskCompartment(JSContext* cx, ParseTask* par
JS::AutoAssertNoGC nogc(cx);
LeaveParseTaskZone(cx->runtime(), parseTask);
AutoCompartment ac(cx, parseTask->parseGlobal);
{
AutoCompartment ac(cx, parseTask->parseGlobal);
// Generator functions don't have Function.prototype as prototype but a
// different function object, so the IdentifyStandardPrototype trick
// below won't work. Just special-case it.

View file

@ -35,8 +35,6 @@
#include "nsIXULRuntime.h"
#include "GeckoProfiler.h"
#include "base/histogram.h"
#ifdef ANDROID
#include <android/log.h>
#endif
@ -1225,11 +1223,6 @@ XRE_XPCShellMain(int argc, char** argv, char** envp,
mozilla::LogModule::Init();
// An initializer to initialize histogram collection
// used by telemetry.
auto telStats =
mozilla::MakeUnique<base::StatisticsRecorder>();
char aLocal;
profiler_init(&aLocal);
@ -1559,8 +1552,6 @@ XRE_XPCShellMain(int argc, char** argv, char** envp,
rv = NS_ShutdownXPCOM( nullptr );
MOZ_ASSERT(NS_SUCCEEDED(rv), "NS_ShutdownXPCOM failed");
telStats = nullptr;
#ifdef MOZ_CRASHREPORTER
// Shut down the crashreporter service to prevent leaking some strings it holds.
if (CrashReporter::GetEnabled())

View file

@ -833,13 +833,12 @@ nsCSSRendering::PaintBorderWithStyleBorder(nsPresContext* aPresContext,
nsCSSBorderImageRenderer::CreateBorderImageRenderer(aPresContext, aForFrame, aBorderArea,
aStyleBorder, aDirtyRect, aSkipSides,
irFlags, &result);
if (aStyleBorder.IsBorderImageLoaded()) {
if (renderer) {
result &= renderer->DrawBorderImage(aPresContext, aRenderingContext,
aForFrame, aDirtyRect);
}
return result;
// The renderer was created successfully, which means the border image is
// ready to be used.
if (renderer) {
MOZ_ASSERT(result == DrawResult::SUCCESS);
return renderer->DrawBorderImage(aPresContext, aRenderingContext,
aForFrame, aDirtyRect);
}
}

View file

@ -100,13 +100,19 @@ Preferences::DirtyCallback()
// ignore it for now.
return;
}
if (gHashTable && sPreferences && !sPreferences->mDirty) {
if (!gHashTable || !sPreferences) {
return;
}
if (sPreferences->mProfileShutdown) {
NS_WARNING("Setting user pref after profile shutdown.");
return;
}
if (!sPreferences->mDirty) {
sPreferences->mDirty = true;
NS_WARNING_ASSERTION(!sPreferences->mProfileShutdown,
"Setting user pref after profile shutdown.");
if (sPreferences->AllowOffMainThreadSave() && !sPreferences->mSavePending) {
if (sPreferences->mCurrentFile &&
sPreferences->AllowOffMainThreadSave()
&& !sPreferences->mSavePending) {
sPreferences->mSavePending = true;
static const int PREF_DELAY_MS = 500;
NS_DelayedDispatchToCurrentThread(
@ -782,15 +788,22 @@ Preferences::Init()
}
// static
nsresult
Preferences::ResetAndReadUserPrefs()
void
Preferences::InitializeUserPrefs()
{
sPreferences->ResetUserPrefs();
MOZ_ASSERT(!sPreferences->mCurrentFile, "Should only initialize prefs once");
nsresult rv = sPreferences->UseDefaultPrefFile();
sPreferences->UseUserPrefFile();
// prefs which are set before we initialize the profile are silently discarded.
// This is stupid, but there are various tests which depend on this behavior.
sPreferences->ResetUserPrefs();
nsCOMPtr<nsIFile> prefsFile = sPreferences->ReadSavedPrefs();
sPreferences->ReadUserOverridePrefs();
sPreferences->mDirty = false;
// Don't set mCurrentFile until we're done so that dirty flags work properly
sPreferences->mCurrentFile = prefsFile.forget();
// Migrate the old prerelease telemetry pref
if (!Preferences::GetBool(kOldTelemetryPref, true)) {
@ -799,7 +812,6 @@ Preferences::ResetAndReadUserPrefs()
}
sPreferences->NotifyServiceObservers(NS_PREFSERVICE_READ_TOPIC_ID);
return rv;
}
NS_IMETHODIMP
@ -1082,18 +1094,16 @@ Preferences::NotifyServiceObservers(const char *aTopic)
return NS_OK;
}
nsresult
Preferences::UseDefaultPrefFile()
already_AddRefed<nsIFile>
Preferences::ReadSavedPrefs()
{
nsCOMPtr<nsIFile> file;
nsresult rv = NS_GetSpecialDirectory(NS_APP_PREFS_50_FILE,
getter_AddRefs(file));
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
return nullptr;
}
mCurrentFile = file;
rv = openPrefFile(file);
if (rv == NS_ERROR_FILE_NOT_FOUND) {
// this is a normal case for new users
@ -1107,11 +1117,11 @@ Preferences::UseDefaultPrefFile()
MakeBackupPrefFile(file);
}
return rv;
return file.forget();
}
void
Preferences::UseUserPrefFile()
Preferences::ReadUserOverridePrefs()
{
nsCOMPtr<nsIFile> aFile;
nsresult rv = NS_GetSpecialDirectory(NS_APP_PREFS_50_DIR,

View file

@ -72,9 +72,9 @@ public:
static bool IsServiceAvailable();
/**
* Reset loaded user prefs then read them
* Initialize user prefs from prefs.js/user.js
*/
static nsresult ResetAndReadUserPrefs();
static void InitializeUserPrefs();
/**
* Returns the singleton instance which is addreffed.
@ -436,13 +436,17 @@ protected:
nsresult NotifyServiceObservers(const char *aSubject);
/**
* Reads the default pref file or, if that failed, try to save a new one.
* Loads the prefs.js file from the profile, or creates a new one.
*
* @return NS_OK if either action succeeded,
* or the error code related to the read attempt.
* @return the prefs file if successful, or nullptr on failure.
*/
nsresult UseDefaultPrefFile();
void UseUserPrefFile();
already_AddRefed<nsIFile> ReadSavedPrefs();
/**
* Loads the user.js file from the profile if present.
*/
void ReadUserOverridePrefs();
nsresult MakeBackupPrefFile(nsIFile *aFile);
// Default pref file save can be blocking or not.
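A hypothetical caller-side sketch of the renamed entry point (the actual call site is outside this hunk, and the caller function name is invented for illustration):

#include "mozilla/Preferences.h"

// Sketch: InitializeUserPrefs() replaces ResetAndReadUserPrefs() and is meant
// to run exactly once, after the profile directory is known. It reads prefs.js
// via ReadSavedPrefs(), then user.js via ReadUserOverridePrefs(), and only then
// sets mCurrentFile so the dirty flag starts taking effect.
static void OnProfileDirectoryKnown()  // hypothetical caller
{
  mozilla::Preferences::InitializeUserPrefs();
}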

View file

@ -789,8 +789,9 @@ nsHttpConnectionMgr::UpdateCoalescingForNewConn(nsHttpConnection *newConn,
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
MOZ_ASSERT(newConn);
MOZ_ASSERT(newConn->ConnectionInfo());
MOZ_ASSERT(ent);
MOZ_DIAGNOSTIC_ASSERT(ent);
MOZ_ASSERT(mCT.GetWeak(newConn->ConnectionInfo()->HashKey()) == ent);
CheckConnEntryMustBeInmCT(ent->mConnInfo);
nsHttpConnection *existingConn = FindCoalescableConnection(ent, true);
if (existingConn) {
@ -1253,6 +1254,7 @@ nsHttpConnectionMgr::AtActiveConnectionLimit(nsConnectionEntry *ent, uint32_t ca
void
nsHttpConnectionMgr::ClosePersistentConnections(nsConnectionEntry *ent)
{
CheckConnEntryMustBeInmCT(ent->mConnInfo);
LOG(("nsHttpConnectionMgr::ClosePersistentConnections [ci=%s]\n",
ent->mConnInfo->HashKey().get()));
while (ent->mIdleConns.Length()) {
@ -1661,6 +1663,7 @@ nsHttpConnectionMgr::DispatchTransaction(nsConnectionEntry *ent,
nsHttpTransaction *trans,
nsHttpConnection *conn)
{
CheckConnEntryMustBeInmCT(ent->mConnInfo);
uint32_t caps = trans->Caps();
int32_t priority = trans->Priority();
nsresult rv;
@ -1756,6 +1759,7 @@ nsHttpConnectionMgr::DispatchAbstractTransaction(nsConnectionEntry *ent,
int32_t priority)
{
MOZ_DIAGNOSTIC_ASSERT(ent);
CheckConnEntryMustBeInmCT(ent->mConnInfo);
nsresult rv;
MOZ_ASSERT(!conn->UsingSpdy(),
"Spdy Must Not Use DispatchAbstractTransaction");
@ -1904,6 +1908,7 @@ void
nsHttpConnectionMgr::AddActiveConn(nsHttpConnection *conn,
nsConnectionEntry *ent)
{
CheckConnEntryMustBeInmCT(ent->mConnInfo);
ent->mActiveConns.AppendElement(conn);
mNumActiveConns++;
ActivateTimeoutTick();
@ -2091,7 +2096,8 @@ nsHttpConnection *
nsHttpConnectionMgr::GetSpdyActiveConn(nsConnectionEntry *ent)
{
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
MOZ_ASSERT(ent);
MOZ_DIAGNOSTIC_ASSERT(ent);
CheckConnEntryMustBeInmCT(ent->mConnInfo);
nsHttpConnection *experienced = nullptr;
nsHttpConnection *noExperience = nullptr;
@ -2413,6 +2419,7 @@ nsHttpConnectionMgr::OnMsgCancelTransaction(int32_t reason, ARefBase *param)
for (uint32_t index = 0;
ent && (index < ent->mActiveConns.Length());
++index) {
CheckConnEntryMustBeInmCT(ent->mConnInfo);
nsHttpConnection *activeConn = ent->mActiveConns[index];
nsAHttpTransaction *liveTransaction = activeConn->Transaction();
if (liveTransaction && liveTransaction->IsNullTransaction()) {
@ -4061,6 +4068,7 @@ nsHalfOpenSocket::OnOutputStreamReady(nsIAsyncOutputStream *out)
MOZ_DIAGNOSTIC_ASSERT(out == mStreamOut || out == mBackupStreamOut,
"stream mismatch");
MOZ_DIAGNOSTIC_ASSERT(mEnt);
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
LOG(("nsHalfOpenSocket::OnOutputStreamReady [this=%p ent=%s %s]\n",
this, mEnt->mConnInfo->Origin(),
@ -4119,7 +4127,9 @@ nsHalfOpenSocket::OnOutputStreamReady(nsIAsyncOutputStream *out)
mFastOpenInProgress = false;
mConnectionNegotiatingFastOpen = nullptr;
}
MOZ_DIAGNOSTIC_ASSERT(mEnt);
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
nsresult rv = SetupConn(out, false);
if (mEnt) {
mEnt->mDoNotDestroy = false;
@ -4135,6 +4145,8 @@ nsHalfOpenSocket::FastOpenEnabled()
MOZ_DIAGNOSTIC_ASSERT(mEnt);
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
if (!mEnt) {
return false;
}
@ -4187,12 +4199,15 @@ nsHalfOpenSocket::StartFastOpen()
MOZ_DIAGNOSTIC_ASSERT(!mBackupTransport);
MOZ_DIAGNOSTIC_ASSERT(mEnt);
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
LOG(("nsHalfOpenSocket::StartFastOpen [this=%p]\n",
this));
RefPtr<nsHalfOpenSocket> deleteProtector(this);
mFastOpenInProgress = true;
mEnt->mDoNotDestroy = true;
// Remove this HalfOpen from mEnt->mHalfOpens.
// The new connection will take care of closing this HalfOpen from now on!
if (!mEnt->mHalfOpens.RemoveElement(this)) {
@ -4206,7 +4221,7 @@ nsHalfOpenSocket::StartFastOpen()
Abandon();
return NS_ERROR_ABORT;
}
mEnt->mDoNotDestroy = true;
MOZ_ASSERT(gHttpHandler->ConnMgr()->mNumHalfOpenConns);
if (gHttpHandler->ConnMgr()->mNumHalfOpenConns) { // just in case
gHttpHandler->ConnMgr()->mNumHalfOpenConns--;
@ -4243,6 +4258,9 @@ nsHalfOpenSocket::StartFastOpen()
} else {
LOG(("nsHalfOpenSocket::StartFastOpen [this=%p conn=%p]\n",
this, mConnectionNegotiatingFastOpen.get()));
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
mEnt->mHalfOpenFastOpenBackups.AppendElement(this);
// SetupBackupTimer should set up a timer which will hold a ref to this
// halfOpen. It will fail only if it cannot create a timer. Anyway just
@ -4267,6 +4285,8 @@ nsHalfOpenSocket::SetFastOpenConnected(nsresult aError, bool aWillRetry)
MOZ_DIAGNOSTIC_ASSERT(mFastOpenInProgress);
MOZ_DIAGNOSTIC_ASSERT(mEnt);
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
LOG(("nsHalfOpenSocket::SetFastOpenConnected [this=%p conn=%p error=%x]\n",
this, mConnectionNegotiatingFastOpen.get(),
static_cast<uint32_t>(aError)));
@ -4327,6 +4347,7 @@ nsHalfOpenSocket::SetFastOpenConnected(nsresult aError, bool aWillRetry)
// mConnectionNegotiatingFastOpen is going away and halfOpen is taking
// this mSocketTransport so add halfOpen to mEnt and update
// mNumActiveConns.
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
mEnt->mHalfOpens.AppendElement(this);
gHttpHandler->ConnMgr()->mNumHalfOpenConns++;
gHttpHandler->ConnMgr()->StartedConnect();
@ -4385,6 +4406,8 @@ nsHalfOpenSocket::CancelFastOpenConnection()
LOG(("nsHalfOpenSocket::CancelFastOpenConnection [this=%p conn=%p]\n",
this, mConnectionNegotiatingFastOpen.get()));
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
RefPtr<nsHalfOpenSocket> deleteProtector(this);
mEnt->mHalfOpenFastOpenBackups.RemoveElement(this);
mSocketTransport->SetFastOpenCallback(nullptr);
@ -4594,6 +4617,7 @@ nsHalfOpenSocket::SetupConn(nsIAsyncOutputStream *out,
MOZ_DIAGNOSTIC_ASSERT(static_cast<int32_t>(mEnt->mIdleConns.IndexOf(conn)) == -1);
int32_t idx = mEnt->mActiveConns.IndexOf(conn);
if (NS_SUCCEEDED(rv) && (idx != -1)) {
gHttpHandler->ConnMgr()->CheckConnEntryMustBeInmCT(mEnt->mConnInfo);
mConnectionNegotiatingFastOpen = conn;
} else {
conn->SetFastOpen(false);
@ -5153,5 +5177,17 @@ nsHttpConnectionMgr::MoveToWildCardConnEntry(nsHttpConnectionInfo *specificCI,
}
}
void
nsHttpConnectionMgr::CheckConnEntryMustBeInmCT(nsHttpConnectionInfo *ci)
{
nsConnectionEntry *ent = mCT.GetWeak(ci->HashKey());
MOZ_DIAGNOSTIC_ASSERT(ent);
if (ent->mHowItWasRemoved == nsConnectionEntry::CONN_ENTRY_CLEAR_CONNECTION_HISTORY) {
MOZ_DIAGNOSTIC_ASSERT(false);
} else if (ent->mHowItWasRemoved == nsConnectionEntry::CONN_ENTRY_REMOVED_SHUTDOWN) {
MOZ_DIAGNOSTIC_ASSERT(false);
}
}
} // namespace net
} // namespace mozilla

View file

@ -747,6 +747,9 @@ private:
nsTArray<RefPtr<PendingTransactionInfo>>*
GetTransactionPendingQHelper(nsConnectionEntry *ent, nsAHttpTransaction *trans);
// This is only a diagnostic check and it will be removed soon.
void CheckConnEntryMustBeInmCT(nsHttpConnectionInfo *ci);
};
NS_DEFINE_STATIC_IID_ACCESSOR(nsHttpConnectionMgr::nsHalfOpenSocket, NS_HALFOPENSOCKET_IID)

View file

@ -1149,4 +1149,4 @@ static const TransportSecurityPreload kPublicKeyPinningPreloadList[] = {
static const int32_t kUnknownId = -1;
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1509293772741000);
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1509380702655000);

Diff not shown due to its large size.

Diff not shown due to its large size.

security/nss/.gitignore (vendored)
View file

@ -18,3 +18,4 @@ GTAGS
fuzz/libFuzzer/*
fuzz/corpus
fuzz/out
.chk

View file

@ -1 +1 @@
825e5d444e99
f212be04f3d0

View file

@ -10,3 +10,4 @@
*/
#error "Do not include this header file."

View file

@ -1,4 +1,4 @@
#!/bin/sh
#!/usr/bin/env bash
set -e
@ -15,18 +15,19 @@ echo "Copy '$COMMIT' from '$REPO' to '$DIR'"
if [ -f $DIR/.git-copy ]; then
CURRENT=$(cat $DIR/.git-copy)
if [ $(echo -n $COMMIT | wc -c) != "40" ]; then
# On the off chance that $COMMIT is a remote head.
ACTUAL=$(git ls-remote $REPO $COMMIT | cut -c 1-40 -)
else
ACTUAL=$COMMIT
fi
if [ CURRENT = ACTUAL ]; then
if [ "$CURRENT" = "$ACTUAL" ]; then
echo "Up to date."
exit
fi
fi
mkdir -p $DIR
git -C $DIR init -q
git init -q $DIR
git -C $DIR fetch -q --depth=1 $REPO $COMMIT:git-copy-tmp
git -C $DIR reset --hard git-copy-tmp
git -C $DIR show-ref HEAD | cut -c 1-40 - > $DIR/.git-copy
git -C $DIR rev-parse --verify HEAD > $DIR/.git-copy
rm -rf $DIR/.git

Diff not shown due to its large size.

View file

@ -46,8 +46,8 @@
* It's recommended to switch back to 0 after having reached version 98/99.
*/
#define NSS_BUILTINS_LIBRARY_VERSION_MAJOR 2
#define NSS_BUILTINS_LIBRARY_VERSION_MINOR 14
#define NSS_BUILTINS_LIBRARY_VERSION "2.14"
#define NSS_BUILTINS_LIBRARY_VERSION_MINOR 16
#define NSS_BUILTINS_LIBRARY_VERSION "2.16"
/* These version numbers detail the semantic changes to the ckfw engine. */
#define NSS_BUILTINS_HARDWARE_VERSION_MAJOR 1

View file

@ -226,15 +226,16 @@ nssSlot_GetToken(
NSSSlot *slot)
{
NSSToken *rvToken = NULL;
nssSlot_EnterMonitor(slot);
/* Even if a token should be present, check `slot->token` too as it
* might be gone already. This would happen mostly on shutdown. */
if (nssSlot_IsTokenPresent(slot) && slot->token) {
rvToken = nssToken_AddRef(slot->token);
if (nssSlot_IsTokenPresent(slot)) {
/* Even if a token should be present, check `slot->token` too as it
* might be gone already. This would happen mostly on shutdown. */
nssSlot_EnterMonitor(slot);
if (slot->token)
rvToken = nssToken_AddRef(slot->token);
nssSlot_ExitMonitor(slot);
}
nssSlot_ExitMonitor(slot);
return rvToken;
}

View file

@ -2639,6 +2639,11 @@ NSC_SignInit(CK_SESSION_HANDLE hSession,
#define INIT_HMAC_MECH(mmm) \
case CKM_##mmm##_HMAC_GENERAL: \
PORT_Assert(pMechanism->pParameter); \
if (!pMechanism->pParameter) { \
crv = CKR_MECHANISM_PARAM_INVALID; \
break; \
} \
crv = sftk_doHMACInit(context, HASH_Alg##mmm, key, \
*(CK_ULONG *)pMechanism->pParameter); \
break; \
@ -2654,6 +2659,11 @@ NSC_SignInit(CK_SESSION_HANDLE hSession,
INIT_HMAC_MECH(SHA512)
case CKM_SHA_1_HMAC_GENERAL:
PORT_Assert(pMechanism->pParameter);
if (!pMechanism->pParameter) {
crv = CKR_MECHANISM_PARAM_INVALID;
break;
}
crv = sftk_doHMACInit(context, HASH_AlgSHA1, key,
*(CK_ULONG *)pMechanism->pParameter);
break;
@ -2662,10 +2672,20 @@ NSC_SignInit(CK_SESSION_HANDLE hSession,
break;
case CKM_SSL3_MD5_MAC:
PORT_Assert(pMechanism->pParameter);
if (!pMechanism->pParameter) {
crv = CKR_MECHANISM_PARAM_INVALID;
break;
}
crv = sftk_doSSLMACInit(context, SEC_OID_MD5, key,
*(CK_ULONG *)pMechanism->pParameter);
break;
case CKM_SSL3_SHA1_MAC:
PORT_Assert(pMechanism->pParameter);
if (!pMechanism->pParameter) {
crv = CKR_MECHANISM_PARAM_INVALID;
break;
}
crv = sftk_doSSLMACInit(context, SEC_OID_SHA1, key,
*(CK_ULONG *)pMechanism->pParameter);
break;
@ -3314,6 +3334,11 @@ NSC_VerifyInit(CK_SESSION_HANDLE hSession,
INIT_HMAC_MECH(SHA512)
case CKM_SHA_1_HMAC_GENERAL:
PORT_Assert(pMechanism->pParameter);
if (!pMechanism->pParameter) {
crv = CKR_MECHANISM_PARAM_INVALID;
break;
}
crv = sftk_doHMACInit(context, HASH_AlgSHA1, key,
*(CK_ULONG *)pMechanism->pParameter);
break;
@ -3322,10 +3347,20 @@ NSC_VerifyInit(CK_SESSION_HANDLE hSession,
break;
case CKM_SSL3_MD5_MAC:
PORT_Assert(pMechanism->pParameter);
if (!pMechanism->pParameter) {
crv = CKR_MECHANISM_PARAM_INVALID;
break;
}
crv = sftk_doSSLMACInit(context, SEC_OID_MD5, key,
*(CK_ULONG *)pMechanism->pParameter);
break;
case CKM_SSL3_SHA1_MAC:
PORT_Assert(pMechanism->pParameter);
if (!pMechanism->pParameter) {
crv = CKR_MECHANISM_PARAM_INVALID;
break;
}
crv = sftk_doSSLMACInit(context, SEC_OID_SHA1, key,
*(CK_ULONG *)pMechanism->pParameter);
break;
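A caller-side sketch in standard PKCS#11 (not part of this patch; the helper function is hypothetical) showing the CK_ULONG output-length parameter that the new NULL checks above guard against:

#include "pkcs11.h"  /* assumed include; declares CK_MECHANISM and the function list */

/* The *_HMAC_GENERAL and SSL3 MAC mechanisms carry the desired MAC length as a
 * CK_ULONG in pParameter. With pParameter left NULL, the softoken now returns
 * CKR_MECHANISM_PARAM_INVALID instead of only asserting in debug builds. */
CK_RV SketchSignInitTruncatedHmac(CK_FUNCTION_LIST_PTR p11,
                                  CK_SESSION_HANDLE session,
                                  CK_OBJECT_HANDLE key)
{
  CK_ULONG macLen = 32; /* truncate the SHA-512 HMAC output to 32 bytes */
  CK_MECHANISM mech = { CKM_SHA512_HMAC_GENERAL, &macLen, sizeof(macLen) };
  return p11->C_SignInit(session, &mech, key);
}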

View file

@ -66,7 +66,7 @@ class cfAction(argparse.Action):
for file in files:
with open(os.path.join(dirname, file), "rb") as f:
hash.update(f.read())
chk_file = cwd + "/out/.chk"
chk_file = cwd + "/.chk"
old_chk = ""
new_chk = hash.hexdigest()
if os.path.exists(chk_file):

View file

@ -26,6 +26,7 @@ interop_init()
if [ ! -d "$INTEROP" ]; then
git clone -q https://github.com/mozilla/tls-interop "$INTEROP"
fi
INTEROP=$(cd "$INTEROP";pwd -P)
# We use the BoringSSL keyfiles
BORING=${BORING:=boringssl}
@ -33,6 +34,7 @@ interop_init()
git clone -q https://boringssl.googlesource.com/boringssl "$BORING"
git -C "$BORING" checkout -q ea80f9d5df4c302de391e999395e1c87f9c786b3
fi
BORING=$(cd "$BORING";pwd -P)
SCRIPTNAME="interop.sh"
html_head "interop test"
@ -53,7 +55,7 @@ interop_run()
server=$3
(cd "$INTEROP";
cargo run -- --client ${client} --server ${server} --rootdir ../${BORING}/ssl/test/runner/ --test-cases cases.json) 2>interop-${test_name}.errors | tee interop-${test_name}.log
cargo run -- --client "$client" --server "$server" --rootdir "$BORING"/ssl/test/runner/ --test-cases cases.json) 2>interop-${test_name}.errors | tee interop-${test_name}.log
html_msg "${PIPESTATUS[0]}" 0 "Interop" "Run successfully"
grep -i 'FAILED\|Assertion failure' interop-${test_name}.errors
html_msg $? 1 "Interop" "No failures"
@ -62,7 +64,7 @@ interop_run()
cd "$(dirname "$0")"
SOURCE_DIR="$PWD"/../..
interop_init
NSS_SHIM="${BINDIR}"/nss_bogo_shim
BORING_SHIM="../${BORING}"/build/ssl/test/bssl_shim
NSS_SHIM="$BINDIR"/nss_bogo_shim
BORING_SHIM="$BORING"/build/ssl/test/bssl_shim
interop_run "nss_nss" ${NSS_SHIM} ${NSS_SHIM}
interop_cleanup

View file

@ -410001,7 +410001,7 @@
"testharness"
],
"XMLHttpRequest/event-upload-progress-crossorigin.htm": [
"329b648fb3dc0169c5bf185ad9bb88245e7f889d",
"7a18f690ea1c7679d52ff0fd39ea931650d6b9c5",
"testharness"
],
"XMLHttpRequest/event-upload-progress.htm": [

View file

@ -3016,9 +3016,6 @@
[HTMLIFrameElement interface: attribute allowUserMedia]
expected: FAIL
[HTMLIFrameElement interface: attribute allowPaymentRequest]
expected: FAIL
[Window interface: window must inherit property "oncancel" with the proper type (41)]
expected: FAIL

View file

@ -971,100 +971,3 @@
[iframe.allowUserMedia: IDL set to object "test-valueOf"]
expected: FAIL
[iframe.allowPaymentRequest: typeof IDL attribute]
expected: FAIL
[iframe.allowPaymentRequest: IDL get with DOM attribute unset]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to ""]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to " foo "]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to undefined]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to null]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to 7]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to 1.5]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to true]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to false]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to object "[object Object\]"]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to NaN]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to Infinity]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to -Infinity]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to "\\0"]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to object "test-toString"]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to object "test-valueOf"]
expected: FAIL
[iframe.allowPaymentRequest: setAttribute() to "allowPaymentRequest"]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to ""]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to " foo "]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to undefined]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to null]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to 7]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to 1.5]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to false]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to object "[object Object\]"]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to NaN]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to Infinity]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to -Infinity]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to "\\0"]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to object "test-toString"]
expected: FAIL
[iframe.allowPaymentRequest: IDL set to object "test-valueOf"]
expected: FAIL

View file

@ -4,23 +4,30 @@
<title>XMLHttpRequest: upload progress event for cross-origin requests</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<link rel="help" href="https://xhr.spec.whatwg.org/#handler-xhr-onprogress" data-tested-assertations="../.." />
<link rel="help" href="https://xhr.spec.whatwg.org/#the-send()-method" data-tested-assertations="following::*//a[contains(@href,'#make-upload-progress-notifications')] following::ol[1]/li[8]" />
<link rel="help" href="https://xhr.spec.whatwg.org/#dom-xmlhttprequest-upload" data-tested-assertations=".." />
<div id="log"></div>
<script src="/common/get-host-info.sub.js"></script>
<div id="log"></div>
<script>
var test = async_test();
test.step(function() {
var client = new XMLHttpRequest();
client.upload.onprogress = test.step_func(function() {
test.done();
});
client.onload = test.step_func(function() {
assert_unreached("onprogress not called.");
});
client.open("POST", get_host_info().HTTP_REMOTE_ORIGIN + "/XMLHttpRequest/resources/corsenabled.py");
client.send("This is a test string.");
});
const remote = get_host_info().HTTP_REMOTE_ORIGIN + "/XMLHttpRequest/resources/corsenabled.py",
redirect = "resources/redirect.py?code=307&location=" + remote;
[remote, redirect].forEach(url => {
async_test(test => {
const client = new XMLHttpRequest();
client.upload.onprogress = test.step_func_done()
client.onload = test.unreached_func()
client.open("POST", url)
client.send("On time: " + url)
}, "Upload events registered on time (" + url + ")");
});
[remote, redirect].forEach(url => {
async_test(test => {
const client = new XMLHttpRequest();
client.onload = test.step_func_done();
client.open("POST", url);
client.send("Too late: " + url);
client.upload.onloadstart = test.unreached_func(); // registered too late
client.upload.onprogress = test.unreached_func(); // registered too late
}, "Upload events registered too late (" + url + ")");
});
</script>

View file

@ -27,12 +27,12 @@ add_task(async function test_startup() {
let LAUNCH = "OSFILE_WORKER_LAUNCH_MS";
let READY = "OSFILE_WORKER_READY_MS";
let before = Services.telemetry.histogramSnapshots;
let before = Services.telemetry.histogramSnapshots.parent;
// Launch the OS.File worker
await File.getCurrentDirectory();
let after = Services.telemetry.histogramSnapshots;
let after = Services.telemetry.histogramSnapshots.parent;
do_print("Ensuring that we have recorded measures for histograms");
@ -47,13 +47,13 @@ add_task(async function test_startup() {
add_task(async function test_writeAtomic() {
let LABEL = "OSFILE_WRITEATOMIC_JANK_MS";
let before = Services.telemetry.histogramSnapshots;
let before = Services.telemetry.histogramSnapshots.parent;
// Perform a write.
let path = Path.join(Constants.Path.profileDir, "test_osfile_telemetry.tmp");
await File.writeAtomic(path, LABEL, { tmpPath: path + ".tmp" } );
let after = Services.telemetry.histogramSnapshots;
let after = Services.telemetry.histogramSnapshots.parent;
do_check_eq(getCount(after[LABEL]), getCount(before[LABEL]) + 1);
});

View file

@ -858,7 +858,20 @@ TelemetryImpl::SnapshotSubsessionHistograms(bool clearSubsession,
NS_IMETHODIMP
TelemetryImpl::GetKeyedHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
return TelemetryHistogram::GetKeyedHistogramSnapshots(cx, ret);
return TelemetryHistogram::GetKeyedHistogramSnapshots(cx, ret, false, false);
}
NS_IMETHODIMP
TelemetryImpl::SnapshotSubsessionKeyedHistograms(bool clearSubsession,
JSContext *cx,
JS::MutableHandle<JS::Value> ret)
{
#if !defined(MOZ_WIDGET_ANDROID)
return TelemetryHistogram::GetKeyedHistogramSnapshots(cx, ret, true,
clearSubsession);
#else
return NS_OK;
#endif
}
bool
@ -2416,16 +2429,6 @@ SetProfileDir(nsIFile* aProfD)
sTelemetryIOObserver->AddPath(profDirPath, NS_LITERAL_STRING("{profile}"));
}
void CreateStatisticsRecorder()
{
TelemetryHistogram::CreateStatisticsRecorder();
}
void DestroyStatisticsRecorder()
{
TelemetryHistogram::DestroyStatisticsRecorder();
}
// Scalar API C++ Endpoints
void

View file

@ -45,13 +45,6 @@ enum TimerResolution {
Microsecond
};
/**
* Create and destroy the underlying base::StatisticsRecorder singleton.
* Creation has to be done very early in the startup sequence.
*/
void CreateStatisticsRecorder();
void DestroyStatisticsRecorder();
/**
* Initialize the Telemetry service on the main thread at startup.
*/
@ -150,7 +143,7 @@ void AccumulateCategorical(HistogramID id, const nsCString& label);
void AccumulateTimeDelta(HistogramID id, TimeStamp start, TimeStamp end = TimeStamp::Now());
/**
* Enable/disable recording for this histogram at runtime.
* Enable/disable recording for this histogram in this process at runtime.
* Recording is enabled by default, unless listed at kRecordingInitiallyDisabledIDs[].
* id must be a valid telemetry enum, otherwise an assertion is triggered.
*

Diff not shown due to its large size.

View file

@ -19,9 +19,6 @@
namespace TelemetryHistogram {
void CreateStatisticsRecorder();
void DestroyStatisticsRecorder();
void InitializeGlobalState(bool canRecordBase, bool canRecordExtended);
void DeInitializeGlobalState();
#ifdef DEBUG
@ -75,7 +72,8 @@ RegisteredKeyedHistograms(uint32_t aDataset, uint32_t *aCount,
char*** aHistograms);
nsresult
GetKeyedHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret);
GetKeyedHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret,
bool subsession, bool clearSubsession);
size_t
GetMapShallowSizesOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf);

View file

@ -40,13 +40,6 @@ const REASON_TEST_PING = "test-ping";
const REASON_ENVIRONMENT_CHANGE = "environment-change";
const REASON_SHUTDOWN = "shutdown";
const HISTOGRAM_SUFFIXES = {
PARENT: "",
CONTENT: "#content",
GPU: "#gpu",
EXTENSION: "#extension",
};
const ENVIRONMENT_CHANGE_LISTENER = "TelemetrySession::onEnvironmentChange";
const MS_IN_ONE_HOUR = 60 * 60 * 1000;
@ -936,13 +929,11 @@ var Impl = {
: Telemetry.histogramSnapshots;
let ret = {};
for (let name of registered) {
for (let suffix of Object.values(HISTOGRAM_SUFFIXES)) {
if (name + suffix in hls) {
if (!(suffix in ret)) {
ret[suffix] = {};
}
ret[suffix][name] = this.packHistogram(hls[name + suffix]);
for (let [process, histograms] of Object.entries(hls)) {
ret[process] = {};
for (let [name, value] of Object.entries(histograms)) {
if (registered.includes(name)) {
ret[process][name] = this.packHistogram(value);
}
}
}
@ -957,31 +948,24 @@ var Impl = {
// Omit telemetry test histograms outside of tests.
registered = registered.filter(id => !id.startsWith("TELEMETRY_TEST_"));
}
let khs = subsession ? Telemetry.snapshotSubsessionKeyedHistograms(clearSubsession)
: Telemetry.keyedHistogramSnapshots;
let ret = {};
for (let id of registered) {
for (let suffix of Object.values(HISTOGRAM_SUFFIXES)) {
let keyed = Telemetry.getKeyedHistogramById(id + suffix);
let snapshot = null;
if (subsession) {
snapshot = clearSubsession ? keyed.snapshotSubsessionAndClear()
: keyed.subsessionSnapshot();
} else {
snapshot = keyed.snapshot();
}
let keys = Object.keys(snapshot);
if (keys.length == 0) {
// Skip empty keyed histogram.
continue;
}
if (!(suffix in ret)) {
ret[suffix] = {};
}
ret[suffix][id] = {};
for (let key of keys) {
ret[suffix][id][key] = this.packHistogram(snapshot[key]);
for (let [process, histograms] of Object.entries(khs)) {
ret[process] = {};
for (let [name, value] of Object.entries(histograms)) {
if (registered.includes(name)) {
let keys = Object.keys(value);
if (keys.length == 0) {
// Skip empty keyed histogram
continue;
}
ret[process][name] = {};
for (let [key, hgram] of Object.entries(value)) {
ret[process][name][key] = this.packHistogram(hgram);
}
}
}
}
@ -1326,8 +1310,8 @@ var Impl = {
let keyedScalars = protect(() => this.getScalars(isSubsession, clearSubsession, true), {});
let events = protect(() => this.getEvents(isSubsession, clearSubsession))
payloadObj.histograms = histograms[HISTOGRAM_SUFFIXES.PARENT] || {};
payloadObj.keyedHistograms = keyedHistograms[HISTOGRAM_SUFFIXES.PARENT] || {};
payloadObj.histograms = histograms.parent || {};
payloadObj.keyedHistograms = keyedHistograms.parent || {};
payloadObj.processes = {
parent: {
scalars: scalars["parent"] || {},
@ -1337,29 +1321,29 @@ var Impl = {
content: {
scalars: scalars["content"],
keyedScalars: keyedScalars["content"],
histograms: histograms[HISTOGRAM_SUFFIXES.CONTENT],
keyedHistograms: keyedHistograms[HISTOGRAM_SUFFIXES.CONTENT],
histograms: histograms["content"],
keyedHistograms: keyedHistograms["content"],
events: events["content"] || [],
},
extension: {
scalars: scalars["extension"],
keyedScalars: keyedScalars["extension"],
histograms: histograms[HISTOGRAM_SUFFIXES.EXTENSION],
keyedHistograms: keyedHistograms[HISTOGRAM_SUFFIXES.EXTENSION],
histograms: histograms["extension"],
keyedHistograms: keyedHistograms["extension"],
events: events["extension"] || [],
},
};
// Only include the GPU process if we've accumulated data for it.
if (HISTOGRAM_SUFFIXES.GPU in histograms ||
HISTOGRAM_SUFFIXES.GPU in keyedHistograms ||
if ("gpu" in histograms ||
"gpu" in keyedHistograms ||
"gpu" in scalars ||
"gpu" in keyedScalars) {
payloadObj.processes.gpu = {
scalars: scalars["gpu"],
keyedScalars: keyedScalars["gpu"],
histograms: histograms[HISTOGRAM_SUFFIXES.GPU],
keyedHistograms: keyedHistograms[HISTOGRAM_SUFFIXES.GPU],
histograms: histograms["gpu"],
keyedHistograms: keyedHistograms["gpu"],
events: events["gpu"] || [],
};
}

View file

@ -41,7 +41,7 @@ def write_histogram_table(output, histograms):
label_table = []
label_count = 0
print("constexpr HistogramInfo gHistograms[] = {", file=output)
print("constexpr HistogramInfo gHistogramInfos[] = {", file=output)
for histogram in histograms:
name_index = string_table.stringIndex(histogram.name())
exp_index = string_table.stringIndex(histogram.expiration())

View file

@ -52,8 +52,8 @@ interface nsITelemetry : nsISupports
/**
* An object containing a snapshot from all of the currently registered histograms.
* { name1: {data1}, name2:{data2}...}
* An object containing a snapshot from all of the currently registered histograms from all processes.
* { process1: {name1: {data1}, name2:{data2}...} }
* where data consists of the following properties:
* min - Minimal bucket size
* max - Maximum bucket size
@ -257,12 +257,20 @@ interface nsITelemetry : nsISupports
/*
* An object containing a snapshot from all of the currently registered keyed histograms.
* { name1: {histogramData1}, name2:{histogramData2}...}
* { process1: {name1: {histogramData1}, name2:{histogramData2}...}}
* where the histogramData is as described in histogramSnapshots.
*/
[implicit_jscontext]
readonly attribute jsval keyedHistogramSnapshots;
/**
* Get a snapshot of the internally duplicated subsession keyed histograms.
* @param clear Whether to clear out the subsession histograms after snapshotting.
* @return An object in the same format as histogramSnapshots, except it contains the internally duplicated keyed histograms for subsession telemetry.
*/
[implicit_jscontext]
jsval snapshotSubsessionKeyedHistograms([optional] in boolean clear);
/**
* Returns an array whose values are the names of histograms defined
* in Histograms.json.

View file

@ -30,18 +30,6 @@ GetAndClearHistogram(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
JS::HandleValueArray::empty(), &rval)) << "Cannot clear histogram";
}
void
GetSnapshots(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
const char* name, JS::MutableHandleValue valueOut, bool is_keyed)
{
JS::RootedValue snapshot(cx);
nsresult rv = is_keyed ? mTelemetry->GetKeyedHistogramSnapshots(cx, &snapshot)
: mTelemetry->GetHistogramSnapshots(cx, &snapshot);
ASSERT_EQ(rv, NS_OK) << "Cannot call histogram snapshots";
valueOut.set(snapshot);
}
void
GetProperty(JSContext* cx, const char* name, JS::HandleValue valueIn,
JS::MutableHandleValue valueOut)
@ -53,6 +41,21 @@ GetProperty(JSContext* cx, const char* name, JS::HandleValue valueIn,
valueOut.set(property);
}
void
GetSnapshots(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
const char* name, JS::MutableHandleValue valueOut, bool is_keyed)
{
JS::RootedValue snapshots(cx);
nsresult rv = is_keyed ? mTelemetry->GetKeyedHistogramSnapshots(cx, &snapshots)
: mTelemetry->GetHistogramSnapshots(cx, &snapshots);
JS::RootedValue snapshot(cx);
GetProperty(cx, "parent", snapshots, &snapshot);
ASSERT_EQ(rv, NS_OK) << "Cannot call histogram snapshots";
valueOut.set(snapshot);
}
}
TEST_F(TelemetryTestFixture, AccumulateCountHistogram)

View file

@ -72,7 +72,7 @@ function check_histogram(histogram_type, name, min, max, bucket_count) {
for (let i of s.counts) {
do_check_eq(i, 1);
}
var hgrams = Telemetry.histogramSnapshots
var hgrams = Telemetry.histogramSnapshots.parent;
let gh = hgrams[name]
do_check_eq(gh.histogram_type, histogram_type);
@ -114,7 +114,8 @@ function test_instantiate() {
// |add| will not instantiate the histogram.
h.add(1);
let snapshot = h.snapshot();
let subsession = Telemetry.snapshotSubsessionHistograms();
let subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(ID in subsession);
Assert.equal(snapshot.sum, subsession[ID].sum,
"Histogram and subsession histogram sum must match.");
// Clear the histogram, so we don't void the assumptions from the other tests.
@ -171,7 +172,7 @@ add_task(async function test_noSerialization() {
// Instantiate the storage for this histogram and make sure it doesn't
// get reflected into JS, as it has no interesting data in it.
Telemetry.getHistogramById("NEWTAB_PAGE_PINNED_SITES_COUNT");
do_check_false("NEWTAB_PAGE_PINNED_SITES_COUNT" in Telemetry.histogramSnapshots);
do_check_false("NEWTAB_PAGE_PINNED_SITES_COUNT" in Telemetry.histogramSnapshots.parent);
});
add_task(async function test_boolean_histogram() {
@ -334,7 +335,7 @@ add_task(async function test_API_return_values() {
for (let returnValue of RETURN_VALUES) {
Assert.strictEqual(returnValue, undefined,
"The function must return undefined.");
"The function must return undefined");
}
});
@ -438,8 +439,14 @@ add_task(async function test_expired_histogram() {
dummy.add(1);
do_check_eq(Telemetry.histogramSnapshots["__expired__"], undefined);
do_check_eq(Telemetry.histogramSnapshots[test_expired_id], undefined);
for (let process of ["main", "content", "gpu", "extension"]) {
if (!(process in Telemetry.histogramSnapshots)) {
do_print("Nothing present for process " + process);
continue;
}
do_check_eq(Telemetry.histogramSnapshots[process]["__expired__"], undefined);
}
do_check_eq(Telemetry.histogramSnapshots.parent[test_expired_id], undefined);
do_check_eq(rh[test_expired_id], undefined);
});
@ -496,7 +503,7 @@ add_task(async function test_keyed_boolean_histogram() {
Assert.deepEqual(h.keys().sort(), testKeys);
Assert.deepEqual(h.snapshot(), testSnapShot);
let allSnapshots = Telemetry.keyedHistogramSnapshots;
let allSnapshots = Telemetry.keyedHistogramSnapshots.parent;
Assert.deepEqual(allSnapshots[KEYED_ID], testSnapShot);
h.clear();
@ -552,7 +559,7 @@ add_task(async function test_keyed_count_histogram() {
Assert.deepEqual(h.keys().sort(), testKeys);
Assert.deepEqual(h.snapshot(), testSnapShot);
let allSnapshots = Telemetry.keyedHistogramSnapshots;
let allSnapshots = Telemetry.keyedHistogramSnapshots.parent;
Assert.deepEqual(allSnapshots[KEYED_ID], testSnapShot);
// Test clearing categorical histogram.
@ -625,7 +632,7 @@ add_task(async function test_keyed_flag_histogram() {
Assert.deepEqual(h.keys().sort(), [KEY]);
Assert.deepEqual(h.snapshot(), testSnapshot);
let allSnapshots = Telemetry.keyedHistogramSnapshots;
let allSnapshots = Telemetry.keyedHistogramSnapshots.parent;
Assert.deepEqual(allSnapshots[KEYED_ID], testSnapshot);
h.clear();
@ -775,7 +782,7 @@ add_task(async function test_histogramSnapshots() {
keyed.add("a", 1);
// Check that keyed histograms are not returned
Assert.ok(!("TELEMETRY_TEST_KEYED_COUNT#a" in Telemetry.histogramSnapshots));
Assert.ok(!("TELEMETRY_TEST_KEYED_COUNT" in Telemetry.histogramSnapshots.parent));
});
add_task(async function test_datasets() {
@ -811,56 +818,57 @@ add_task({
skip_if: () => gIsAndroid
},
function test_subsession() {
const ID = "TELEMETRY_TEST_COUNT";
const COUNT = "TELEMETRY_TEST_COUNT";
const FLAG = "TELEMETRY_TEST_FLAG";
let h = Telemetry.getHistogramById(ID);
let h = Telemetry.getHistogramById(COUNT);
let flag = Telemetry.getHistogramById(FLAG);
// Both original and duplicate should start out the same.
h.clear();
let snapshot = Telemetry.histogramSnapshots;
let subsession = Telemetry.snapshotSubsessionHistograms();
Assert.ok(!(ID in snapshot));
Assert.ok(!(ID in subsession));
let snapshot = Telemetry.histogramSnapshots.parent;
let subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(!(COUNT in snapshot));
Assert.ok(!(COUNT in subsession));
// They should instantiate and pick-up the count.
h.add(1);
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms();
Assert.ok(ID in snapshot);
Assert.ok(ID in subsession);
Assert.equal(snapshot[ID].sum, 1);
Assert.equal(subsession[ID].sum, 1);
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(COUNT in snapshot);
Assert.ok(COUNT in subsession);
Assert.equal(snapshot[COUNT].sum, 1);
Assert.equal(subsession[COUNT].sum, 1);
// They should still reset properly.
h.clear();
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms();
Assert.ok(!(ID in snapshot));
Assert.ok(!(ID in subsession));
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(!(COUNT in snapshot));
Assert.ok(!(COUNT in subsession));
// Both should instantiate and pick-up the count.
h.add(1);
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms();
Assert.equal(snapshot[ID].sum, 1);
Assert.equal(subsession[ID].sum, 1);
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(COUNT in snapshot);
Assert.ok(COUNT in subsession);
Assert.equal(snapshot[COUNT].sum, 1);
Assert.equal(subsession[COUNT].sum, 1);
// Check that we are able to only reset the duplicate histogram.
h.clear(true);
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms();
Assert.ok(ID in snapshot);
Assert.ok(ID in subsession);
Assert.equal(snapshot[ID].sum, 1);
Assert.equal(subsession[ID].sum, 0);
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(COUNT in snapshot);
Assert.ok(!(COUNT in subsession));
Assert.equal(snapshot[COUNT].sum, 1);
// Both should register the next count.
h.add(1);
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms();
Assert.equal(snapshot[ID].sum, 2);
Assert.equal(subsession[ID].sum, 1);
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.equal(snapshot[COUNT].sum, 2);
Assert.equal(subsession[COUNT].sum, 1);
// Retrieve a subsession snapshot and pass the flag to
// clear subsession histograms too.
@ -868,27 +876,26 @@ function test_subsession() {
flag.clear();
h.add(1);
flag.add(1);
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms(true);
Assert.ok(ID in snapshot);
Assert.ok(ID in subsession);
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms(true).parent;
Assert.ok(COUNT in snapshot);
Assert.ok(COUNT in subsession);
Assert.ok(FLAG in snapshot);
Assert.ok(FLAG in subsession);
Assert.equal(snapshot[ID].sum, 1);
Assert.equal(subsession[ID].sum, 1);
Assert.equal(snapshot[COUNT].sum, 1);
Assert.equal(subsession[COUNT].sum, 1);
Assert.equal(snapshot[FLAG].sum, 1);
Assert.equal(subsession[FLAG].sum, 1);
// The next subsession snapshot should show the histograms
// got reset.
snapshot = Telemetry.histogramSnapshots;
subsession = Telemetry.snapshotSubsessionHistograms();
Assert.ok(ID in snapshot);
Assert.ok(ID in subsession);
snapshot = Telemetry.histogramSnapshots.parent;
subsession = Telemetry.snapshotSubsessionHistograms().parent;
Assert.ok(COUNT in snapshot);
Assert.ok(!(COUNT in subsession));
Assert.ok(FLAG in snapshot);
Assert.ok(FLAG in subsession);
Assert.equal(snapshot[ID].sum, 1);
Assert.equal(subsession[ID].sum, 0);
Assert.equal(snapshot[COUNT].sum, 1);
Assert.equal(snapshot[FLAG].sum, 1);
Assert.equal(subsession[FLAG].sum, 0);
});
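A condensed sketch of the clearing behaviour exercised above, assuming the same xpcshell scope with Telemetry and the TELEMETRY_TEST_COUNT histogram available:

    let count = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
    count.clear();
    count.add(1);
    // Passing true clears the subsession copy after snapshotting it, ...
    let cleared = Telemetry.snapshotSubsessionHistograms(true).parent;
    // ... so the next subsession snapshot no longer contains the histogram,
    // while the classic snapshot still reports sum 1.
    let next = Telemetry.snapshotSubsessionHistograms().parent;
    let classic = Telemetry.histogramSnapshots.parent;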

View file

@ -779,34 +779,34 @@ add_task(async function test_checkSubsessionHistograms() {
// "classic" histograms. However, histograms can change
// between us collecting the different payloads, so we only
// check for deep equality on known stable histograms.
let checkHistograms = (classic, subsession) => {
for (let id of Object.keys(classic)) {
let checkHistograms = (classic, subsession, message) => {
for (let id of Object.keys(subsession)) {
if (!registeredIds.has(id)) {
continue;
}
Assert.ok(id in subsession);
Assert.ok(id in classic, message + ` (${id})`);
if (stableHistograms.has(id)) {
Assert.deepEqual(classic[id],
subsession[id]);
subsession[id], message);
} else {
Assert.equal(classic[id].histogram_type,
subsession[id].histogram_type);
subsession[id].histogram_type, message);
}
}
};
// Same as above, except for keyed histograms.
let checkKeyedHistograms = (classic, subsession) => {
for (let id of Object.keys(classic)) {
let checkKeyedHistograms = (classic, subsession, message) => {
for (let id of Object.keys(subsession)) {
if (!registeredIds.has(id)) {
continue;
}
Assert.ok(id in subsession);
Assert.ok(id in classic, message);
if (stableKeyedHistograms.has(id)) {
Assert.deepEqual(classic[id],
subsession[id]);
subsession[id], message);
}
}
};
@ -825,8 +825,8 @@ add_task(async function test_checkSubsessionHistograms() {
Assert.ok(!(KEYED_ID in classic.keyedHistograms));
Assert.ok(!(KEYED_ID in subsession.keyedHistograms));
checkHistograms(classic.histograms, subsession.histograms);
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
checkHistograms(classic.histograms, subsession.histograms, "Should start the same");
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms, "Keyed should start the same");
// Adding values should get picked up in both.
count.add(1);
@ -843,8 +843,8 @@ add_task(async function test_checkSubsessionHistograms() {
Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
checkHistograms(classic.histograms, subsession.histograms);
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
checkHistograms(classic.histograms, subsession.histograms, "Added values should be picked up");
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms, "Added values should be picked up by keyed");
// Values should still reset properly.
count.clear();
@ -857,8 +857,8 @@ add_task(async function test_checkSubsessionHistograms() {
Assert.ok(!(KEYED_ID in classic.keyedHistograms));
Assert.ok(!(KEYED_ID in subsession.keyedHistograms));
checkHistograms(classic.histograms, subsession.histograms);
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
checkHistograms(classic.histograms, subsession.histograms, "Values should reset");
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms, "Keyed values should reset");
// Adding values should get picked up in both.
count.add(1);
@ -875,8 +875,8 @@ add_task(async function test_checkSubsessionHistograms() {
Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
checkHistograms(classic.histograms, subsession.histograms);
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
checkHistograms(classic.histograms, subsession.histograms, "Adding values should be picked up again");
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms, "Adding values should be picked up by keyed again");
// We should be able to reset only the subsession histograms.
// First check that "snapshot and clear" still returns the old state...
@ -887,8 +887,8 @@ add_task(async function test_checkSubsessionHistograms() {
Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
subsessionStartDate = new Date(subsession.info.subsessionStartDate);
Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
checkHistograms(classic.histograms, subsession.histograms);
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
checkHistograms(classic.histograms, subsession.histograms, "Should be able to reset subsession");
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms, "Should be able to reset subsession keyed");
// ... then check that the next snapshot shows the subsession
// histograms got reset.
@ -896,9 +896,8 @@ add_task(async function test_checkSubsessionHistograms() {
subsession = TelemetrySession.getPayload("environment-change");
Assert.ok(COUNT_ID in classic.histograms);
Assert.ok(COUNT_ID in subsession.histograms);
Assert.ok(!(COUNT_ID in subsession.histograms));
Assert.equal(classic.histograms[COUNT_ID].sum, 1);
Assert.equal(subsession.histograms[COUNT_ID].sum, 0);
Assert.ok(KEYED_ID in classic.keyedHistograms);
Assert.ok(!(KEYED_ID in subsession.keyedHistograms));
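Put differently, the expectation flips from "reported with a zeroed sum" to "absent"; a minimal sketch of the new check, using the same scope and constants as the test above:

    let subsession = TelemetrySession.getPayload("environment-change");
    // A histogram cleared for the subsession is simply missing from the
    // subsession payload instead of being reported with sum 0.
    Assert.ok(!(COUNT_ID in subsession.histograms));
    Assert.ok(!(KEYED_ID in subsession.keyedHistograms));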
@ -1057,7 +1056,7 @@ add_task(async function test_dailyCollection() {
subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
Assert.equal(ping.payload.histograms[COUNT_ID].sum, 0);
Assert.ok(!(COUNT_ID in ping.payload.histograms));
Assert.ok(!(KEYED_ID in ping.payload.keyedHistograms));
// Trigger and collect another daily ping, with the histograms being set again.
@ -1261,7 +1260,7 @@ add_task(async function test_environmentChange() {
subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
Assert.equal(subsessionStartDate.toISOString(), startHour.toISOString());
Assert.equal(ping.payload.histograms[COUNT_ID].sum, 0);
Assert.ok(!(COUNT_ID in ping.payload.histograms));
Assert.ok(!(KEYED_ID in ping.payload.keyedHistograms));
await TelemetryController.testShutdown();

View file

@ -33,7 +33,7 @@ add_task(async function init() {
add_task(async function test_reload() {
do_print("Forging data");
let data = {};
let telemetrySnapshots = Services.telemetry.histogramSnapshots;
let telemetrySnapshots = Services.telemetry.histogramSnapshots.parent;
let i = 0;
for (let k of Object.keys(HISTOGRAMS)) {
let id = HISTOGRAMS[k];
@ -64,7 +64,7 @@ add_task(async function test_reload() {
// Now wait until Telemetry is updated
await wait;
telemetrySnapshots = Services.telemetry.histogramSnapshots;
telemetrySnapshots = Services.telemetry.histogramSnapshots.parent;
for (let k of Object.keys(HISTOGRAMS)) {
let id = HISTOGRAMS[k];
do_print("Testing histogram " + id);

View file

@ -135,7 +135,7 @@ function getDistributionPrefValue(aPrefName) {
}
function getSystemCapabilities() {
return gInstructionSet + "," + getMemoryMB();
return "ISET:" + gInstructionSet + ",MEM:" + getMemoryMB();
}
/**

View file

@ -345,7 +345,7 @@ add_task(async function test_custom() {
// url constructed with %SYSTEM_CAPABILITIES%
add_task(async function test_systemCapabilities() {
let url = URL_PREFIX + "%SYSTEM_CAPABILITIES%/";
let systemCapabilities = getInstructionSet() + "," + getMemoryMB();
let systemCapabilities = "ISET:" + getInstructionSet() + ",MEM:" + getMemoryMB();
Assert.equal(await getResult(url), systemCapabilities,
"the url param for %SYSTEM_CAPABILITIES%" + MSG_SHOULD_EQUAL);
});
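With the labelled format, the substituted value is built roughly like this (illustrative only; getInstructionSet and getMemoryMB are the existing test helpers, and the concrete values are examples):

    // e.g. getInstructionSet() == "SSE3" and getMemoryMB() == 8192 yields
    // "ISET:SSE3,MEM:8192", so the requested URL becomes
    // URL_PREFIX + "ISET:SSE3,MEM:8192/".
    let expected = "ISET:" + getInstructionSet() + ",MEM:" + getMemoryMB();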

View file

@ -240,6 +240,7 @@ skip-if = os == "android"
run-sequentially = Uses hardcoded ports in xpi files.
[test_isDebuggable.js]
[test_legacy.js]
skip-if = appname == "thunderbird"
[test_locale.js]
[test_locked.js]
[test_locked2.js]

View file

@ -3799,7 +3799,6 @@ UpdatePrompt.prototype = {
*/
showUpdateElevationRequired: function UP_showUpdateElevationRequired() {
if (getPref("getBoolPref", PREF_APP_UPDATE_SILENT, false) ||
getPref("getBoolPref", PREF_APP_UPDATE_DOORHANGER, false) ||
this._getAltUpdateWindow()) {
return;
}

View file

@ -4622,15 +4622,6 @@ void XRE_GlibInit()
}
#endif
// Separate stub function to let us specifically suppress it in Valgrind
void
XRE_CreateStatsObject()
{
// Initialize global variables used by histogram collection
// machinery that is used by Telemetry. Note: this is never de-initialised.
Telemetry::CreateStatisticsRecorder();
}
/*
* XRE_main - A class based main entry point used by most platforms.
* Note that on OSX, aAppData->xreDirectory will point to
@ -4643,16 +4634,6 @@ XREMain::XRE_main(int argc, char* argv[], const BootstrapConfig& aConfig)
mozilla::LogModule::Init();
// NB: this must happen after the creation of |ScopedLogging log| since
// ScopedLogging::ScopedLogging calls NS_LogInit, and
// XRE_CreateStatsObject calls Telemetry::CreateStatisticsRecorder,
// and NS_LogInit must be called before Telemetry::CreateStatisticsRecorder.
// NS_LogInit must be called before Telemetry::CreateStatisticsRecorder
// so as to avoid many log messages of the form
// WARNING: XPCOM objects created/destroyed from static ctor/dtor: [..]
// See bug 1279614.
XRE_CreateStatsObject();
#if defined(MOZ_SANDBOX) && defined(XP_LINUX) && !defined(ANDROID)
SandboxInfo::ThreadingCheck();
#endif

View file

@ -80,8 +80,6 @@
#include "GeckoProfiler.h"
#include "mozilla/Telemetry.h"
#if defined(MOZ_SANDBOX) && defined(XP_WIN)
#include "mozilla/sandboxTarget.h"
#include "mozilla/sandboxing/loggingCallbacks.h"
@ -407,14 +405,6 @@ XRE_InitChildProcess(int aArgc,
// NB: This must be called before profiler_init
ScopedLogging logger;
// This is needed by Telemetry to initialize histogram collection.
// NB: This must be called after NS_LogInit().
// NS_LogInit must be called before Telemetry::CreateStatisticsRecorder
// so as to avoid many log messages of the form
// WARNING: XPCOM objects created/destroyed from static ctor/dtor: [..]
// See bug 1279614.
Telemetry::CreateStatisticsRecorder();
mozilla::LogModule::Init();
char aLocal;
@ -728,7 +718,6 @@ XRE_InitChildProcess(int aArgc,
}
#endif
Telemetry::DestroyStatisticsRecorder();
return XRE_DeinitCommandLine();
}

View file

@ -1026,6 +1026,8 @@ nsXREDirProvider::GetDirectory(nsIFile* *aResult)
NS_IMETHODIMP
nsXREDirProvider::DoStartup()
{
nsresult rv;
if (!mProfileNotified) {
nsCOMPtr<nsIObserverService> obsSvc =
mozilla::services::GetObserverService();
@ -1038,8 +1040,7 @@ nsXREDirProvider::DoStartup()
crashes and because we want to begin crash tracking before other code run
from this notification since they may cause crashes.
*/
nsresult rv = mozilla::Preferences::ResetAndReadUserPrefs();
if (NS_FAILED(rv)) NS_WARNING("Failed to setup pref service.");
mozilla::Preferences::InitializeUserPrefs();
bool safeModeNecessary = false;
nsCOMPtr<nsIAppStartup> appStartup (do_GetService(NS_APPSTARTUP_CONTRACTID));