Merge mozilla-central to autoland. a=merge

Committed by Cosmin Sabou on 2018-07-23 12:32:31 +03:00
Parents 3554bddce5 e5eb80645b
Commit 2dbbfa9843
101 changed files with 31161 additions and 221 deletions

View file

@ -11,6 +11,7 @@ DIRS += [
'highlighters',
'inspector',
'object',
'replay',
'targets',
'utils',
'webconsole',

View file

@ -0,0 +1,673 @@
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* eslint-disable spaced-comment, brace-style, indent-legacy */
// When recording/replaying an execution with Web Replay, Devtools server code
// runs in the middleman process instead of the recording/replaying process the
// code is interested in.
//
// This file defines replay objects analogous to those constructed by the
// C++ Debugger (Debugger, Debugger.Object, etc.), which implement similar
// methods and properties to those C++ objects. These replay objects are
// created in the middleman process, and describe things that exist in the
// recording/replaying process, inspecting them via the RecordReplayControl
// interface.
"use strict";
const RecordReplayControl = require("RecordReplayControl");
///////////////////////////////////////////////////////////////////////////////
// ReplayDebugger
///////////////////////////////////////////////////////////////////////////////
function ReplayDebugger() {
RecordReplayControl.registerReplayDebugger(this);
// All breakpoints (per BreakpointPosition) installed by this debugger.
this._breakpoints = [];
// All ReplayDebuggerFrames that have been created while paused at the
// current position, indexed by their index (zero is the oldest frame, with
// the index increasing for newer frames). These are invalidated when
// unpausing.
this._frames = [];
// All ReplayDebuggerObjects and ReplayDebuggerEnvironments that have been
// created while paused at the current position, indexed by their id. These
// are invalidated when unpausing.
this._objects = [];
// All ReplayDebuggerScripts and ReplayDebuggerScriptSources that have been
// created, indexed by their id. These stay valid even after unpausing.
this._scripts = [];
this._scriptSources = [];
}
// Frame index used to refer to the newest frame in the child process.
const NewestFrameIndex = -1;
ReplayDebugger.prototype = {
/////////////////////////////////////////////////////////
// General methods
/////////////////////////////////////////////////////////
replaying: true,
canRewind: RecordReplayControl.canRewind,
replayResumeBackward() { RecordReplayControl.resume(/* forward = */ false); },
replayResumeForward() { RecordReplayControl.resume(/* forward = */ true); },
replayPause: RecordReplayControl.pause,
addDebuggee() {},
removeAllDebuggees() {},
replayingContent(url) {
return this._sendRequest({ type: "getContent", url });
},
_sendRequest(request) {
const data = RecordReplayControl.sendRequest(request);
//dump("SendRequest: " +
// JSON.stringify(request) + " -> " + JSON.stringify(data) + "\n");
if (data.exception) {
ThrowError(data.exception);
}
return data;
},
_setBreakpoint(handler, position, data) {
const id = RecordReplayControl.setBreakpoint(handler, position);
this._breakpoints.push({id, position, data});
},
_clearMatchingBreakpoints(callback) {
this._breakpoints = this._breakpoints.filter(breakpoint => {
if (callback(breakpoint)) {
RecordReplayControl.clearBreakpoint(breakpoint.id);
return false;
}
return true;
});
},
_searchBreakpoints(callback) {
for (const breakpoint of this._breakpoints) {
const v = callback(breakpoint);
if (v) {
return v;
}
}
return undefined;
},
// This is called on all ReplayDebuggers whenever the child process is about
// to unpause. Clear out all data that is invalidated as a result.
invalidateAfterUnpause() {
this._frames.forEach(frame => {
if (frame) {
frame._invalidate();
}
});
this._frames.length = 0;
this._objects.forEach(obj => obj._invalidate());
this._objects.length = 0;
},
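// For illustration (an editorial sketch, not part of the module): during a
// pause the middleman lazily builds proxies for whatever the client touches,
// e.g.
//
//   const frame = dbg.getNewestFrame();   // cached in dbg._frames
//   const callee = frame.callee;          // cached in dbg._objects
//   const script = frame.script;          // cached in dbg._scripts
//
// Once the child unpauses, invalidateAfterUnpause() drops the frame and
// object proxies (their ids are no longer meaningful), while script proxies
// remain valid because script ids are stable across the recording.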
/////////////////////////////////////////////////////////
// Script methods
/////////////////////////////////////////////////////////
_getScript(id) {
if (!id) {
return null;
}
const rv = this._scripts[id];
if (rv) {
return rv;
}
return this._addScript(this._sendRequest({ type: "getScript", id }));
},
_addScript(data) {
if (!this._scripts[data.id]) {
this._scripts[data.id] = new ReplayDebuggerScript(this, data);
}
return this._scripts[data.id];
},
findScripts() {
// Note: Debugger's findScripts() method takes a query argument, which
// we ignore here.
const data = this._sendRequest({ type: "findScripts" });
return data.map(script => this._addScript(script));
},
/////////////////////////////////////////////////////////
// ScriptSource methods
/////////////////////////////////////////////////////////
_getSource(id) {
if (!this._scriptSources[id]) {
const data = this._sendRequest({ type: "getSource", id });
this._scriptSources[id] = new ReplayDebuggerScriptSource(this, data);
}
return this._scriptSources[id];
},
/////////////////////////////////////////////////////////
// Object methods
/////////////////////////////////////////////////////////
_getObject(id) {
if (id && !this._objects[id]) {
const data = this._sendRequest({ type: "getObject", id });
switch (data.kind) {
case "Object":
this._objects[id] = new ReplayDebuggerObject(this, data);
break;
case "Environment":
this._objects[id] = new ReplayDebuggerEnvironment(this, data);
break;
default:
ThrowError("Unknown object kind");
}
}
return this._objects[id];
},
_convertValue(value) {
if (value && typeof value == "object") {
if (value.object) {
return this._getObject(value.object);
} else if (value.special == "undefined") {
return undefined;
} else if (value.special == "NaN") {
return NaN;
} else if (value.special == "Infinity") {
return Infinity;
} else if (value.special == "-Infinity") {
return -Infinity;
}
}
return value;
},
_convertCompletionValue(value) {
if ("return" in value) {
return { return: this._convertValue(value.return) };
}
if ("throw" in value) {
return { throw: this._convertValue(value.throw) };
}
ThrowError("Unexpected completion value");
return null; // For eslint
},
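// For illustration (an editorial sketch): the child encodes debuggee values
// as JSON-friendly records (see convertValue in replay.js), and the methods
// above reverse that encoding on the middleman side, e.g.
//
//   "abc"       <-> "abc"
//   undefined   <-> { special: "undefined" }
//   NaN         <-> { special: "NaN" }
//   someObject  <-> { object: 17 }   // id in the child's paused-object table
//   completions <-> { return: <value> } or { throw: <value> }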
/////////////////////////////////////////////////////////
// Frame methods
/////////////////////////////////////////////////////////
_getFrame(index) {
if (index == NewestFrameIndex) {
if (this._frames.length) {
return this._frames[this._frames.length - 1];
}
} else {
assert(index < this._frames.length);
if (this._frames[index]) {
return this._frames[index];
}
}
const data = this._sendRequest({ type: "getFrame", index });
if (index == NewestFrameIndex) {
if ("index" in data) {
index = data.index;
} else {
// There are no frames on the stack.
return null;
}
// Fill in the older frames.
while (index >= this._frames.length) {
this._frames.push(null);
}
}
this._frames[index] = new ReplayDebuggerFrame(this, data);
return this._frames[index];
},
getNewestFrame() {
return this._getFrame(NewestFrameIndex);
},
get onNewScript() {
return this._searchBreakpoints(({position, data}) => {
return position.kind == "NewScript" ? data : null;
});
},
set onNewScript(handler) {
if (handler) {
this._setBreakpoint(() => {
const script = this._sendRequest({ type: "getNewScript" });
const debugScript = this._addScript(script);
handler.call(this, debugScript);
}, { kind: "NewScript" }, handler);
} else {
this._clearMatchingBreakpoints(({position}) => position.kind == "NewScript");
}
},
get onEnterFrame() {
return this._searchBreakpoints(({position, data}) => {
return position.kind == "EnterFrame" ? data : null;
});
},
set onEnterFrame(handler) {
if (handler) {
this._setBreakpoint(() => handler.call(this, this.getNewestFrame()),
{ kind: "EnterFrame" }, handler);
} else {
this._clearMatchingBreakpoints(({position}) => position.kind == "EnterFrame");
}
},
get replayingOnPopFrame() {
return this._searchBreakpoints(({position, data}) => {
return (position.kind == "OnPop" && !position.script) ? data : null;
});
},
set replayingOnPopFrame(handler) {
if (handler) {
this._setBreakpoint(() => handler.call(this, this.getNewestFrame()),
{ kind: "OnPop" }, handler);
} else {
this._clearMatchingBreakpoints(({position}) => {
return position.kind == "OnPop" && !position.script;
});
}
},
clearAllBreakpoints: NYI,
}; // ReplayDebugger.prototype
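// A minimal usage sketch (editorial illustration; assumes middleman-process
// code where RecordReplayControl is available, and that the require path
// below is the one registered by replay/moz.build):
//
//   const ReplayDebugger = require("devtools/server/actors/replay/debugger");
//   const dbg = new ReplayDebugger();
//   dbg.onNewScript = script => {
//     // Break at each entry point of the script's first line.
//     for (const offset of script.getLineOffsets(script.startLine)) {
//       script.setBreakpoint(offset, {
//         hit(frame) { dump("hit " + script.url + " @" + offset + "\n"); },
//       });
//     }
//   };
//   dbg.replayResumeForward();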
///////////////////////////////////////////////////////////////////////////////
// ReplayDebuggerScript
///////////////////////////////////////////////////////////////////////////////
function ReplayDebuggerScript(dbg, data) {
this._dbg = dbg;
this._data = data;
}
ReplayDebuggerScript.prototype = {
get displayName() { return this._data.displayName; },
get url() { return this._data.url; },
get startLine() { return this._data.startLine; },
get lineCount() { return this._data.lineCount; },
get source() { return this._dbg._getSource(this._data.sourceId); },
get sourceStart() { return this._data.sourceStart; },
get sourceLength() { return this._data.sourceLength; },
_forward(type, value) {
return this._dbg._sendRequest({ type, id: this._data.id, value });
},
getLineOffsets(line) { return this._forward("getLineOffsets", line); },
getOffsetLocation(pc) { return this._forward("getOffsetLocation", pc); },
getSuccessorOffsets(pc) { return this._forward("getSuccessorOffsets", pc); },
getPredecessorOffsets(pc) { return this._forward("getPredecessorOffsets", pc); },
setBreakpoint(offset, handler) {
this._dbg._setBreakpoint(() => { handler.hit(this._dbg.getNewestFrame()); },
{ kind: "Break", script: this._data.id, offset },
handler);
},
clearBreakpoint(handler) {
this._dbg._clearMatchingBreakpoints(({position, data}) => {
return position.script == this._data.id && handler == data;
});
},
get isGeneratorFunction() { NYI(); },
get isAsyncFunction() { NYI(); },
get format() { NYI(); },
getChildScripts: NYI,
getAllOffsets: NYI,
getAllColumnOffsets: NYI,
getBreakpoints: NYI,
clearAllBreakpoints: NYI,
isInCatchScope: NYI,
};
///////////////////////////////////////////////////////////////////////////////
// ReplayDebuggerScriptSource
///////////////////////////////////////////////////////////////////////////////
function ReplayDebuggerScriptSource(dbg, data) {
this._dbg = dbg;
this._data = data;
}
ReplayDebuggerScriptSource.prototype = {
get text() { return this._data.text; },
get url() { return this._data.url; },
get displayURL() { return this._data.displayURL; },
get elementAttributeName() { return this._data.elementAttributeName; },
get introductionOffset() { return this._data.introductionOffset; },
get introductionType() { return this._data.introductionType; },
get sourceMapURL() { return this._data.sourceMapURL; },
get element() { return null; },
get introductionScript() {
return this._dbg._getScript(this._data.introductionScript);
},
get binary() { NYI(); },
};
///////////////////////////////////////////////////////////////////////////////
// ReplayDebuggerFrame
///////////////////////////////////////////////////////////////////////////////
function ReplayDebuggerFrame(dbg, data) {
this._dbg = dbg;
this._data = data;
if (this._data.arguments) {
this._data.arguments =
this._data.arguments.map(this._dbg._convertValue.bind(this._dbg));
}
}
ReplayDebuggerFrame.prototype = {
_invalidate() {
this._data = null;
},
get type() { return this._data.type; },
get callee() { return this._dbg._getObject(this._data.callee); },
get environment() { return this._dbg._getObject(this._data.environment); },
get generator() { return this._data.generator; },
get constructing() { return this._data.constructing; },
get this() { return this._dbg._convertValue(this._data.this); },
get script() { return this._dbg._getScript(this._data.script); },
get offset() { return this._data.offset; },
get arguments() { return this._data.arguments; },
get live() { return true; },
eval(text, options) {
const rv = this._dbg._sendRequest({ type: "frameEvaluate",
index: this._data.index, text, options });
return this._dbg._convertCompletionValue(rv);
},
_positionMatches(position, kind) {
return position.kind == kind
&& position.script == this._data.script
&& position.frameIndex == this._data.index;
},
get onStep() {
return this._dbg._searchBreakpoints(({position, data}) => {
return this._positionMatches(position, "OnStep") ? data : null;
});
},
set onStep(handler) {
if (handler) {
// Use setReplayingOnStep instead.
NotAllowed();
}
this._clearOnStepBreakpoints();
},
_clearOnStepBreakpoints() {
this._dbg._clearMatchingBreakpoints(
({position}) => this._positionMatches(position, "OnStep")
);
},
setReplayingOnStep(handler, offsets) {
this._clearOnStepBreakpoints();
offsets.forEach(offset => {
this._dbg._setBreakpoint(
() => handler.call(this._dbg.getNewestFrame()),
{ kind: "OnStep",
script: this._data.script,
offset,
frameIndex: this._data.index },
handler);
});
},
get onPop() {
return this._dbg._searchBreakpoints(({position, data}) => {
return this._positionMatches(position, "OnPop") ? data : null;
});
},
set onPop(handler) {
if (handler) {
this._dbg._setBreakpoint(() => {
const result = this._dbg._sendRequest({ type: "popFrameResult" });
handler.call(this._dbg.getNewestFrame(),
this._dbg._convertCompletionValue(result));
},
{ kind: "OnPop", script: this._data.script, frameIndex: this._data.index },
handler);
} else {
this._dbg._clearMatchingBreakpoints(
({position}) => this._positionMatches(position, "OnPop")
);
}
},
get older() {
if (this._data.index == 0) {
// This is the oldest frame.
return null;
}
return this._dbg._getFrame(this._data.index - 1);
},
get implementation() { NYI(); },
evalWithBindings: NYI,
};
///////////////////////////////////////////////////////////////////////////////
// ReplayDebuggerObject
///////////////////////////////////////////////////////////////////////////////
function ReplayDebuggerObject(dbg, data) {
this._dbg = dbg;
this._data = data;
this._properties = null;
}
ReplayDebuggerObject.prototype = {
_invalidate() {
this._data = null;
this._properties = null;
},
get callable() { return this._data.callable; },
get isBoundFunction() { return this._data.isBoundFunction; },
get isArrowFunction() { return this._data.isArrowFunction; },
get isGeneratorFunction() { return this._data.isGeneratorFunction; },
get isAsyncFunction() { return this._data.isAsyncFunction; },
get proto() { return this._dbg._getObject(this._data.proto); },
get class() { return this._data.class; },
get name() { return this._data.name; },
get displayName() { return this._data.displayName; },
get parameterNames() { return this._data.parameterNames; },
get script() { return this._dbg._getScript(this._data.script); },
get environment() { return this._dbg._getObject(this._data.environment); },
get boundTargetFunction() { return this.isBoundFunction ? NYI() : undefined; },
get boundThis() { return this.isBoundFunction ? NYI() : undefined; },
get boundArguments() { return this.isBoundFunction ? NYI() : undefined; },
get global() { return this._dbg._getObject(this._data.global); },
get isProxy() { return this._data.isProxy; },
isExtensible() { return this._data.isExtensible; },
isSealed() { return this._data.isSealed; },
isFrozen() { return this._data.isFrozen; },
unwrap() { return this.isProxy ? NYI() : this; },
unsafeDereference() {
// Direct access to the referent is not currently available.
return null;
},
getOwnPropertyNames() {
this._ensureProperties();
return Object.keys(this._properties);
},
getOwnPropertySymbols() {
// Symbol properties are not handled yet.
return [];
},
getOwnPropertyDescriptor(name) {
this._ensureProperties();
return this._properties[name];
},
_ensureProperties() {
if (!this._properties) {
const properties = this._dbg._sendRequest({
type: "getObjectProperties",
id: this._data.id
});
this._properties = {};
properties.forEach(({name, desc}) => {
if ("value" in desc) {
desc.value = this._dbg._convertValue(desc.value);
}
if ("get" in desc) {
desc.get = this._dbg._getObject(desc.get);
}
if ("set" in desc) {
desc.set = this._dbg._getObject(desc.set);
}
this._properties[name] = desc;
});
}
},
get allocationSite() { NYI(); },
get errorMessageName() { NYI(); },
get errorNotes() { NYI(); },
get errorLineNumber() { NYI(); },
get errorColumnNumber() { NYI(); },
get proxyTarget() { NYI(); },
get proxyHandler() { NYI(); },
get isPromise() { NYI(); },
call: NYI,
apply: NYI,
asEnvironment: NYI,
executeInGlobal: NYI,
executeInGlobalWithBindings: NYI,
makeDebuggeeValue: NYI,
preventExtensions: NotAllowed,
seal: NotAllowed,
freeze: NotAllowed,
defineProperty: NotAllowed,
defineProperties: NotAllowed,
deleteProperty: NotAllowed,
forceLexicalInitializationByName: NotAllowed,
};
///////////////////////////////////////////////////////////////////////////////
// ReplayDebuggerEnvironment
///////////////////////////////////////////////////////////////////////////////
function ReplayDebuggerEnvironment(dbg, data) {
this._dbg = dbg;
this._data = data;
this._names = null;
}
ReplayDebuggerEnvironment.prototype = {
_invalidate() {
this._data = null;
this._names = null;
},
get type() { return this._data.type; },
get parent() { return this._dbg._getObject(this._data.parent); },
get object() { return this._dbg._getObject(this._data.object); },
get callee() { return this._dbg._getObject(this._data.callee); },
get optimizedOut() { return this._data.optimizedOut; },
_ensureNames() {
if (!this._names) {
const names =
this._dbg._sendRequest({ type: "getEnvironmentNames", id: this._data.id });
this._names = {};
names.forEach(({ name, value }) => {
this._names[name] = this._dbg._convertValue(value);
});
}
},
names() {
this._ensureNames();
return Object.keys(this._names);
},
getVariable(name) {
this._ensureNames();
return this._names[name];
},
get inspectable() {
// All ReplayDebugger environments are inspectable, as all compartments in
// the replayed process are considered to be debuggees.
return true;
},
find: NYI,
setVariable: NotAllowed,
};
///////////////////////////////////////////////////////////////////////////////
// Utilities
///////////////////////////////////////////////////////////////////////////////
function NYI() {
ThrowError("Not yet implemented");
}
function NotAllowed() {
ThrowError("Not allowed");
}
function ThrowError(msg) {
const error = new Error(msg);
dump("ReplayDebugger Server Error: " + msg + " Stack: " + error.stack + "\n");
throw error;
}
function assert(v) {
if (!v) {
throw new Error("Assertion Failed!");
}
}
module.exports = ReplayDebugger;
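// For illustration (editorial sketch): each request sent above through
// RecordReplayControl.sendRequest() is serviced by the matching entry in
// gRequestHandlers in replay.js, running inside the recording/replaying
// process, e.g.
//
//   middleman: this._sendRequest({ type: "getScript", id: 3 })
//   child:     gRequestHandlers.getScript({ type: "getScript", id: 3 })
//              -> { id: 3, sourceId: 1, startLine: 10, lineCount: 5, ... }
//
// The ids and field values shown here are hypothetical.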

View file

@ -0,0 +1,67 @@
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// This module defines the routines used when updating the graphics shown by a
// middleman process tab. Middleman processes have their own window/document
// which are connected to the compositor in the UI process in the usual way.
// We need to update the contents of the document to draw the raw graphics data
// provided by the child process.
"use strict";
ChromeUtils.import("resource://gre/modules/Services.jsm");
function updateWindow(window, buffer, width, height) {
// Make sure the window has a canvas filling the screen.
let canvas = window.middlemanCanvas;
if (!canvas) {
canvas = window.document.createElement("canvas");
window.document.body.style.margin = "0px";
window.document.body.insertBefore(canvas, window.document.body.firstChild);
window.middlemanCanvas = canvas;
}
canvas.width = width;
canvas.height = height;
// If there is a scale for this window, then the graphics will already have
// been scaled in the child process. To avoid scaling the graphics twice,
// transform the canvas to undo the scaling.
const scale = window.devicePixelRatio;
if (scale != 1) {
canvas.style.transform =
`scale(${ 1 / scale }) translate(-${ width / scale }px, -${ height / scale }px)`;
}
const graphicsData = new Uint8Array(buffer);
const imageData = canvas.getContext("2d").getImageData(0, 0, width, height);
imageData.data.set(graphicsData);
canvas.getContext("2d").putImageData(imageData, 0, 0);
// Make recording/replaying tabs easier to differentiate from other tabs.
window.document.title = "RECORD/REPLAY";
}
// Entry point for when we have some new graphics data from the child process
// to draw.
// eslint-disable-next-line no-unused-vars
function Update(buffer, width, height) {
try {
// Paint to all windows we can find. Hopefully there is only one.
const windowEnumerator = Services.ww.getWindowEnumerator();
while (windowEnumerator.hasMoreElements()) {
const window = windowEnumerator.getNext().QueryInterface(Ci.nsIDOMWindow);
updateWindow(window, buffer, width, height);
}
} catch (e) {
dump("Middleman Graphics Update Exception: " + e + "\n");
}
}
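// For illustration (editorial sketch, hypothetical values): the middleman's
// native side is expected to call Update() whenever the replaying child
// delivers a new frame of raw pixel data in ImageData (RGBA) layout, roughly:
//
//   Update(pixelBuffer /* ArrayBuffer, width * height * 4 bytes */, 1280, 720);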
// eslint-disable-next-line no-unused-vars
var EXPORTED_SYMBOLS = [
"Update",
];

View file

@ -0,0 +1,11 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
DevToolsModules(
'debugger.js',
'graphics.js',
'replay.js',
)

View file

@ -0,0 +1,598 @@
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* eslint-disable spaced-comment, brace-style, indent-legacy */
// This file defines the logic that runs in the record/replay devtools sandbox.
// This code is loaded into all recording/replaying processes, and responds to
// requests and other instructions from the middleman via the exported symbols
// defined at the end of this file.
//
// Like all other JavaScript in the recording/replaying process, this code's
// state is included in memory snapshots and reset when checkpoints are
// restored. In the process of handling the middleman's requests, however, its
// state may vary between recording and replaying, or between different
// replays. As a result, we have to be very careful about performing operations
// that might interact with the recording, such as entering the debuggee to
// evaluate code or perform other operations.
// The RecordReplayControl.maybeDivergeFromRecording function should be used at
// any point where such interactions might occur.
// eslint-disable spaced-comment
"use strict";
const CC = Components.Constructor;
// Create a sandbox with the resources we need. require() doesn't work here.
const sandbox = Cu.Sandbox(CC("@mozilla.org/systemprincipal;1", "nsIPrincipal")());
Cu.evalInSandbox(
"Components.utils.import('resource://gre/modules/jsdebugger.jsm');" +
"addDebuggerToGlobal(this);",
sandbox
);
const Debugger = sandbox.Debugger;
const RecordReplayControl = sandbox.RecordReplayControl;
const dbg = new Debugger();
// We are interested in debugging all globals in the process.
dbg.onNewGlobalObject = function(global) {
dbg.addDebuggee(global);
};
///////////////////////////////////////////////////////////////////////////////
// Utilities
///////////////////////////////////////////////////////////////////////////////
function assert(v) {
if (!v) {
RecordReplayControl.dump("Assertion Failed: " + (new Error()).stack + "\n");
throw new Error("Assertion Failed!");
}
}
// Bidirectional map between objects and IDs.
function IdMap() {
this._idToObject = [ undefined ];
this._objectToId = new Map();
}
IdMap.prototype = {
add(object) {
assert(object && !this._objectToId.has(object));
const id = this._idToObject.length;
this._idToObject.push(object);
this._objectToId.set(object, id);
return id;
},
getId(object) {
const id = this._objectToId.get(object);
return (id === undefined) ? 0 : id;
},
getObject(id) {
return this._idToObject[id];
},
forEach(callback) {
for (let i = 1; i < this._idToObject.length; i++) {
callback(i, this._idToObject[i]);
}
},
lastId() {
return this._idToObject.length - 1;
},
};
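// For illustration (editorial sketch): ids handed out by an IdMap are 1-based
// and stable for the map's lifetime, with 0 meaning "not present", e.g.
//
//   const map = new IdMap();
//   const id = map.add(someObject);       // 1 for the first object added
//   map.getObject(id) === someObject;     // true
//   map.getId(neverAddedObject);          // 0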
function countScriptFrames() {
let count = 0;
let frame = dbg.getNewestFrame();
while (frame) {
if (considerScript(frame.script)) {
count++;
}
frame = frame.older;
}
return count;
}
function scriptFrameForIndex(index) {
let indexFromTop = countScriptFrames() - 1 - index;
let frame = dbg.getNewestFrame();
while (true) {
if (considerScript(frame.script)) {
if (indexFromTop-- == 0) {
break;
}
}
frame = frame.older;
}
return frame;
}
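// For illustration (editorial note): frame indices count from the oldest
// script frame. With a stack of considered frames A -> B -> C (oldest to
// newest), countScriptFrames() is 3, scriptFrameForIndex(0) returns A and
// scriptFrameForIndex(2) returns C, matching the NewestFrameIndex handling
// on the middleman side.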
///////////////////////////////////////////////////////////////////////////////
// Persistent State
///////////////////////////////////////////////////////////////////////////////
// Association between Debugger.Scripts and their IDs. The indices that this
// table assigns to scripts are stable across the entire recording, even though
// this table (like all JS state) is included in snapshots, rolled back when
// rewinding, and so forth. In debuggee time, this table only grows (there is
// no way to remove entries). Scripts created for debugger activity (e.g. eval)
// are ignored, and off thread compilation is disabled, so this table acquires
// the same scripts in the same order as we roll back and run forward in the
// recording.
const gScripts = new IdMap();
function addScript(script) {
gScripts.add(script);
script.getChildScripts().forEach(addScript);
}
// Association between Debugger.ScriptSources and their IDs. As for gScripts,
// the indices assigned to a script source are consistent across all replays
// and rewinding.
const gScriptSources = new IdMap();
function addScriptSource(source) {
gScriptSources.add(source);
}
function considerScript(script) {
return script.url
&& !script.url.startsWith("resource:")
&& !script.url.startsWith("chrome:");
}
dbg.onNewScript = function(script) {
if (RecordReplayControl.areThreadEventsDisallowed()) {
// This script is part of an eval on behalf of the debugger.
return;
}
if (!considerScript(script)) {
return;
}
addScript(script);
addScriptSource(script.source);
// Each onNewScript call advances the progress counter, to preserve the
// ProgressCounter invariant when onNewScript is called multiple times
// without executing any scripts.
RecordReplayControl.advanceProgressCounter();
if (gHasNewScriptHandler) {
RecordReplayControl.positionHit({ kind: "NewScript" });
}
// Check in case any handlers we need to install are on the scripts just
// created.
installPendingHandlers();
};
///////////////////////////////////////////////////////////////////////////////
// Position Handler State
///////////////////////////////////////////////////////////////////////////////
// Whether there is a position handler for NewScript.
let gHasNewScriptHandler = false;
// Whether there is a position handler for EnterFrame.
let gHasEnterFrameHandler = false;
// Handlers we tried to install but couldn't due to a script not existing.
// Breakpoints requested by the middleman --- which are preserved when
// restoring earlier checkpoints --- identify target scripts by their stable ID
// in gScripts. This array holds the breakpoints for scripts whose IDs we know
// but which have not been created yet.
const gPendingPcHandlers = [];
// Script/offset pairs where we have installed a breakpoint handler. We have to
// avoid installing duplicate handlers here because they will both be called.
const gInstalledPcHandlers = [];
// Callbacks to test whether a frame should have an OnPop handler.
const gOnPopFilters = [];
// eslint-disable-next-line no-unused-vars
function ClearPositionHandlers() {
dbg.clearAllBreakpoints();
dbg.onEnterFrame = undefined;
gHasNewScriptHandler = false;
gHasEnterFrameHandler = false;
gPendingPcHandlers.length = 0;
gInstalledPcHandlers.length = 0;
gOnPopFilters.length = 0;
}
function installPendingHandlers() {
const pending = gPendingPcHandlers.map(position => position);
gPendingPcHandlers.length = 0;
pending.forEach(EnsurePositionHandler);
}
// The completion state of any frame that is being popped.
let gPopFrameResult = null;
function onPopFrame(completion) {
gPopFrameResult = completion;
RecordReplayControl.positionHit({
kind: "OnPop",
script: gScripts.getId(this.script),
frameIndex: countScriptFrames() - 1,
});
gPopFrameResult = null;
}
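// For illustration (editorial note): gPopFrameResult is only set for the
// duration of the positionHit() call above, which gives the middleman a
// chance to send a { type: "popFrameResult" } request (see the onPop setter
// in debugger.js) while the completion value is still available.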
function onEnterFrame(frame) {
if (gHasEnterFrameHandler) {
RecordReplayControl.positionHit({ kind: "EnterFrame" });
}
if (considerScript(frame.script)) {
gOnPopFilters.forEach(filter => {
if (filter(frame)) {
frame.onPop = onPopFrame;
}
});
}
}
function addOnPopFilter(filter) {
let frame = dbg.getNewestFrame();
while (frame) {
if (considerScript(frame.script) && filter(frame)) {
frame.onPop = onPopFrame;
}
frame = frame.older;
}
gOnPopFilters.push(filter);
dbg.onEnterFrame = onEnterFrame;
}
function EnsurePositionHandler(position) {
switch (position.kind) {
case "Break":
case "OnStep":
let debugScript;
if (position.script) {
debugScript = gScripts.getObject(position.script);
if (!debugScript) {
// The script referred to in this position does not exist yet, so we
// can't install a handler for it. Add a pending handler so that we
// can install the handler once the script is created.
gPendingPcHandlers.push(position);
return;
}
}
const match = function({script, offset}) {
return script == position.script && offset == position.offset;
};
if (gInstalledPcHandlers.some(match)) {
return;
}
gInstalledPcHandlers.push({ script: position.script, offset: position.offset });
debugScript.setBreakpoint(position.offset, {
hit() {
RecordReplayControl.positionHit({
kind: "OnStep",
script: position.script,
offset: position.offset,
frameIndex: countScriptFrames() - 1,
});
}
});
break;
case "OnPop":
if (position.script) {
addOnPopFilter(frame => gScripts.getId(frame.script) == position.script);
} else {
addOnPopFilter(frame => true);
}
break;
case "EnterFrame":
gHasEnterFrameHandler = true;
dbg.onEnterFrame = onEnterFrame;
break;
case "NewScript":
gHasNewScriptHandler = true;
break;
}
}
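// For illustration (editorial note): the positions handled above have these
// shapes, with script being a stable id from gScripts:
//
//   { kind: "Break", script, offset }
//   { kind: "OnStep", script, offset, frameIndex }
//   { kind: "OnPop", script, frameIndex }   // or just { kind: "OnPop" }
//   { kind: "EnterFrame" }
//   { kind: "NewScript" }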
// eslint-disable-next-line no-unused-vars
function GetEntryPosition(position) {
if (position.kind == "Break" || position.kind == "OnStep") {
const script = gScripts.getObject(position.script);
if (script) {
return {
kind: "Break",
script: position.script,
offset: script.mainOffset,
};
}
}
return null;
}
///////////////////////////////////////////////////////////////////////////////
// Paused State
///////////////////////////////////////////////////////////////////////////////
let gPausedObjects = new IdMap();
function getObjectId(obj) {
const id = gPausedObjects.getId(obj);
if (!id && obj) {
assert((obj instanceof Debugger.Object) ||
(obj instanceof Debugger.Environment));
return gPausedObjects.add(obj);
}
return id;
}
function convertValue(value) {
if (value instanceof Debugger.Object) {
return { object: getObjectId(value) };
}
if (value === undefined) {
return { special: "undefined" };
}
if (value !== value) { // eslint-disable-line no-self-compare
return { special: "NaN" };
}
if (value == Infinity) {
return { special: "Infinity" };
}
if (value == -Infinity) {
return { special: "-Infinity" };
}
return value;
}
function convertCompletionValue(value) {
if ("return" in value) {
return { return: convertValue(value.return) };
}
if ("throw" in value) {
return { throw: convertValue(value.throw) };
}
throw new Error("Unexpected completion value");
}
// eslint-disable-next-line no-unused-vars
function ClearPausedState() {
gPausedObjects = new IdMap();
}
///////////////////////////////////////////////////////////////////////////////
// Handler Helpers
///////////////////////////////////////////////////////////////////////////////
function getScriptData(id) {
const script = gScripts.getObject(id);
return {
id,
sourceId: gScriptSources.getId(script.source),
startLine: script.startLine,
lineCount: script.lineCount,
sourceStart: script.sourceStart,
sourceLength: script.sourceLength,
displayName: script.displayName,
url: script.url,
};
}
function forwardToScript(name) {
return request => gScripts.getObject(request.id)[name](request.value);
}
///////////////////////////////////////////////////////////////////////////////
// Handlers
///////////////////////////////////////////////////////////////////////////////
const gRequestHandlers = {
findScripts(request) {
const rv = [];
gScripts.forEach((id) => {
rv.push(getScriptData(id));
});
return rv;
},
getScript(request) {
return getScriptData(request.id);
},
getNewScript(request) {
return getScriptData(gScripts.lastId());
},
getContent(request) {
return RecordReplayControl.getContent(request.url);
},
getSource(request) {
const source = gScriptSources.getObject(request.id);
const introductionScript = gScripts.getId(source.introductionScript);
return {
id: request.id,
text: source.text,
url: source.url,
displayURL: source.displayURL,
elementAttributeName: source.elementAttributeName,
introductionScript,
introductionOffset: introductionScript ? source.introductionOffset : undefined,
introductionType: source.introductionType,
sourceMapURL: source.sourceMapURL,
};
},
getObject(request) {
const object = gPausedObjects.getObject(request.id);
if (object instanceof Debugger.Object) {
return {
id: request.id,
kind: "Object",
callable: object.callable,
isBoundFunction: object.isBoundFunction,
isArrowFunction: object.isArrowFunction,
isGeneratorFunction: object.isGeneratorFunction,
isAsyncFunction: object.isAsyncFunction,
proto: getObjectId(object.proto),
class: object.class,
name: object.name,
displayName: object.displayName,
parameterNames: object.parameterNames,
script: gScripts.getId(object.script),
environment: getObjectId(object.environment),
global: getObjectId(object.global),
isProxy: object.isProxy,
isExtensible: object.isExtensible(),
isSealed: object.isSealed(),
isFrozen: object.isFrozen(),
};
}
if (object instanceof Debugger.Environment) {
return {
id: request.id,
kind: "Environment",
type: object.type,
parent: getObjectId(object.parent),
object: object.type == "declarative" ? 0 : getObjectId(object.object),
callee: getObjectId(object.callee),
optimizedOut: object.optimizedOut,
};
}
throw new Error("Unknown object kind");
},
getObjectProperties(request) {
if (!RecordReplayControl.maybeDivergeFromRecording()) {
return [{
name: "Unknown properties",
desc: {
value: "Recording divergence in getObjectProperties",
enumerable: true
},
}];
}
const object = gPausedObjects.getObject(request.id);
const names = object.getOwnPropertyNames();
return names.map(name => {
const desc = object.getOwnPropertyDescriptor(name);
if ("value" in desc) {
desc.value = convertValue(desc.value);
}
if ("get" in desc) {
desc.get = getObjectId(desc.get);
}
if ("set" in desc) {
desc.set = getObjectId(desc.set);
}
return { name, desc };
});
},
getEnvironmentNames(request) {
if (!RecordReplayControl.maybeDivergeFromRecording()) {
return [{name: "Unknown names",
value: "Recording divergence in getEnvironmentNames" }];
}
const env = gPausedObjects.getObject(request.id);
const names = env.names();
return names.map(name => {
return { name, value: convertValue(env.getVariable(name)) };
});
},
getFrame(request) {
if (request.index == -1 /* NewestFrameIndex */) {
const numFrames = countScriptFrames();
if (!numFrames) {
// Return an empty object when there are no frames.
return {};
}
request.index = numFrames - 1;
}
const frame = scriptFrameForIndex(request.index);
let _arguments = null;
if (frame.arguments) {
_arguments = [];
for (let i = 0; i < frame.arguments.length; i++) {
_arguments.push(convertValue(frame.arguments[i]));
}
}
return {
index: request.index,
type: frame.type,
callee: getObjectId(frame.callee),
environment: getObjectId(frame.environment),
generator: frame.generator,
constructing: frame.constructing,
this: convertValue(frame.this),
script: gScripts.getId(frame.script),
offset: frame.offset,
arguments: _arguments,
};
},
getLineOffsets: forwardToScript("getLineOffsets"),
getOffsetLocation: forwardToScript("getOffsetLocation"),
getSuccessorOffsets: forwardToScript("getSuccessorOffsets"),
getPredecessorOffsets: forwardToScript("getPredecessorOffsets"),
frameEvaluate(request) {
if (!RecordReplayControl.maybeDivergeFromRecording()) {
return { throw: "Recording divergence in frameEvaluate" };
}
const frame = scriptFrameForIndex(request.index);
const rv = frame.eval(request.text, request.options);
return convertCompletionValue(rv);
},
popFrameResult(request) {
return gPopFrameResult ? convertCompletionValue(gPopFrameResult) : {};
},
};
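// For illustration (editorial note): handlers that may run debuggee code guard
// themselves with RecordReplayControl.maybeDivergeFromRecording() and return a
// placeholder when divergence is not possible, as frameEvaluate does above:
//
//   if (!RecordReplayControl.maybeDivergeFromRecording()) {
//     return { throw: "Recording divergence in frameEvaluate" };
//   }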
// eslint-disable-next-line no-unused-vars
function ProcessRequest(request) {
try {
if (gRequestHandlers[request.type]) {
return gRequestHandlers[request.type](request);
}
return { exception: "No handler for " + request.type };
} catch (e) {
RecordReplayControl.dump("ReplayDebugger Record/Replay Error: " + e + "\n");
return { exception: "" + e };
}
}
// eslint-disable-next-line no-unused-vars
var EXPORTED_SYMBOLS = [
"EnsurePositionHandler",
"ClearPositionHandlers",
"ClearPausedState",
"ProcessRequest",
"GetEntryPosition",
];

View file

@ -605,7 +605,8 @@ public:
// present.
if (!targetProcess) {
targetProcess =
ContentParent::GetNewOrUsedBrowserProcess(NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
ContentParent::GetNewOrUsedBrowserProcess(nullptr,
NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
ContentParent::GetInitialProcessPriority(nullptr),
nullptr);
}

View file

@ -113,4 +113,10 @@ interface nsITabParent : nsISupports
* autoscrolled.
*/
void stopApzAutoscroll(in nsViewID aScrollId, in uint32_t aPresShellId);
/**
* Save a recording of the associated content process' behavior to the
* specified filename. Returns whether the process is being recorded.
*/
bool saveRecording(in AString aFileName);
};

View file

@ -77,6 +77,7 @@
#include "mozilla/PerformanceUtils.h"
#include "mozilla/plugins/PluginInstanceParent.h"
#include "mozilla/plugins/PluginModuleParent.h"
#include "mozilla/recordreplay/ParentIPC.h"
#include "mozilla/widget/ScreenManager.h"
#include "mozilla/widget/WidgetMessageUtils.h"
#include "nsBaseDragService.h"
@ -673,6 +674,12 @@ ContentChild::Init(MessageLoop* aIOLoop,
return false;
}
// Middleman processes use a special channel for forwarding messages to
// their own children.
if (recordreplay::IsMiddleman()) {
SetMiddlemanIPCChannel(recordreplay::parent::ChannelToUIProcess());
}
if (!Open(aChannel, aParentPid, aIOLoop)) {
return false;
}
@ -3821,6 +3828,13 @@ ContentChild::RecvAddDynamicScalars(nsTArray<DynamicScalarDefinition>&& aDefs)
return IPC_OK();
}
mozilla::ipc::IPCResult
ContentChild::RecvSaveRecording(const FileDescriptor& aFile)
{
recordreplay::parent::SaveRecording(aFile);
return IPC_OK();
}
already_AddRefed<nsIEventTarget>
ContentChild::GetSpecificMessageEventTarget(const Message& aMsg)
{

View file

@ -736,6 +736,9 @@ public:
virtual bool
DeallocPClientOpenWindowOpChild(PClientOpenWindowOpChild* aActor) override;
mozilla::ipc::IPCResult
RecvSaveRecording(const FileDescriptor& aFile) override;
#ifdef NIGHTLY_BUILD
// Fetch the current number of pending input events.
//

View file

@ -94,6 +94,7 @@
#include "mozilla/Preferences.h"
#include "mozilla/ProcessHangMonitor.h"
#include "mozilla/ProcessHangMonitorIPC.h"
#include "mozilla/recordreplay/ParentIPC.h"
#include "mozilla/Scheduler.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/ScriptPreloader.h"
@ -116,6 +117,8 @@
#include "nsConsoleService.h"
#include "nsContentUtils.h"
#include "nsDebugImpl.h"
#include "nsDirectoryServiceDefs.h"
#include "nsEmbedCID.h"
#include "nsFrameLoader.h"
#include "nsFrameMessageManager.h"
#include "nsHashPropertyBag.h"
@ -143,6 +146,7 @@
#include "nsIObserverService.h"
#include "nsIParentChannel.h"
#include "nsIPresShell.h"
#include "nsIPromptService.h"
#include "nsIRemoteWindowContext.h"
#include "nsIScriptError.h"
#include "nsIScriptSecurityManager.h"
@ -608,7 +612,9 @@ ContentParent::PreallocateProcess()
{
RefPtr<ContentParent> process =
new ContentParent(/* aOpener = */ nullptr,
NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
eNotRecordingOrReplaying,
/* aRecordingFile = */ EmptyString());
PreallocatedProcessManager::AddBlocker(process);
@ -764,18 +770,55 @@ ContentParent::MinTabSelect(const nsTArray<ContentParent*>& aContentParents,
return candidate.forget();
}
static bool
CreateTemporaryRecordingFile(nsAString& aResult)
{
unsigned long elapsed = (TimeStamp::Now() - TimeStamp::ProcessCreation()).ToMilliseconds();
nsCOMPtr<nsIFile> file;
return !NS_FAILED(NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(file)))
&& !NS_FAILED(file->AppendNative(nsPrintfCString("Recording%lu", elapsed)))
&& !NS_FAILED(file->GetPath(aResult));
}
/*static*/ already_AddRefed<ContentParent>
ContentParent::GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
ContentParent::GetNewOrUsedBrowserProcess(Element* aFrameElement,
const nsAString& aRemoteType,
ProcessPriority aPriority,
ContentParent* aOpener,
bool aPreferUsed)
{
// Figure out if this process will be recording or replaying, and which file
// to use for the recording.
RecordReplayState recordReplayState = eNotRecordingOrReplaying;
nsAutoString recordingFile;
if (aFrameElement) {
aFrameElement->GetAttr(kNameSpaceID_None, nsGkAtoms::ReplayExecution, recordingFile);
if (!recordingFile.IsEmpty()) {
recordReplayState = eReplaying;
} else {
aFrameElement->GetAttr(kNameSpaceID_None, nsGkAtoms::RecordExecution, recordingFile);
if (recordingFile.IsEmpty() && recordreplay::parent::SaveAllRecordingsDirectory()) {
recordingFile.AssignLiteral("*");
}
if (!recordingFile.IsEmpty()) {
if (recordingFile.EqualsLiteral("*") && !CreateTemporaryRecordingFile(recordingFile)) {
return nullptr;
}
recordReplayState = eRecording;
}
}
}
nsTArray<ContentParent*>& contentParents = GetOrCreatePool(aRemoteType);
uint32_t maxContentParents = GetMaxProcessCount(aRemoteType);
if (aRemoteType.EqualsLiteral(LARGE_ALLOCATION_REMOTE_TYPE)) {
if (recordReplayState != eNotRecordingOrReplaying) {
// Fall through and always create a new process when recording or replaying.
} else if (aRemoteType.EqualsLiteral(LARGE_ALLOCATION_REMOTE_TYPE)) {
// We never want to re-use Large-Allocation processes.
if (contentParents.Length() >= maxContentParents) {
return GetNewOrUsedBrowserProcess(NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
return GetNewOrUsedBrowserProcess(aFrameElement,
NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
aPriority,
aOpener);
}
@ -833,7 +876,7 @@ ContentParent::GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
}
// Create a new process from scratch.
RefPtr<ContentParent> p = new ContentParent(aOpener, aRemoteType);
RefPtr<ContentParent> p = new ContentParent(aOpener, aRemoteType, recordReplayState, recordingFile);
// Until the new process is ready let's not allow to start up any preallocated processes.
PreallocatedProcessManager::AddBlocker(p);
@ -842,7 +885,10 @@ ContentParent::GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
return nullptr;
}
contentParents.AppendElement(p);
if (recordReplayState == eNotRecordingOrReplaying) {
contentParents.AppendElement(p);
}
p->mActivateTS = TimeStamp::Now();
return p.forget();
}
@ -939,7 +985,7 @@ ContentParent::RecvCreateChildProcess(const IPCTabContext& aContext,
aPriority);
}
else {
cp = GetNewOrUsedBrowserProcess(NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
cp = GetNewOrUsedBrowserProcess(nullptr, NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
aPriority, this);
}
@ -1160,7 +1206,7 @@ ContentParent::CreateBrowser(const TabContext& aContext,
initialPriority);
} else {
constructorSender =
GetNewOrUsedBrowserProcess(remoteType, initialPriority,
GetNewOrUsedBrowserProcess(aFrameElement, remoteType, initialPriority,
nullptr, isPreloadBrowser);
}
if (!constructorSender) {
@ -1402,6 +1448,18 @@ ContentParent::ShutDownProcess(ShutDownMethod aMethod)
// other methods. We first call Shutdown() in the child. After the child is
// ready, it calls FinishShutdown() on us. Then we close the channel.
if (aMethod == SEND_SHUTDOWN_MESSAGE) {
if (const char* directory = recordreplay::parent::SaveAllRecordingsDirectory()) {
// Save a recording for the child process before it shuts down.
unsigned long elapsed = (TimeStamp::Now() - TimeStamp::ProcessCreation()).ToMilliseconds();
nsCOMPtr<nsIFile> file;
if (!NS_FAILED(NS_NewNativeLocalFile(nsDependentCString(directory), false,
getter_AddRefs(file))) &&
!NS_FAILED(file->AppendNative(nsPrintfCString("Recording%lu", elapsed)))) {
bool unused;
SaveRecording(file, &unused);
}
}
if (mIPCOpen && !mShutdownPending) {
// Stop sending input events with input priority when shutting down.
SetInputPriorityEventEnabled(false);
@ -1791,6 +1849,14 @@ ContentParent::ActorDestroy(ActorDestroyReason why)
DelayedDeleteSubprocess, mSubprocess));
mSubprocess = nullptr;
// Delete any remaining replaying children.
for (auto& replayingProcess : mReplayingChildren) {
if (replayingProcess) {
DelayedDeleteSubprocess(replayingProcess);
replayingProcess = nullptr;
}
}
// IPDL rules require actors to live on past ActorDestroy, but it
// may be that the kungFuDeathGrip above is the last reference to
// |this|. If so, when we go out of scope here, we're deleted and
@ -1978,6 +2044,65 @@ ContentParent::NotifyTabDestroyed(const TabId& aTabId,
}
}
mozilla::ipc::IPCResult
ContentParent::RecvOpenRecordReplayChannel(const uint32_t& aChannelId,
FileDescriptor* aConnection)
{
// We should only get this message from the child if it is recording or replaying.
if (!recordreplay::IsRecordingOrReplaying()) {
return IPC_FAIL_NO_REASON(this);
}
recordreplay::parent::OpenChannel(Pid(), aChannelId, aConnection);
return IPC_OK();
}
mozilla::ipc::IPCResult
ContentParent::RecvCreateReplayingProcess(const uint32_t& aChannelId)
{
// We should only get this message from the child if it is recording or replaying.
if (!recordreplay::IsRecordingOrReplaying()) {
return IPC_FAIL_NO_REASON(this);
}
while (aChannelId >= mReplayingChildren.length()) {
if (!mReplayingChildren.append(nullptr)) {
return IPC_FAIL_NO_REASON(this);
}
}
if (mReplayingChildren[aChannelId]) {
return IPC_FAIL_NO_REASON(this);
}
std::vector<std::string> extraArgs;
recordreplay::parent::GetArgumentsForChildProcess(Pid(), aChannelId,
NS_ConvertUTF16toUTF8(mRecordingFile).get(),
/* aRecording = */ false,
extraArgs);
mReplayingChildren[aChannelId] = new GeckoChildProcessHost(GeckoProcessType_Content);
if (!mReplayingChildren[aChannelId]->LaunchAndWaitForProcessHandle(extraArgs)) {
return IPC_FAIL_NO_REASON(this);
}
return IPC_OK();
}
mozilla::ipc::IPCResult
ContentParent::RecvTerminateReplayingProcess(const uint32_t& aChannelId)
{
// We should only get this message from the child if it is recording or replaying.
if (!recordreplay::IsRecordingOrReplaying()) {
return IPC_FAIL_NO_REASON(this);
}
if (aChannelId < mReplayingChildren.length() && mReplayingChildren[aChannelId]) {
DelayedDeleteSubprocess(mReplayingChildren[aChannelId]);
mReplayingChildren[aChannelId] = nullptr;
}
return IPC_OK();
}
jsipc::CPOWManager*
ContentParent::GetCPOWManager()
{
@ -2098,6 +2223,18 @@ ContentParent::LaunchSubprocess(ProcessPriority aInitialPriority /* = PROCESS_PR
extraArgs.push_back("-parentBuildID");
extraArgs.push_back(parentBuildID.get());
// Specify whether the process is recording or replaying an execution.
if (mRecordReplayState != eNotRecordingOrReplaying) {
nsPrintfCString buf("%d", mRecordReplayState == eRecording
? (int) recordreplay::ProcessKind::MiddlemanRecording
: (int) recordreplay::ProcessKind::MiddlemanReplaying);
extraArgs.push_back(recordreplay::gProcessKindOption);
extraArgs.push_back(buf.get());
extraArgs.push_back(recordreplay::gRecordingFileOption);
extraArgs.push_back(NS_ConvertUTF16toUTF8(mRecordingFile).get());
}
SetOtherProcessId(kInvalidProcessId, ProcessIdState::ePending);
#ifdef ASYNC_CONTENTPROC_LAUNCH
if (!mSubprocess->Launch(extraArgs)) {
@ -2151,6 +2288,8 @@ ContentParent::LaunchSubprocess(ProcessPriority aInitialPriority /* = PROCESS_PR
ContentParent::ContentParent(ContentParent* aOpener,
const nsAString& aRemoteType,
RecordReplayState aRecordReplayState,
const nsAString& aRecordingFile,
int32_t aJSPluginID)
: nsIContentParent()
, mSubprocess(nullptr)
@ -2165,6 +2304,8 @@ ContentParent::ContentParent(ContentParent* aOpener,
, mIsAvailable(true)
, mIsAlive(true)
, mIsForBrowser(!mRemoteType.IsEmpty())
, mRecordReplayState(aRecordReplayState)
, mRecordingFile(aRecordingFile)
, mCalledClose(false)
, mCalledKillHard(false)
, mCreatedPairedMinidumps(false)
@ -5088,6 +5229,18 @@ ContentParent::RecvGraphicsError(const nsCString& aError)
return IPC_OK();
}
mozilla::ipc::IPCResult
ContentParent::RecvRecordReplayFatalError(const nsCString& aError)
{
nsCOMPtr<nsIPromptService> promptService(do_GetService(NS_PROMPTSERVICE_CONTRACTID));
MOZ_RELEASE_ASSERT(promptService);
nsAutoCString str(aError);
promptService->Alert(nullptr, u"Fatal Record/Replay Error", NS_ConvertUTF8toUTF16(str).get());
return IPC_OK();
}
mozilla::ipc::IPCResult
ContentParent::RecvBeginDriverCrashGuard(const uint32_t& aGuardType, bool* aOutCrashed)
{
@ -5752,6 +5905,31 @@ ContentParent::CanCommunicateWith(ContentParentId aOtherProcess)
return parentId == aOtherProcess;
}
nsresult
ContentParent::SaveRecording(nsIFile* aFile, bool* aRetval)
{
if (mRecordReplayState != eRecording) {
*aRetval = false;
return NS_OK;
}
PRFileDesc* prfd;
nsresult rv = aFile->OpenNSPRFileDesc(PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE, 0644, &prfd);
if (NS_FAILED(rv)) {
return rv;
}
FileDescriptor::PlatformHandleType handle =
FileDescriptor::PlatformHandleType(PR_FileDesc2NativeHandle(prfd));
Unused << SendSaveRecording(FileDescriptor(handle));
PR_Close(prfd);
*aRetval = true;
return NS_OK;
}
mozilla::ipc::IPCResult
ContentParent::RecvMaybeReloadPlugins()
{

View file

@ -174,7 +174,8 @@ public:
* 3. normal iframe
*/
static already_AddRefed<ContentParent>
GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
GetNewOrUsedBrowserProcess(Element* aFrameElement,
const nsAString& aRemoteType,
hal::ProcessPriority aPriority =
hal::ProcessPriority::PROCESS_PRIORITY_FOREGROUND,
ContentParent* aOpener = nullptr,
@ -303,6 +304,11 @@ public:
virtual mozilla::ipc::IPCResult RecvBridgeToChildProcess(const ContentParentId& aCpId,
Endpoint<PContentBridgeParent>* aEndpoint) override;
virtual mozilla::ipc::IPCResult RecvOpenRecordReplayChannel(const uint32_t& channelId,
FileDescriptor* connection) override;
virtual mozilla::ipc::IPCResult RecvCreateReplayingProcess(const uint32_t& aChannelId) override;
virtual mozilla::ipc::IPCResult RecvTerminateReplayingProcess(const uint32_t& aChannelId) override;
virtual mozilla::ipc::IPCResult RecvCreateGMPService() override;
virtual mozilla::ipc::IPCResult RecvLoadPlugin(const uint32_t& aPluginId, nsresult* aRv,
@ -743,16 +749,28 @@ private:
FORWARD_SHMEM_ALLOCATOR_TO(PContentParent)
enum RecordReplayState
{
eNotRecordingOrReplaying,
eRecording,
eReplaying
};
explicit ContentParent(int32_t aPluginID)
: ContentParent(nullptr, EmptyString(), aPluginID)
: ContentParent(nullptr, EmptyString(), eNotRecordingOrReplaying, EmptyString(), aPluginID)
{}
ContentParent(ContentParent* aOpener,
const nsAString& aRemoteType)
: ContentParent(aOpener, aRemoteType, nsFakePluginTag::NOT_JSPLUGIN)
const nsAString& aRemoteType,
RecordReplayState aRecordReplayState = eNotRecordingOrReplaying,
const nsAString& aRecordingFile = EmptyString())
: ContentParent(aOpener, aRemoteType, aRecordReplayState, aRecordingFile,
nsFakePluginTag::NOT_JSPLUGIN)
{}
ContentParent(ContentParent* aOpener,
const nsAString& aRemoteType,
RecordReplayState aRecordReplayState,
const nsAString& aRecordingFile,
int32_t aPluginID);
// Launch the subprocess and associated initialization.
@ -1126,6 +1144,8 @@ public:
virtual mozilla::ipc::IPCResult RecvGraphicsError(const nsCString& aError) override;
virtual mozilla::ipc::IPCResult RecvRecordReplayFatalError(const nsCString& aError) override;
virtual mozilla::ipc::IPCResult
RecvBeginDriverCrashGuard(const uint32_t& aGuardType,
bool* aOutCrashed) override;
@ -1246,6 +1266,8 @@ public:
bool CanCommunicateWith(ContentParentId aOtherProcess);
nsresult SaveRecording(nsIFile* aFile, bool* aRetval);
private:
// If you add strong pointers to cycle collected objects here, be sure to
@ -1289,6 +1311,16 @@ private:
bool mIsForBrowser;
// Whether this process is recording or replaying its execution, and any
// associated recording file.
RecordReplayState mRecordReplayState;
nsString mRecordingFile;
// When recording or replaying, the child process is a middleman. This vector
// stores any replaying children we have spawned on behalf of that middleman,
// indexed by their record/replay channel ID.
Vector<mozilla::ipc::GeckoChildProcessHost*> mReplayingChildren;
// These variables track whether we've called Close() and KillHard() on our
// channel.
bool mCalledClose;

View file

@ -10,6 +10,7 @@
#include "base/shared_memory.h"
#include "mozilla/Preferences.h"
#include "mozilla/Scheduler.h"
#include "mozilla/recordreplay/ParentIPC.h"
#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX)
#include <stdlib.h>
@ -261,30 +262,46 @@ ContentProcess::Init(int aArgc, char* aArgv[])
return false;
}
// Init the shared-memory base preference mapping first, so that only changed
// preferences wind up in heap memory.
Preferences::InitSnapshot(prefMapHandle.ref(), *prefMapSize);
if (recordreplay::IsRecordingOrReplaying()) {
// Set up early prefs from shmem contents passed to us by the middleman.
Preferences::DeserializePreferences(recordreplay::child::PrefsShmemContents(*prefsLen),
*prefsLen);
} else {
// Init the shared-memory base preference mapping first, so that only changed
// preferences wind up in heap memory.
Preferences::InitSnapshot(prefMapHandle.ref(), *prefMapSize);
// Set up early prefs from the shared memory.
base::SharedMemory shm;
if (!shm.SetHandle(*prefsHandle, /* read_only */ true)) {
NS_ERROR("failed to open shared memory in the child");
return false;
// Set up early prefs from the shared memory.
base::SharedMemory shm;
if (!shm.SetHandle(*prefsHandle, /* read_only */ true)) {
NS_ERROR("failed to open shared memory in the child");
return false;
}
if (!shm.Map(*prefsLen)) {
NS_ERROR("failed to map shared memory in the child");
return false;
}
Preferences::DeserializePreferences(static_cast<char*>(shm.memory()),
*prefsLen);
if (recordreplay::IsMiddleman()) {
recordreplay::parent::NotePrefsShmemContents(static_cast<char*>(shm.memory()),
*prefsLen);
}
}
if (!shm.Map(*prefsLen)) {
NS_ERROR("failed to map shared memory in the child");
return false;
}
Preferences::DeserializePreferences(static_cast<char*>(shm.memory()),
*prefsLen);
Scheduler::SetPrefs(*schedulerPrefs);
if (recordreplay::IsMiddleman()) {
recordreplay::parent::InitializeMiddleman(aArgc, aArgv, ParentPid());
}
mContent.Init(IOThreadChild::message_loop(),
ParentPid(),
*parentBuildID,
IOThreadChild::channel(),
*childID,
*isForBrowser);
mXREEmbed.Start();
#if (defined(XP_MACOSX)) && defined(MOZ_CONTENT_SANDBOX)
mContent.SetProfileDir(profileDir);

View file

@ -726,6 +726,9 @@ child:
*/
async PClientOpenWindowOp(ClientOpenWindowArgs aArgs);
/* Save the execution up to the current point in a recording process. */
async SaveRecording(FileDescriptor file);
parent:
async InitBackground(Endpoint<PBackgroundParent> aEndpoint);
@ -737,6 +740,11 @@ parent:
sync BridgeToChildProcess(ContentParentId cpId)
returns (Endpoint<PContentBridgeParent> endpoint);
sync OpenRecordReplayChannel(uint32_t channelId)
returns (FileDescriptor connection);
async CreateReplayingProcess(uint32_t channelId);
async TerminateReplayingProcess(uint32_t channelId);
async CreateGMPService();
async InitStreamFilter(uint64_t channelId, nsString addonId)
@ -931,6 +939,9 @@ parent:
// Graphics errors
async GraphicsError(nsCString aError);
// Record/replay errors.
async RecordReplayFatalError(nsCString aError);
// Driver crash guards. aGuardType must be a member of CrashGuardType.
sync BeginDriverCrashGuard(uint32_t aGuardType) returns (bool crashDetected);
sync EndDriverCrashGuard(uint32_t aGuardType);

View file

@ -659,6 +659,8 @@ public:
void PaintWhileInterruptingJS(uint64_t aLayerObserverEpoch,
bool aForceRepaint);
uint64_t LayerObserverEpoch() const { return mLayerObserverEpoch; }
#if defined(XP_WIN) && defined(ACCESSIBILITY)
uintptr_t GetNativeWindowHandle() const { return mNativeWindowHandle; }
#endif

View file

@ -2964,6 +2964,17 @@ TabParent::PreserveLayers(bool aPreserveLayers)
return NS_OK;
}
NS_IMETHODIMP
TabParent::SaveRecording(const nsAString& aFilename, bool* aRetval)
{
nsCOMPtr<nsIFile> file;
nsresult rv = NS_NewLocalFile(aFilename, false, getter_AddRefs(file));
if (NS_FAILED(rv)) {
return rv;
}
return Manager()->AsContentParent()->SaveRecording(file, aRetval);
}
NS_IMETHODIMP
TabParent::SuppressDisplayport(bool aEnabled)
{

Просмотреть файл

@ -171,6 +171,10 @@ public:
return false;
}
virtual void ForceComposeToTarget(gfx::DrawTarget* aTarget, const gfx::IntRect* aRect = nullptr) {
MOZ_CRASH();
}
protected:
~CompositorBridgeParentBase() override;
@ -446,7 +450,7 @@ public:
widget::CompositorWidget* GetWidget() { return mWidget; }
void ForceComposeToTarget(gfx::DrawTarget* aTarget, const gfx::IntRect* aRect = nullptr);
virtual void ForceComposeToTarget(gfx::DrawTarget* aTarget, const gfx::IntRect* aRect = nullptr) override;
PAPZCTreeManagerParent* AllocPAPZCTreeManagerParent(const LayersId& aLayersId) override;
bool DeallocPAPZCTreeManagerParent(PAPZCTreeManagerParent* aActor) override;

Просмотреть файл

@ -162,6 +162,12 @@ LayerTransactionParent::RecvPaintTime(const TransactionId& aTransactionId,
mozilla::ipc::IPCResult
LayerTransactionParent::RecvUpdate(const TransactionInfo& aInfo)
{
auto guard = MakeScopeExit([&] {
if (recordreplay::IsRecordingOrReplaying()) {
recordreplay::child::NotifyPaintComplete();
}
});
AUTO_PROFILER_TRACING("Paint", "LayerTransaction");
AUTO_PROFILER_LABEL("LayerTransactionParent::RecvUpdate", GRAPHICS);
@ -489,6 +495,11 @@ LayerTransactionParent::RecvUpdate(const TransactionInfo& aInfo)
mLayerManager->RecordUpdateTime((TimeStamp::Now() - updateStart).ToMilliseconds());
}
// Compose after every update when recording/replaying.
if (recordreplay::IsRecordingOrReplaying()) {
mCompositorBridge->ForceComposeToTarget(nullptr);
}
return IPC_OK();
}

Просмотреть файл

@ -767,6 +767,10 @@ ShadowLayerForwarder::EndTransaction(const nsIntRegion& aRegionToClear,
// finish. If it does we don't have to delay messages at all.
GetCompositorBridgeChild()->PostponeMessagesIfAsyncPainting();
if (recordreplay::IsRecordingOrReplaying()) {
recordreplay::child::NotifyPaintStart();
}
MOZ_LAYERS_LOG(("[LayersForwarder] sending transaction..."));
RenderTraceScope rendertrace3("Forward Transaction", "000093");
if (!mShadowManager->SendUpdate(info)) {
@ -774,6 +778,10 @@ ShadowLayerForwarder::EndTransaction(const nsIntRegion& aRegionToClear,
return false;
}
if (recordreplay::IsRecordingOrReplaying()) {
recordreplay::child::WaitForPaintToComplete();
}
if (startTime) {
mPaintTiming.sendMs() = (TimeStamp::Now() - startTime.value()).ToMilliseconds();
mShadowManager->SendRecordPaintTimes(mPaintTiming);

Просмотреть файл

@ -175,6 +175,14 @@ Pickle& Pickle::operator=(Pickle&& other) {
return *this;
}
void Pickle::CopyFrom(const Pickle& other) {
MOZ_ALWAYS_TRUE(buffers_.CopyFrom(other.buffers_));
MOZ_ASSERT(other.header_ == reinterpret_cast<const Header*>(other.buffers_.Start()));
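// buffers_ now owns a fresh copy of the data, so point our header_ at our own
// storage, mirroring how other's header_ points at the start of its buffers.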
header_ = reinterpret_cast<Header*>(buffers_.Start());
header_size_ = other.header_size_;
}
bool Pickle::ReadBool(PickleIterator* iter, bool* result) const {
DCHECK(iter);

Просмотреть файл

@ -78,6 +78,8 @@ class Pickle {
Pickle& operator=(Pickle&& other);
void CopyFrom(const Pickle& other);
// Returns the size of the Pickle's data.
uint32_t size() const { return header_size_ + header_->payload_size; }

Просмотреть файл

@ -145,6 +145,12 @@ Message& Message::operator=(Message&& other) {
return *this;
}
void Message::CopyFrom(const Message& other) {
Pickle::CopyFrom(other);
#if defined(OS_POSIX)
file_descriptor_set_ = other.file_descriptor_set_;
#endif
}
#if defined(OS_POSIX)
bool Message::WriteFileDescriptor(const base::FileDescriptor& descriptor) {

Просмотреть файл

@ -213,6 +213,8 @@ class Message : public Pickle {
Message& operator=(const Message& other) = delete;
Message& operator=(Message&& other);
void CopyFrom(const Message& other);
// Helper method for the common case (default segmentCapacity, recording
// the write latency of messages) of IPDL message creation. This helps
// move the malloc and some of the parameter setting out of autogenerated

Просмотреть файл

@ -18,6 +18,8 @@
#include "mozilla/dom/ContentParent.h"
#include "mozilla/ipc/MessageChannel.h"
#include "mozilla/ipc/Transport.h"
#include "mozilla/recordreplay/ChildIPC.h"
#include "mozilla/recordreplay/ParentIPC.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/SystemGroup.h"
#include "mozilla/Unused.h"
@ -723,7 +725,15 @@ IToplevelProtocol::SetOtherProcessId(base::ProcessId aOtherPid,
ProcessIdState aState)
{
MonitorAutoLock lock(mMonitor);
mOtherPid = aOtherPid;
// When recording an execution, all communication we do is forwarded by the
// middleman to the parent process, so use the parent process's pid instead
// of the middleman's pid.
if (recordreplay::IsRecordingOrReplaying() &&
aOtherPid == recordreplay::child::MiddlemanProcessId()) {
mOtherPid = recordreplay::child::ParentProcessId();
} else {
mOtherPid = aOtherPid;
}
mOtherPidState = aState;
lock.NotifyAll();
}
@ -1066,13 +1076,13 @@ IToplevelProtocol::ToplevelState::ReplaceEventTargetForActor(
const MessageChannel*
IToplevelProtocol::ToplevelState::GetIPCChannel() const
{
return &mChannel;
return ProtocolState::mChannel ? ProtocolState::mChannel : &mChannel;
}
MessageChannel*
IToplevelProtocol::ToplevelState::GetIPCChannel()
{
return &mChannel;
return ProtocolState::mChannel ? ProtocolState::mChannel : &mChannel;
}
} // namespace ipc

Просмотреть файл

@ -24,6 +24,7 @@
#include "mozilla/ipc/Shmem.h"
#include "mozilla/ipc/Transport.h"
#include "mozilla/ipc/MessageLink.h"
#include "mozilla/recordreplay/ChildIPC.h"
#include "mozilla/LinkedList.h"
#include "mozilla/Maybe.h"
#include "mozilla/MozPromise.h"
@ -288,6 +289,13 @@ public:
{
return mState->GetIPCChannel();
}
void SetMiddlemanIPCChannel(MessageChannel* aChannel)
{
// Middleman processes sometimes need to change the channel used by a
// protocol.
MOZ_RELEASE_ASSERT(recordreplay::IsMiddleman());
mState->SetIPCChannel(aChannel);
}
// XXX odd ducks, acknowledged
virtual ProcessId OtherPid() const;
@ -865,7 +873,16 @@ public:
bool Bind(PFooSide* aActor)
{
MOZ_RELEASE_ASSERT(mValid);
MOZ_RELEASE_ASSERT(mMyPid == base::GetCurrentProcId());
if (mMyPid != base::GetCurrentProcId()) {
// These pids must match, unless we are recording or replaying, in
// which case the parent process will have supplied the pid for the
// middleman process instead. Fix this here. If we're replaying
// we'll see the pid of the middleman used while recording.
MOZ_RELEASE_ASSERT(recordreplay::IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(recordreplay::IsReplaying() ||
mMyPid == recordreplay::child::MiddlemanProcessId());
mMyPid = base::GetCurrentProcId();
}
UniquePtr<Transport> t = mozilla::ipc::OpenDescriptor(mTransport, mMode);
if (!t) {

Просмотреть файл

@ -849,6 +849,8 @@ description =
description =
[PContent::BridgeToChildProcess]
description =
[PContent::OpenRecordReplayChannel]
description = bug 1475898 this could be async
[PContent::LoadPlugin]
description =
[PContent::ConnectPluginBridge]

Просмотреть файл

@ -697,6 +697,9 @@ interface nsIXPCComponents_Utils : nsISupports
*/
void blockThreadedExecution(in nsIBlockThreadedExecutionCallback aBlockedCallback);
void unblockThreadedExecution();
/* Give a directive to the record/replay system. */
void recordReplayDirective(in long directive);
};
/**

Просмотреть файл

@ -3069,6 +3069,13 @@ nsXPCComponents_Utils::UnblockThreadedExecution()
return NS_OK;
}
NS_IMETHODIMP
nsXPCComponents_Utils::RecordReplayDirective(int aDirective)
{
recordreplay::RecordReplayDirective(aDirective);
return NS_OK;
}
/***************************************************************************/
/***************************************************************************/
/***************************************************************************/

Просмотреть файл

@ -135,6 +135,28 @@ class BufferList : private AllocPolicy
return AllocateSegment(aInitialSize, aInitialCapacity);
}
bool CopyFrom(const BufferList& aOther)
{
MOZ_ASSERT(mOwning);
Clear();
// We don't make an exact copy of aOther. Instead, create a single segment
// with enough space to hold all data in aOther.
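// The capacity argument rounds aOther.mSize up to a multiple of kSegmentAlignment.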
if (!Init(aOther.mSize, (aOther.mSize + kSegmentAlignment - 1) & ~(kSegmentAlignment - 1))) {
return false;
}
size_t offset = 0;
for (const Segment& segment : aOther.mSegments) {
memcpy(Start() + offset, segment.mData, segment.mSize);
offset += segment.mSize;
}
MOZ_ASSERT(offset == mSize);
return true;
}
// Returns the sum of the sizes of all the buffers.
size_t Size() const { return mSize; }

Просмотреть файл

@ -33,11 +33,11 @@ namespace recordreplay {
(const PLDHashTableOps* aOps), (aOps)) \
Macro(InternalUnwrapPLDHashTableCallbacks, const PLDHashTableOps*, \
(const PLDHashTableOps* aOps), (aOps)) \
Macro(AllocateMemory, void*, (size_t aSize, AllocatedMemoryKind aKind), (aSize, aKind)) \
Macro(InternalThingIndex, size_t, (void* aThing), (aThing)) \
Macro(InternalVirtualThingName, const char*, (void* aThing), (aThing)) \
Macro(NewCheckpoint, bool, (bool aTemporary), (aTemporary)) \
Macro(SpewEnabled, bool, (), ())
Macro(ExecutionProgressCounter, ProgressCounter*, (), ()) \
Macro(IsInternalScript, bool, (const char* aURL), (aURL)) \
Macro(DefineRecordReplayControlObject, bool, (JSContext* aCx, JSObject* aObj), (aCx, aObj))
#define FOR_EACH_INTERFACE_VOID(Macro) \
Macro(InternalBeginOrderedAtomicAccess, (), ()) \
@ -50,7 +50,6 @@ namespace recordreplay {
Macro(InternalEndCaptureEventStacks, (), ()) \
Macro(InternalRecordReplayBytes, \
(void* aData, size_t aSize), (aData, aSize)) \
Macro(DisallowUnhandledDivergeFromRecording, (), ()) \
Macro(NotifyUnrecordedWait, \
(const std::function<void()>& aCallback), (aCallback)) \
Macro(MaybeWaitForCheckpointSave, (), ()) \
@ -60,16 +59,8 @@ namespace recordreplay {
Macro(InternalMovePLDHashTableContents, \
(const PLDHashTableOps* aFirstOps, const PLDHashTableOps* aSecondOps), \
(aFirstOps, aSecondOps)) \
Macro(SetCheckpointHooks, \
(BeforeCheckpointHook aBefore, AfterCheckpointHook aAfter), \
(aBefore, aAfter)) \
Macro(ResumeExecution, (), ()) \
Macro(RestoreCheckpointAndResume, (const CheckpointId& aId), (aId)) \
Macro(DivergeFromRecording, (), ()) \
Macro(DeallocateMemory, \
(void* aAddress, size_t aSize, AllocatedMemoryKind aKind), (aAddress, aSize, aKind)) \
Macro(SetWeakPointerJSRoot, \
(const void* aPtr, void* aJSObj), (aPtr, aJSObj)) \
(const void* aPtr, JSObject* aJSObj), (aPtr, aJSObj)) \
Macro(RegisterTrigger, \
(void* aObj, const std::function<void()>& aCallback), \
(aObj, aCallback)) \
@ -83,7 +74,13 @@ namespace recordreplay {
Macro(InternalRegisterThing, (void* aThing), (aThing)) \
Macro(InternalUnregisterThing, (void* aThing), (aThing)) \
Macro(InternalRecordReplayDirective, (long aDirective), (aDirective)) \
Macro(InternalPrint, (const char* aFormat, va_list aArgs), (aFormat, aArgs))
Macro(BeginContentParse, \
(const void* aToken, const char* aURL, const char* aContentType), \
(aToken, aURL, aContentType)) \
Macro(AddContentParseData, \
(const void* aToken, const char16_t* aBuffer, size_t aLength), \
(aToken, aBuffer, aLength)) \
Macro(EndContentParse, (const void* aToken), (aToken))
#define DECLARE_SYMBOL(aName, aReturnType, aFormals, _) \
static aReturnType (*gPtr ##aName) aFormals;

Просмотреть файл

@ -18,6 +18,8 @@
#include <stdarg.h>
struct PLDHashTableOps;
struct JSContext;
class JSObject;
namespace mozilla {
namespace recordreplay {
@ -201,7 +203,7 @@ static inline void MovePLDHashTableContents(const PLDHashTableOps* aFirstOps,
// Associate an arbitrary pointer with a JS object root while replaying. This
// is useful for replaying the behavior of weak pointers.
MFBT_API void SetWeakPointerJSRoot(const void* aPtr, /*JSObject*/void* aJSObj);
MFBT_API void SetWeakPointerJSRoot(const void* aPtr, JSObject* aJSObj);
// API for ensuring that a function executes at a consistent point when
// recording or replaying. This is primarily needed for finalizers and other
@ -323,160 +325,59 @@ static const char gProcessKindOption[] = "-recordReplayKind";
static const char gRecordingFileOption[] = "-recordReplayFile";
///////////////////////////////////////////////////////////////////////////////
// Devtools API
// JS interface
///////////////////////////////////////////////////////////////////////////////
// This interface is used by devtools C++ code (e.g. the JS Debugger) running
// in a child or middleman process.
// Get the counter used to keep track of how much progress JS execution has
// made while running on the main thread. Progress must advance whenever a JS
// function is entered or loop entry point is reached, so that no script
// location may be hit twice while the progress counter is the same. See
// JSControl.h for more.
typedef uint64_t ProgressCounter;
MFBT_API ProgressCounter* ExecutionProgressCounter();
// The ID of a checkpoint in a child process. Checkpoints are either normal or
// temporary. Normal checkpoints occur at the same point in the recording and
// all replays, while temporary checkpoints are not used while recording and
// may be at different points in different replays.
struct CheckpointId
static inline void
AdvanceExecutionProgressCounter()
{
// ID of the most recent normal checkpoint, which are numbered in sequence
// starting at FirstCheckpointId.
size_t mNormal;
++*ExecutionProgressCounter();
}
// Special IDs for normal checkpoints.
static const size_t Invalid = 0;
static const size_t First = 1;
// Return whether a script is internal to the record/replay infrastructure;
// such scripts may run non-deterministically between recording and replaying,
// and their execution must not update the progress counter.
MFBT_API bool IsInternalScript(const char* aURL);
// How many temporary checkpoints have been generated since the most recent
// normal checkpoint, zero if this represents the normal checkpoint itself.
size_t mTemporary;
// Define a RecordReplayControl object on the specified global object, with
// methods specialized to the current recording/replaying or middleman process
// kind.
MFBT_API bool DefineRecordReplayControlObject(JSContext* aCx, JSObject* aObj);
explicit CheckpointId(size_t aNormal = Invalid, size_t aTemporary = 0)
: mNormal(aNormal), mTemporary(aTemporary)
{}
// Notify the infrastructure that some URL which contains JavaScript is
// being parsed. This is used to provide the complete contents of the URL to
// devtools code when it is inspecting the state of this process; that devtools
// code can't simply fetch the URL itself since it may have been changed since
// the recording was made or may no longer exist. The token for a parse may not
// be used in other parses until after EndContentParse() is called.
MFBT_API void BeginContentParse(const void* aToken,
const char* aURL, const char* aContentType);
inline bool operator==(const CheckpointId& o) const {
return mNormal == o.mNormal && mTemporary == o.mTemporary;
}
// Add some parse data to an existing content parse.
MFBT_API void AddContentParseData(const void* aToken,
const char16_t* aBuffer, size_t aLength);
inline bool operator!=(const CheckpointId& o) const {
return mNormal != o.mNormal || mTemporary != o.mTemporary;
}
};
// Mark a content parse as having completed.
MFBT_API void EndContentParse(const void* aToken);
// Signature for the hook called when running forward, immediately before
// hitting a normal or temporary checkpoint.
typedef void (*BeforeCheckpointHook)();
// Signature for the hook called immediately after hitting a normal or
// temporary checkpoint, either when running forward or after rewinding.
typedef void (*AfterCheckpointHook)(const CheckpointId& aCheckpoint);
// Set hooks to call when encountering checkpoints.
MFBT_API void SetCheckpointHooks(BeforeCheckpointHook aBeforeCheckpoint,
AfterCheckpointHook aAfterCheckpoint);
// When paused at a breakpoint or at a checkpoint, unpause and proceed with
// execution.
MFBT_API void ResumeExecution();
// When paused at a breakpoint or at a checkpoint, restore a checkpoint that
// was saved earlier and resume execution.
MFBT_API void RestoreCheckpointAndResume(const CheckpointId& aCheckpoint);
// Allow execution after this point to diverge from the recording. Execution
// will remain diverged until an earlier checkpoint is restored.
//
// If an unhandled divergence occurs (see the 'Recording Divergence' comment
// in ProcessRewind.h) then the process rewinds to the most recent saved
// checkpoint.
MFBT_API void DivergeFromRecording();
// After a call to DivergeFromRecording(), this may be called to prevent future
// unhandled divergence from causing earlier checkpoints to be restored
// (the process will immediately crash instead). This state lasts until a new
// call to DivergeFromRecording, or to an explicit restore of an earlier
// checkpoint.
MFBT_API void DisallowUnhandledDivergeFromRecording();
// Note a checkpoint at the current execution position. This checkpoint will be
// saved if either (a) it is temporary, or (b) the middleman has instructed
// this process to save this normal checkpoint. This method returns true if the
// checkpoint was just saved, and false if it was just restored.
MFBT_API bool NewCheckpoint(bool aTemporary);
// Print information about record/replay state. Printing is independent from
// the recording and will be printed by any recording, replaying, or middleman
// process. Spew is only printed when enabled via the RECORD_REPLAY_SPEW
// environment variable.
static inline void Print(const char* aFormat, ...);
static inline void PrintSpew(const char* aFormat, ...);
MFBT_API bool SpewEnabled();
///////////////////////////////////////////////////////////////////////////////
// Allocation policies
///////////////////////////////////////////////////////////////////////////////
// Type describing what kind of memory to allocate/deallocate by APIs below.
// TrackedMemoryKind is reserved for memory that is saved and restored when
// saving or restoring checkpoints. All other values refer to memory that is
// untracked, and whose contents are preserved when restoring checkpoints.
// Different values are used to distinguish different classes of memory for
// diagnosing leaks and reporting memory usage.
typedef size_t AllocatedMemoryKind;
static const AllocatedMemoryKind TrackedMemoryKind = 0;
// Memory kind to use for untracked debugger memory.
static const AllocatedMemoryKind DebuggerAllocatedMemoryKind = 1;
// Allocate or deallocate a block of memory of a particular kind. Allocated
// memory is initially zeroed.
MFBT_API void* AllocateMemory(size_t aSize, AllocatedMemoryKind aKind);
MFBT_API void DeallocateMemory(void* aAddress, size_t aSize, AllocatedMemoryKind aKind);
// Allocation policy for managing memory of a particular kind.
template <AllocatedMemoryKind Kind>
class AllocPolicy
// Perform an entire content parse, when the entire URL is available at once.
static inline void
NoteContentParse(const void* aToken,
const char* aURL, const char* aContentType,
const char16_t* aBuffer, size_t aLength)
{
public:
template <typename T>
T* maybe_pod_calloc(size_t aNumElems) {
if (aNumElems & tl::MulOverflowMask<sizeof(T)>::value) {
MOZ_CRASH();
}
// Note: AllocateMemory always returns zeroed memory.
return static_cast<T*>(AllocateMemory(aNumElems * sizeof(T), Kind));
}
template <typename T>
void free_(T* aPtr, size_t aSize) {
DeallocateMemory(aPtr, aSize * sizeof(T), Kind);
}
template <typename T>
T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
T* res = maybe_pod_calloc<T>(aNewSize);
memcpy(res, aPtr, aOldSize * sizeof(T));
free_<T>(aPtr, aOldSize);
return res;
}
template <typename T>
T* maybe_pod_malloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
template <typename T>
T* pod_malloc(size_t aNumElems) { return maybe_pod_malloc<T>(aNumElems); }
template <typename T>
T* pod_calloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
template <typename T>
T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
}
void reportAllocOverflow() const {}
MOZ_MUST_USE bool checkSimulatedOOM() const {
return true;
}
};
BeginContentParse(aToken, aURL, aContentType);
AddContentParseData(aToken, aBuffer, aLength);
EndContentParse(aToken);
}
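// Hypothetical call-site sketch (names are illustrative): any pointer that is
// unique for the duration of the parse can serve as the token, e.g. the parser
// object itself:
//   NoteContentParse(this, url.get(), "text/javascript", srcBuf, srcLength);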
///////////////////////////////////////////////////////////////////////////////
// API inline function implementation
@ -571,25 +472,6 @@ RecordReplayAssert(const char* aFormat, ...)
}
}
MFBT_API void InternalPrint(const char* aFormat, va_list aArgs);
#define MOZ_MakeRecordReplayPrinter(aName, aSpewing) \
static inline void \
aName(const char* aFormat, ...) \
{ \
if ((IsRecordingOrReplaying() || IsMiddleman()) && (!aSpewing || SpewEnabled())) { \
va_list ap; \
va_start(ap, aFormat); \
InternalPrint(aFormat, ap); \
va_end(ap); \
} \
}
MOZ_MakeRecordReplayPrinter(Print, false)
MOZ_MakeRecordReplayPrinter(PrintSpew, true)
#undef MOZ_MakeRecordReplayPrinter
} // recordreplay
} // mozilla

Просмотреть файл

@ -187,19 +187,27 @@ public class GeckoJavaSampler {
@WrapForJNI
public static void stop() {
Thread samplingThread;
synchronized (GeckoJavaSampler.class) {
if (sSamplingThread == null) {
return;
}
sSamplingRunnable.mStopSampler = true;
try {
sSamplingThread.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
samplingThread = sSamplingThread;
sSamplingThread = null;
sSamplingRunnable = null;
}
boolean retry = true;
while (retry) {
try {
samplingThread.join();
retry = false;
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}

Просмотреть файл

@ -318652,9 +318652,9 @@
{}
]
],
"css/css-logical/animation-003.tenative.html": [
"css/css-logical/animation-003.tentative.html": [
[
"/css/css-logical/animation-003.tenative.html",
"/css/css-logical/animation-003.tentative.html",
{}
]
],
@ -521847,7 +521847,7 @@
"205a6330ecf0bf69dc3fca0b4f4afa9850e3a782",
"testharness"
],
"css/css-logical/animation-003.tenative.html": [
"css/css-logical/animation-003.tentative.html": [
"bdb7e952eb7fecf402f64129a00b511d89470195",
"testharness"
],

Просмотреть файл

@ -1 +1,3 @@
prefs: [dom.animations-api.core.enabled:true]
prefs: [dom.animations-api.core.enabled:true,
dom.animations-api.getAnimations.enabled:true,
dom.animations-api.implicit-keyframes.enabled:true]

Просмотреть файл

@ -1,5 +1,4 @@
[animation-001.html]
prefs: [dom.animations-api.implicit-keyframes.enabled:true]
[Logical shorthands follow the usual prioritization based on number of component longhands]
expected: FAIL
bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1289155, https://bugzilla.mozilla.org/show_bug.cgi?id=1370404

Просмотреть файл

@ -170,6 +170,7 @@
<li><a href="about:license#sunsoft">SunSoft License</a></li>
<li><a href="about:license#superfasthash">SuperFastHash License</a></li>
<li><a href="about:license#synstructure">synstructure License</a></li>
<li><a href="about:license#udis86">udis86 License</a></li>
<li><a href="about:license#unicase">unicase License</a></li>
<li><a href="about:license#unicode">Unicode License</a></li>
<li><a href="about:license#ucal">University of California License</a></li>
@ -6112,6 +6113,38 @@ THE SOFTWARE.
</pre>
<hr>
<h1><a id="udis86"></a>udis86 License</h1>
<p>This license applies to files in the directory
<code>toolkit/recordreplay/udis86</code>.</p>
<pre>
Copyright (c) 2002-2012, Vivek Thampi <vivek.mt@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</pre>
<hr>
<h1><a id="unicase"></a>unicase License</h1>

Просмотреть файл

@ -17,6 +17,7 @@ DIRS += [
'mozapps/preferences',
'pluginproblem',
'profile',
'recordreplay',
'themes',
]

Просмотреть файл

@ -0,0 +1,345 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Assembler.h"
#include "ProcessRecordReplay.h"
#include "udis86/types.h"
#include <sys/mman.h>
namespace mozilla {
namespace recordreplay {
Assembler::Assembler()
: mCursor(nullptr)
, mCursorEnd(nullptr)
, mCanAllocateStorage(true)
{}
Assembler::Assembler(uint8_t* aStorage, size_t aSize)
: mCursor(aStorage)
, mCursorEnd(aStorage + aSize)
, mCanAllocateStorage(false)
{}
Assembler::~Assembler()
{
// Patch each jump to the point where the jump's target was copied, if there
// is one.
for (auto pair : mJumps) {
uint8_t* source = pair.first;
uint8_t* target = pair.second;
for (auto copyPair : mCopiedInstructions) {
if (copyPair.first == target) {
PatchJump(source, copyPair.second);
break;
}
}
}
}
void
Assembler::NoteOriginalInstruction(uint8_t* aIp)
{
mCopiedInstructions.emplaceBack(aIp, Current());
}
void
Assembler::Advance(size_t aSize)
{
MOZ_RELEASE_ASSERT(aSize <= MaximumAdvance);
mCursor += aSize;
}
static const size_t JumpBytes = 17;
uint8_t*
Assembler::Current()
{
// Reallocate the buffer if there is not enough space. We need enough for the
// maximum space used by any of the assembling functions, as well as for a
// following jump for fallthrough to the next allocated space.
if (size_t(mCursorEnd - mCursor) <= MaximumAdvance + JumpBytes) {
MOZ_RELEASE_ASSERT(mCanAllocateStorage);
// Allocate some writable, executable memory.
static const size_t BufferSize = PageSize;
uint8_t* buffer = new uint8_t[PageSize];
UnprotectExecutableMemory(buffer, PageSize);
if (mCursor) {
// Patch a jump for fallthrough from the last allocation.
MOZ_RELEASE_ASSERT(size_t(mCursorEnd - mCursor) >= JumpBytes);
PatchJump(mCursor, buffer);
}
mCursor = buffer;
mCursorEnd = &buffer[BufferSize];
}
return mCursor;
}
static void
Push16(uint8_t** aIp, uint16_t aValue)
{
(*aIp)[0] = 0x66;
(*aIp)[1] = 0x68;
*reinterpret_cast<uint16_t*>(*aIp + 2) = aValue;
(*aIp) += 4;
}
/* static */ void
Assembler::PatchJump(uint8_t* aIp, void* aTarget)
{
// Push the target literal onto the stack, 2 bytes at a time. This is
// apparently the best way of getting an arbitrary 8 byte literal onto the
// stack, as 4 byte literals we push will be sign extended to 8 bytes.
size_t ntarget = reinterpret_cast<size_t>(aTarget);
Push16(&aIp, ntarget >> 48);
Push16(&aIp, ntarget >> 32);
Push16(&aIp, ntarget >> 16);
Push16(&aIp, ntarget);
*aIp = 0xC3; // ret
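// Each Push16 above emits 4 bytes (0x66 0x68 imm16), so the four pushes plus
// this final ret account for the 17 JumpBytes reserved for patched jumps.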
}
void
Assembler::Jump(void* aTarget)
{
PatchJump(Current(), aTarget);
mJumps.emplaceBack(Current(), (uint8_t*) aTarget);
Advance(JumpBytes);
}
static uint8_t
OppositeJump(uint8_t aOpcode)
{
// Get the opposite single byte jump opcode for a one or two byte conditional
// jump. Opposite opcodes are adjacent, e.g. 0x7C -> jl and 0x7D -> jge.
if (aOpcode >= 0x80 && aOpcode <= 0x8F) {
aOpcode -= 0x10;
} else {
MOZ_RELEASE_ASSERT(aOpcode >= 0x70 && aOpcode <= 0x7F);
}
return (aOpcode & 1) ? aOpcode - 1 : aOpcode + 1;
}
void
Assembler::ConditionalJump(uint8_t aCode, void* aTarget)
{
uint8_t* ip = Current();
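// Assemble the inverted condition as a short jump over the long jump emitted
// below: if aCode's condition holds, execution falls through into the long
// jump to aTarget; otherwise the long jump is skipped.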
ip[0] = OppositeJump(aCode);
ip[1] = (uint8_t) JumpBytes;
Advance(2);
Jump(aTarget);
}
void
Assembler::CopyInstruction(uint8_t* aIp, size_t aSize)
{
MOZ_RELEASE_ASSERT(aSize <= MaximumInstructionLength);
memcpy(Current(), aIp, aSize);
Advance(aSize);
}
void
Assembler::PushRax()
{
NewInstruction(0x50);
}
void
Assembler::PopRax()
{
NewInstruction(0x58);
}
void
Assembler::JumpToRax()
{
NewInstruction(0xFF, 0xE0);
}
void
Assembler::CallRax()
{
NewInstruction(0xFF, 0xD0);
}
void
Assembler::LoadRax(size_t aWidth)
{
switch (aWidth) {
case 1: NewInstruction(0x8A, 0x00); break;
case 2: NewInstruction(0x66, 0x8B, 0x00); break;
case 4: NewInstruction(0x8B, 0x00); break;
case 8: NewInstruction(0x48, 0x8B, 0x00); break;
default: MOZ_CRASH();
}
}
void
Assembler::CompareRaxWithTopOfStack()
{
NewInstruction(0x48, 0x39, 0x04, 0x24);
}
void
Assembler::PushRbx()
{
NewInstruction(0x53);
}
void
Assembler::PopRbx()
{
NewInstruction(0x5B);
}
void
Assembler::StoreRbxToRax(size_t aWidth)
{
switch (aWidth) {
case 1: NewInstruction(0x88, 0x18); break;
case 2: NewInstruction(0x66, 0x89, 0x18); break;
case 4: NewInstruction(0x89, 0x18); break;
case 8: NewInstruction(0x48, 0x89, 0x18); break;
default: MOZ_CRASH();
}
}
void
Assembler::CompareValueWithRax(uint8_t aValue, size_t aWidth)
{
switch (aWidth) {
case 1: NewInstruction(0x3C, aValue); break;
case 2: NewInstruction(0x66, 0x83, 0xF8, aValue); break;
case 4: NewInstruction(0x83, 0xF8, aValue); break;
case 8: NewInstruction(0x48, 0x83, 0xF8, aValue); break;
default: MOZ_CRASH();
}
}
static const size_t MoveImmediateBytes = 10;
/* static */ void
Assembler::PatchMoveImmediateToRax(uint8_t* aIp, void* aValue)
{
aIp[0] = 0x40 | (1 << 3);
aIp[1] = 0xB8;
*reinterpret_cast<void**>(aIp + 2) = aValue;
}
void
Assembler::MoveImmediateToRax(void* aValue)
{
PatchMoveImmediateToRax(Current(), aValue);
Advance(MoveImmediateBytes);
}
void
Assembler::MoveRaxToRegister(/*ud_type*/ int aRegister)
{
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));
uint8_t* ip = Current();
if (aRegister <= UD_R_RDI) {
ip[0] = 0x48;
ip[1] = 0x89;
ip[2] = 0xC0 + aRegister - UD_R_RAX;
} else {
ip[0] = 0x49;
ip[1] = 0x89;
ip[2] = 0xC0 + aRegister - UD_R_R8;
}
Advance(3);
}
void
Assembler::MoveRegisterToRax(/*ud_type*/ int aRegister)
{
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));
uint8_t* ip = Current();
if (aRegister <= UD_R_RDI) {
ip[0] = 0x48;
ip[1] = 0x89;
ip[2] = 0xC0 + (aRegister - UD_R_RAX) * 8;
} else {
ip[0] = 0x4C;
ip[1] = 0x89;
ip[2] = 0xC0 + (aRegister - UD_R_R8) * 8;
}
Advance(3);
}
/* static */ /*ud_type*/ int
Assembler::NormalizeRegister(/*ud_type*/ int aRegister)
{
if (aRegister >= UD_R_AL && aRegister <= UD_R_R15B) {
return aRegister - UD_R_AL + UD_R_RAX;
}
if (aRegister >= UD_R_AX && aRegister <= UD_R_R15W) {
return aRegister - UD_R_AX + UD_R_RAX;
}
if (aRegister >= UD_R_EAX && aRegister <= UD_R_R15D) {
return aRegister - UD_R_EAX + UD_R_RAX;
}
if (aRegister >= UD_R_RAX && aRegister <= UD_R_R15) {
return aRegister;
}
return UD_NONE;
}
/* static */ bool
Assembler::CanPatchShortJump(uint8_t* aIp, void* aTarget)
{
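// A short jump's rel8 displacement is measured from the end of the 2 byte
// instruction, hence the aIp + 2 terms.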
return (aIp + 2 - 128 <= aTarget) && (aIp + 2 + 127 >= aTarget);
}
/* static */ void
Assembler::PatchShortJump(uint8_t* aIp, void* aTarget)
{
MOZ_RELEASE_ASSERT(CanPatchShortJump(aIp, aTarget));
aIp[0] = 0xEB;
aIp[1] = uint8_t(static_cast<uint8_t*>(aTarget) - aIp - 2);
}
/* static */ void
Assembler::PatchJumpClobberRax(uint8_t* aIp, void* aTarget)
{
PatchMoveImmediateToRax(aIp, aTarget);
aIp[10] = 0x50; // push %rax
aIp[11] = 0xC3; // ret
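// The 10 byte mov plus the push and ret above fill the 12 byte
// JumpBytesClobberRax declared in Assembler.h.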
}
/* static */ void
Assembler::PatchClobber(uint8_t* aIp)
{
aIp[0] = 0xCC; // int3
}
static uint8_t*
PageStart(uint8_t* aPtr)
{
static_assert(sizeof(size_t) == sizeof(uintptr_t), "Unsupported Platform");
return reinterpret_cast<uint8_t*>(reinterpret_cast<size_t>(aPtr) & ~(PageSize - 1));
}
void
UnprotectExecutableMemory(uint8_t* aAddress, size_t aSize)
{
MOZ_ASSERT(aSize);
uint8_t* pageStart = PageStart(aAddress);
uint8_t* pageEnd = PageStart(aAddress + aSize - 1) + PageSize;
int ret = mprotect(pageStart, pageEnd - pageStart, PROT_READ | PROT_EXEC | PROT_WRITE);
MOZ_RELEASE_ASSERT(ret >= 0);
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,181 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Assembler_h
#define mozilla_recordreplay_Assembler_h
#include "InfallibleVector.h"
#include <utility>
namespace mozilla {
namespace recordreplay {
// Assembler for x64 instructions. This is a simple assembler that is primarily
// designed for use in copying instructions from a function that is being
// redirected.
class Assembler
{
public:
// Create an assembler that allocates its own instruction storage. Assembled
// code will never be reclaimed by the system.
Assembler();
// Create an assembler that uses the specified memory range for instruction
// storage.
Assembler(uint8_t* aStorage, size_t aSize);
~Assembler();
// Mark the point at which we start copying an instruction in the original
// range.
void NoteOriginalInstruction(uint8_t* aIp);
// Get the address where the next assembled instruction will be placed.
uint8_t* Current();
///////////////////////////////////////////////////////////////////////////////
// Routines for assembling instructions in new instruction storage
///////////////////////////////////////////////////////////////////////////////
// Jump to aTarget. If aTarget is in the range of instructions being copied,
// the target will be the copy of aTarget instead.
void Jump(void* aTarget);
// Conditionally jump to aTarget, depending on the short jump opcode aCode.
// If aTarget is in the range of instructions being copied, the target will
// be the copy of aTarget instead.
void ConditionalJump(uint8_t aCode, void* aTarget);
// Copy an instruction verbatim from aIp.
void CopyInstruction(uint8_t* aIp, size_t aSize);
// push/pop %rax
void PushRax();
void PopRax();
// jump *%rax
void JumpToRax();
// call *%rax
void CallRax();
// movq/movl/movb 0(%rax), %rax
void LoadRax(size_t aWidth);
// cmpq %rax, 0(%rsp)
void CompareRaxWithTopOfStack();
// push/pop %rbx
void PushRbx();
void PopRbx();
// movq/movl/movb %rbx, 0(%rax)
void StoreRbxToRax(size_t aWidth);
// cmpq/cmpb $literal8, %rax
void CompareValueWithRax(uint8_t aValue, size_t aWidth);
// movq $value, %rax
void MoveImmediateToRax(void* aValue);
// movq %rax, register
void MoveRaxToRegister(/*ud_type*/ int aRegister);
// movq register, %rax
void MoveRegisterToRax(/*ud_type*/ int aRegister);
// Normalize a Udis86 register to its 8 byte version, returning UD_NONE/zero
// for unexpected registers.
static /*ud_type*/ int NormalizeRegister(/*ud_type*/ int aRegister);
///////////////////////////////////////////////////////////////////////////////
// Routines for assembling instructions at arbitrary locations
///////////////////////////////////////////////////////////////////////////////
// Return whether it is possible to patch a short jump to aTarget from aIp.
static bool CanPatchShortJump(uint8_t* aIp, void* aTarget);
// Patch a short jump to aTarget at aIp.
static void PatchShortJump(uint8_t* aIp, void* aTarget);
// Patch a long jump to aTarget at aIp. Rax may be clobbered.
static void PatchJumpClobberRax(uint8_t* aIp, void* aTarget);
// Patch the value used in an earlier MoveImmediateToRax call.
static void PatchMoveImmediateToRax(uint8_t* aIp, void* aValue);
// Patch an int3 breakpoint instruction at Ip.
static void PatchClobber(uint8_t* aIp);
private:
// Patch a jump that doesn't clobber any instructions.
static void PatchJump(uint8_t* aIp, void* aTarget);
// Consume some instruction storage.
void Advance(size_t aSize);
// The maximum amount we can write at a time without a jump potentially
// being introduced into the instruction stream.
static const size_t MaximumAdvance = 20;
inline size_t CountBytes() { return 0; }
template <typename... Tail>
inline size_t CountBytes(uint8_t aByte, Tail... aMoreBytes) {
return 1 + CountBytes(aMoreBytes...);
}
inline void CopyBytes(uint8_t* aIp) {}
template <typename... Tail>
inline void CopyBytes(uint8_t* aIp, uint8_t aByte, Tail... aMoreBytes) {
*aIp = aByte;
CopyBytes(aIp + 1, aMoreBytes...);
}
// Write a complete instruction with bytes specified as parameters.
template <typename... ByteList>
inline void NewInstruction(ByteList... aBytes) {
size_t numBytes = CountBytes(aBytes...);
MOZ_ASSERT(numBytes <= MaximumAdvance);
uint8_t* ip = Current();
CopyBytes(ip, aBytes...);
Advance(numBytes);
}
// Storage for assembling new instructions.
uint8_t* mCursor;
uint8_t* mCursorEnd;
bool mCanAllocateStorage;
// Association between original and copied instruction pointers, for all
// instructions that have been copied.
InfallibleVector<std::pair<uint8_t*,uint8_t*>> mCopiedInstructions;
// For jumps we have copied, association between the source (in generated
// code) and target (in the original code) of the jump. In the assembler's
// destructor, each such jump is patched to point at the copy of its target,
// if that target was itself copied.
InfallibleVector<std::pair<uint8_t*,uint8_t*>> mJumps;
};
// The number of instruction bytes required for a short jump.
static const size_t ShortJumpBytes = 2;
// The number of instruction bytes required for a jump that may clobber rax.
static const size_t JumpBytesClobberRax = 12;
// The maximum byte length of an x86/x64 instruction.
static const size_t MaximumInstructionLength = 15;
// Make a region of memory RWX.
void UnprotectExecutableMemory(uint8_t* aAddress, size_t aSize);
} // recordreplay
} // mozilla
#endif // mozilla_recordreplay_Assembler_h
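For orientation, a minimal sketch of how a redirection might drive this assembler; the helper and its parameters are hypothetical and not part of this patch, but the calls follow the API declared above: relocate a function's first instruction, then resume the rest of the original code.
static void
CopyPrologueAndResume(Assembler& aAssembler, uint8_t* aFunctionStart, size_t aFirstInsnLength)
{
  // Remember where the copy of this original instruction begins.
  aAssembler.NoteOriginalInstruction(aFunctionStart);
  // Relocate the instruction verbatim into the assembler's storage.
  aAssembler.CopyInstruction(aFunctionStart, aFirstInsnLength);
  // Jump back to the remainder of the original function.
  aAssembler.Jump(aFunctionStart + aFirstInsnLength);
}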

Просмотреть файл

@ -0,0 +1,116 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_CallFunction_h
#define mozilla_recordreplay_CallFunction_h
namespace mozilla {
namespace recordreplay {
// These macros define functions for calling a void* function pointer with
// a particular ABI and arbitrary arguments. In principle we could do this
// with varargs (i.e. cast to 'int (ABI *)(...)' before calling), but MSVC
// treats 'int (__stdcall *)(...)' as 'int (__cdecl *)(...)', unfortunately.
//
// After instantiating DefineAllCallFunctions, the resulting functions will
// be overloaded and have the form, for a given ABI:
//
// template <typename ReturnType>
// ReturnType CallFunctionABI(void* fn);
//
// template <typename ReturnType, typename T0>
// ReturnType CallFunctionABI(void* fn, T0 a0);
//
// template <typename ReturnType, typename T0, typename T1>
// ReturnType CallFunctionABI(void* fn, T0 a0, T1 a1);
//
// And so forth.
#define DefineCallFunction(aABI, aReturnType, aFormals, aFormalTypes, aActuals) \
static inline aReturnType CallFunction ##aABI aFormals { \
MOZ_ASSERT(aFn); \
return BitwiseCast<aReturnType (aABI *) aFormalTypes>(aFn) aActuals; \
}
#define DefineAllCallFunctions(aABI) \
template <typename ReturnType> \
DefineCallFunction(aABI, ReturnType, (void* aFn), (), ()) \
template <typename ReturnType, typename T0> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0), (T0), (a0)) \
template <typename ReturnType, typename T0, typename T1> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1), (T0, T1), (a0, a1)) \
template <typename ReturnType, typename T0, typename T1, typename T2> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2), \
(T0, T1, T2), (a0, a1, a2)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3), \
(T0, T1, T2, T3), \
(a0, a1, a2, a3)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4), \
(T0, T1, T2, T3, T4), \
(a0, a1, a2, a3, a4)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5), \
(T0, T1, T2, T3, T4, T5), \
(a0, a1, a2, a3, a4, a5)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5, typename T6> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
T6 a6), \
(T0, T1, T2, T3, T4, T5, T6), \
(a0, a1, a2, a3, a4, a5, a6)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5, typename T6, typename T7> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
T6 a6, T7 a7), \
(T0, T1, T2, T3, T4, T5, T6, T7), \
(a0, a1, a2, a3, a4, a5, a6, a7)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5, typename T6, typename T7, \
typename T8> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
T6 a6, T7 a7, T8 a8), \
(T0, T1, T2, T3, T4, T5, T6, T7, T8), \
(a0, a1, a2, a3, a4, a5, a6, a7, a8)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5, typename T6, typename T7, \
typename T8, typename T9> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
T6 a6, T7 a7, T8 a8, T9 a9), \
(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9), \
(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5, typename T6, typename T7, \
typename T8, typename T9, typename T10> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
T6 a6, T7 a7, T8 a8, T9 a9, T10 a10), \
(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10), \
(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)) \
template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
typename T4, typename T5, typename T6, typename T7, \
typename T8, typename T9, typename T10, typename T11> \
DefineCallFunction(aABI, ReturnType, \
(void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
T6 a6, T7 a7, T8 a8, T9 a9, T10 a10, T11 a11), \
(T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11), \
(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11))
} // recordreplay
} // mozilla
#endif // mozilla_recordreplay_CallFunction_h
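A hedged usage sketch: assuming DefineAllCallFunctions has been instantiated with a placeholder ABI token (spelled ABI below; the real instantiations live in the platform redirection code), the generated overloads are called with the return type given explicitly and the argument types deduced. The function pointer and arguments are illustrative.
// Given: DefineAllCallFunctions(ABI)             // hypothetical instantiation
// Invoke a void* system function whose real signature is int(int, const char*):
//   int rv = CallFunctionABI<int>(aSystemFn, 3, "hello");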

Просмотреть файл

@ -0,0 +1,142 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Callback.h"
#include "ipc/ChildIPC.h"
#include "mozilla/Assertions.h"
#include "mozilla/RecordReplay.h"
#include "mozilla/StaticMutex.h"
#include "ProcessRewind.h"
#include "Thread.h"
#include "ValueIndex.h"
namespace mozilla {
namespace recordreplay {
static ValueIndex* gCallbackData;
static StaticMutexNotRecorded gCallbackMutex;
void
RegisterCallbackData(void* aData)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
if (!aData) {
return;
}
RecordReplayAssert("RegisterCallbackData");
AutoOrderedAtomicAccess at;
StaticMutexAutoLock lock(gCallbackMutex);
if (!gCallbackData) {
gCallbackData = new ValueIndex();
}
gCallbackData->Insert(aData);
}
void
BeginCallback(size_t aCallbackId)
{
MOZ_RELEASE_ASSERT(IsRecording());
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
if (thread->IsMainThread()) {
child::EndIdleTime();
}
thread->SetPassThrough(false);
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::ExecuteCallback);
thread->Events().WriteScalar(aCallbackId);
}
void
EndCallback()
{
MOZ_RELEASE_ASSERT(IsRecording());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
if (thread->IsMainThread()) {
child::BeginIdleTime();
}
thread->SetPassThrough(true);
}
void
SaveOrRestoreCallbackData(void** aData)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
MOZ_RELEASE_ASSERT(gCallbackData);
Thread* thread = Thread::Current();
RecordReplayAssert("RestoreCallbackData");
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::RestoreCallbackData);
size_t index = 0;
if (IsRecording() && *aData) {
StaticMutexAutoLock lock(gCallbackMutex);
index = gCallbackData->GetIndex(*aData);
}
thread->Events().RecordOrReplayScalar(&index);
if (IsReplaying()) {
*aData = const_cast<void*>(gCallbackData->GetValue(index));
}
}
void
RemoveCallbackData(void* aData)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
StaticMutexAutoLock lock(gCallbackMutex);
gCallbackData->Remove(aData);
}
void
PassThroughThreadEventsAllowCallbacks(const std::function<void()>& aFn)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
if (IsRecording()) {
if (thread->IsMainThread()) {
child::BeginIdleTime();
}
thread->SetPassThrough(true);
aFn();
if (thread->IsMainThread()) {
child::EndIdleTime();
}
thread->SetPassThrough(false);
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::CallbacksFinished);
} else {
while (true) {
ThreadEvent ev = (ThreadEvent) thread->Events().ReadScalar();
if (ev != ThreadEvent::ExecuteCallback) {
if (ev != ThreadEvent::CallbacksFinished) {
child::ReportFatalError("Unexpected event while replaying callback events");
}
break;
}
size_t id = thread->Events().ReadScalar();
ReplayInvokeCallback(id);
}
}
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,96 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Callback_h
#define mozilla_recordreplay_Callback_h
#include "mozilla/GuardObjects.h"
#include <functional>
namespace mozilla {
namespace recordreplay {
// Callbacks Overview.
//
// Record/replay callbacks are used to record and replay the use of callbacks
// within system libraries to reenter Gecko code. There are three challenges
// to replaying callbacks:
//
// 1. Invocations of the callbacks must be replayed so that they occur inside
// the same system call and in the same order as during recording.
//
// 2. Data passed to the callback which originates in Gecko itself (e.g.
// opaque data pointers) need to match up with the Gecko data which was
// passed to the callback while recording.
//
// 3. Data passed to the callback which originates in the system library also
// needs to match up with the data passed while recording.
//
// Each platform defines a CallbackEvent enum with the different callback
// signatures that the platform is able to redirect. Callback wrapper functions
// are then defined for each callback event.
//
// The following additional steps are taken to handle #1 above:
//
// A. System libraries which Gecko callbacks are passed to are redirected so
// that they replace the Gecko callback with the callback wrapper for that
// signature.
//
// B. When recording, system libraries which can invoke Gecko callbacks are
// redirected to call the library API inside a call to
// PassThroughThreadEventsAllowCallbacks.
//
// C. When a callback wrapper is invoked within the library, it calls
// {Begin,End}Callback to stop passing through thread events while the
// callback executes.
//
// D. {Begin,End}Callback additionally add ExecuteCallback events for the
// thread, and PassThroughThreadEventsAllowCallbacks adds a
// CallbacksFinished event at the end. While replaying, calling
// PassThroughThreadEventsAllowCallbacks will read these callback events
// from the thread's events file and plays back calls to the wrappers which
// executed while recording.
//
// #2 above is handled with the callback data API below. When a Gecko callback
// or opaque data pointer is passed to a system library API, that API is
// redirected so that it will call RegisterCallbackData on the Gecko pointer.
// Later, when the callback wrapper actually executes, it can use
// SaveOrRestoreCallbackData to record which Gecko pointer was used and, during
// replay, to restore the corresponding value in that execution.
//
// #3 above can be recorded and replayed using the standard
// RecordReplay{Value,Bytes} functions, in a similar manner to the handling of
// outputs of redirected functions.
// Note or remove a pointer passed to a system library API which might be a
// Gecko callback or a data pointer used by a Gecko callback.
void RegisterCallbackData(void* aData);
void RemoveCallbackData(void* aData);
// Record/replay a pointer that was passed to RegisterCallbackData earlier.
void SaveOrRestoreCallbackData(void** aData);
// If recording, call aFn with events passed through, allowing Gecko callbacks
// to execute within aFn. If replaying, execute only the Gecko callbacks which
// executed while recording.
void PassThroughThreadEventsAllowCallbacks(const std::function<void()>& aFn);
// Within a callback wrapper, bracket the execution of the code for the Gecko
// callback and record the callback as having executed. This stops passing
// through thread events so that behaviors in the Gecko callback are
// recorded/replayed.
void BeginCallback(size_t aCallbackId);
void EndCallback();
// During replay, invoke a callback with the specified id. This is platform
// specific and is defined in the various ProcessRedirect*.cpp files.
void ReplayInvokeCallback(size_t aCallbackId);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_Callback_h
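A minimal sketch of what a per-signature callback wrapper might look like using the API above. The event id (kExampleCallback) and the Gecko entry point (OnLibraryEvent) are assumptions for illustration; real wrappers and their CallbackEvent ids are defined per platform. The IsRecording() guards mirror the assertions in Callback.cpp that Begin/EndCallback only run while recording.
static void
ExampleCallbackWrapper(void* aClosure)
{
  if (IsRecording()) {
    // Stop passing through thread events and record that this wrapper ran.
    BeginCallback(kExampleCallback);
  }
  // While recording, note which Gecko closure the library handed us; while
  // replaying, restore the closure that was seen during recording.
  SaveOrRestoreCallbackData(&aClosure);
  // Reenter Gecko with the matched closure.
  OnLibraryEvent(aClosure);
  if (IsRecording()) {
    // Resume passing through thread events.
    EndCallback();
  }
}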

Просмотреть файл

@ -0,0 +1,108 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ChunkAllocator_h
#define mozilla_recordreplay_ChunkAllocator_h
#include "SpinLock.h"
namespace mozilla {
namespace recordreplay {
// ChunkAllocator is a simple allocator class for creating objects which can be
// fetched by their integer id. Objects are stored as a linked list of arrays;
// like a linked list, existing entries can be accessed without taking or
// holding a lock, and using an array in each element mitigates the runtime
// cost of O(n) lookup.
//
// ChunkAllocator contents are never destroyed.
template <typename T>
class ChunkAllocator
{
struct Chunk;
typedef Atomic<Chunk*, SequentiallyConsistent, Behavior::DontPreserve> ChunkPointer;
// A page sized block holding a next pointer and an array of as many things
// as possible.
struct Chunk
{
uint8_t mStorage[PageSize - sizeof(Chunk*)];
ChunkPointer mNext;
Chunk() : mStorage{}, mNext(nullptr) {}
static size_t MaxThings() {
return sizeof(mStorage) / sizeof(T);
}
T* GetThing(size_t i) {
MOZ_RELEASE_ASSERT(i < MaxThings());
return reinterpret_cast<T*>(&mStorage[i * sizeof(T)]);
}
};
ChunkPointer mFirstChunk;
Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mCapacity;
SpinLock mLock;
void EnsureChunk(ChunkPointer* aChunk) {
if (!*aChunk) {
*aChunk = new Chunk();
mCapacity += Chunk::MaxThings();
}
}
ChunkAllocator(const ChunkAllocator&) = delete;
ChunkAllocator& operator=(const ChunkAllocator&) = delete;
public:
// ChunkAllocators are allocated in static storage and should not have
// constructors. Their memory will be initially zero.
ChunkAllocator() = default;
~ChunkAllocator() = default;
// Get an existing entry from the allocator.
inline T* Get(size_t aId) {
Chunk* chunk = mFirstChunk;
while (aId >= Chunk::MaxThings()) {
aId -= Chunk::MaxThings();
chunk = chunk->mNext;
}
return chunk->GetThing(aId);
}
// Get an existing entry from the allocator, or null. This may return an
// entry that has not been created yet.
inline T* MaybeGet(size_t aId) {
return (aId < mCapacity) ? Get(aId) : nullptr;
}
// Create a new entry with the specified ID. This must not be called on IDs
// that have already been used with this allocator.
inline T* Create(size_t aId) {
if (aId < mCapacity) {
T* res = Get(aId);
return new(res) T();
}
AutoSpinLock lock(mLock);
ChunkPointer* pchunk = &mFirstChunk;
while (aId >= Chunk::MaxThings()) {
aId -= Chunk::MaxThings();
EnsureChunk(pchunk);
Chunk* chunk = *pchunk;
pchunk = &chunk->mNext;
}
EnsureChunk(pchunk);
Chunk* chunk = *pchunk;
T* res = chunk->GetThing(aId);
return new(res) T();
}
};
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ChunkAllocator_h
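A minimal usage sketch; the allocator and id discipline here are illustrative. Allocators are intended to live in static storage, with the caller assigning ids and calling Create() exactly once per id before any Get() on it.
static ChunkAllocator<size_t> gValues;
static void
ExampleUse(size_t aId)
{
  size_t* value = gValues.Create(aId);              // first use of this id
  *value = aId * 2;
  MOZ_RELEASE_ASSERT(*gValues.Get(aId) == aId * 2); // later lookups take no lock
}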

Просмотреть файл

@ -0,0 +1,125 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "DirtyMemoryHandler.h"
#include "ipc/ChildIPC.h"
#include "mozilla/Sprintf.h"
#include "MemorySnapshot.h"
#include "Thread.h"
#include <mach/exc.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/time.h>
namespace mozilla {
namespace recordreplay {
static mach_port_t gDirtyMemoryExceptionPort;
// See AsmJSSignalHandlers.cpp.
static const mach_msg_id_t sExceptionId = 2405;
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs). See js/src/wasm/WasmSignalHandlers.cpp.
#pragma pack(4)
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread;
mach_msg_port_descriptor_t task;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_type_t exception;
mach_msg_type_number_t codeCnt;
int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()
typedef struct {
Request__mach_exception_raise_t body;
mach_msg_trailer_t trailer;
} ExceptionRequest;
static void
DirtyMemoryExceptionHandlerThread(void*)
{
kern_return_t kret;
while (true) {
ExceptionRequest request;
kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
gDirtyMemoryExceptionPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
kern_return_t replyCode = KERN_FAILURE;
if (kret == KERN_SUCCESS &&
request.body.Head.msgh_id == sExceptionId &&
request.body.exception == EXC_BAD_ACCESS &&
request.body.codeCnt == 2)
{
uint8_t* faultingAddress = (uint8_t*) request.body.code[1];
if (HandleDirtyMemoryFault(faultingAddress)) {
replyCode = KERN_SUCCESS;
} else {
child::ReportFatalError("HandleDirtyMemoryFault failed %p %s", faultingAddress,
gMozCrashReason ? gMozCrashReason : "");
}
} else {
child::ReportFatalError("DirtyMemoryExceptionHandlerThread mach_msg returned unexpected data");
}
__Reply__exception_raise_t reply;
reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
reply.Head.msgh_size = sizeof(reply);
reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
reply.Head.msgh_local_port = MACH_PORT_NULL;
reply.Head.msgh_id = request.body.Head.msgh_id + 100;
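// Per Mach/MIG convention, the reply message id is the request id plus 100.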
reply.NDR = NDR_record;
reply.RetCode = replyCode;
mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
}
void
SetupDirtyMemoryHandler()
{
// Allow repeated calls.
static bool hasDirtyMemoryHandler = false;
if (hasDirtyMemoryHandler) {
return;
}
hasDirtyMemoryHandler = true;
MOZ_RELEASE_ASSERT(AreThreadEventsPassedThrough());
kern_return_t kret;
// Get a port which can send and receive data.
kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &gDirtyMemoryExceptionPort);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
kret = mach_port_insert_right(mach_task_self(),
gDirtyMemoryExceptionPort, gDirtyMemoryExceptionPort,
MACH_MSG_TYPE_MAKE_SEND);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
// Create a thread to block on reading the port.
Thread::SpawnNonRecordedThread(DirtyMemoryExceptionHandlerThread, nullptr);
// Set exception ports on the entire task. Unfortunately, this clobbers any
// other exception ports for the task, and forwarding to those other ports
// is not easy to get right.
kret = task_set_exception_ports(mach_task_self(),
EXC_MASK_BAD_ACCESS,
gDirtyMemoryExceptionPort,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
THREAD_STATE_NONE);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
}
} // namespace recordreplay
} // namespace mozilla

View file

@ -0,0 +1,20 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_DirtyMemoryHandler_h
#define mozilla_recordreplay_DirtyMemoryHandler_h
namespace mozilla {
namespace recordreplay {
// Set up a handler to catch SEGV hardware exceptions and pass them on to
// HandleDirtyMemoryFault in MemorySnapshot.h for handling.
void SetupDirtyMemoryHandler();
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_DirtyMemoryHandler_h

View file

@ -0,0 +1,446 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "File.h"
#include "ipc/ChildIPC.h"
#include "mozilla/Compression.h"
#include "mozilla/Sprintf.h"
#include "ProcessRewind.h"
#include "SpinLock.h"
#include <algorithm>
namespace mozilla {
namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Stream
///////////////////////////////////////////////////////////////////////////////
void
Stream::ReadBytes(void* aData, size_t aSize)
{
MOZ_RELEASE_ASSERT(mFile->OpenForReading());
size_t totalRead = 0;
while (true) {
// Read what we can from the data buffer.
MOZ_RELEASE_ASSERT(mBufferPos <= mBufferLength);
size_t bufAvailable = mBufferLength - mBufferPos;
size_t bufRead = std::min(bufAvailable, aSize);
if (aData) {
memcpy(aData, &mBuffer[mBufferPos], bufRead);
aData = (char*)aData + bufRead;
}
mBufferPos += bufRead;
mStreamPos += bufRead;
totalRead += bufRead;
aSize -= bufRead;
if (!aSize) {
return;
}
MOZ_RELEASE_ASSERT(mBufferPos == mBufferLength);
// If we try to read off the end of a stream then we must have hit the end
// of the replay for this thread.
while (mChunkIndex == mChunks.length()) {
MOZ_RELEASE_ASSERT(mName == StreamName::Event || mName == StreamName::Assert);
HitEndOfRecording();
}
const StreamChunkLocation& chunk = mChunks[mChunkIndex++];
EnsureMemory(&mBallast, &mBallastSize, chunk.mCompressedSize, BallastMaxSize(),
DontCopyExistingData);
mFile->ReadChunk(mBallast.get(), chunk);
EnsureMemory(&mBuffer, &mBufferSize, chunk.mDecompressedSize, BUFFER_MAX,
DontCopyExistingData);
size_t bytesWritten;
if (!Compression::LZ4::decompress(mBallast.get(), chunk.mCompressedSize,
mBuffer.get(), chunk.mDecompressedSize, &bytesWritten) ||
bytesWritten != chunk.mDecompressedSize)
{
MOZ_CRASH();
}
mBufferPos = 0;
mBufferLength = chunk.mDecompressedSize;
}
}
bool
Stream::AtEnd()
{
MOZ_RELEASE_ASSERT(mFile->OpenForReading());
return mBufferPos == mBufferLength && mChunkIndex == mChunks.length();
}
void
Stream::WriteBytes(const void* aData, size_t aSize)
{
MOZ_RELEASE_ASSERT(mFile->OpenForWriting());
// Prevent the entire file from being flushed while we write this data.
AutoReadSpinLock streamLock(mFile->mStreamLock);
while (true) {
// Fill up the data buffer first.
MOZ_RELEASE_ASSERT(mBufferPos <= mBufferSize);
size_t bufAvailable = mBufferSize - mBufferPos;
size_t bufWrite = (bufAvailable < aSize) ? bufAvailable : aSize;
memcpy(&mBuffer[mBufferPos], aData, bufWrite);
mBufferPos += bufWrite;
mStreamPos += bufWrite;
if (bufWrite == aSize) {
return;
}
aData = (char*)aData + bufWrite;
aSize -= bufWrite;
// Grow the stream's buffer if it is not at its maximum size.
if (mBufferSize < BUFFER_MAX) {
EnsureMemory(&mBuffer, &mBufferSize, mBufferSize + 1, BUFFER_MAX, CopyExistingData);
continue;
}
Flush(/* aTakeLock = */ true);
}
}
size_t
Stream::ReadScalar()
{
// Read back a pointer sized value using the same encoding as WriteScalar.
size_t value = 0, shift = 0;
while (true) {
uint8_t bits;
ReadBytes(&bits, 1);
value |= (size_t)(bits & 127) << shift;
if (!(bits & 128)) {
break;
}
shift += 7;
}
return value;
}
void
Stream::WriteScalar(size_t aValue)
{
// Pointer sized values are written out as unsigned values with an encoding
// optimized for small values. Each written byte successively captures 7 bits
// of data from the value, starting at the low end, with the high bit in the
// byte indicating whether there are any more non-zero bits in the value.
//
// With this encoding, values less than 2^7 (128) require one byte, values
// less than 2^14 (16384) require two bytes, and so forth, but negative
// numbers end up requiring ten bytes on a 64 bit architecture.
do {
uint8_t bits = aValue & 127;
aValue = aValue >> 7;
if (aValue) {
bits |= 128;
}
WriteBytes(&bits, 1);
} while (aValue);
}
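// Hedged illustration of the encoding described above: the value 300
// (binary 100101100) is written as the two bytes 0xac, 0x02. Below is a
// minimal standalone sketch of the same encoder; EncodeScalar is a
// hypothetical helper, not part of Stream.
static void
EncodeScalar(size_t aValue, InfallibleVector<uint8_t>& aOut)
{
  do {
    uint8_t bits = aValue & 127;  // low seven bits of the remaining value
    aValue = aValue >> 7;
    if (aValue) {
      bits |= 128;                // high bit set: more bytes follow
    }
    aOut.append(bits);
  } while (aValue);
}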
void
Stream::CheckInput(size_t aValue)
{
size_t oldValue = aValue;
RecordOrReplayScalar(&oldValue);
if (oldValue != aValue) {
child::ReportFatalError("Input Mismatch: Recorded: %zu Replayed %zu\n", oldValue, aValue);
Unreachable();
}
}
void
Stream::EnsureMemory(UniquePtr<char[]>* aBuf, size_t* aSize,
size_t aNeededSize, size_t aMaxSize, ShouldCopy aCopy)
{
// Once a stream buffer grows, it never shrinks again. Buffers start out
// small because most streams are very small.
MOZ_RELEASE_ASSERT(!!*aBuf == !!*aSize);
MOZ_RELEASE_ASSERT(aNeededSize <= aMaxSize);
if (*aSize < aNeededSize) {
size_t newSize = std::min(std::max<size_t>(256, aNeededSize * 2), aMaxSize);
char* newBuf = new char[newSize];
if (*aBuf && aCopy == CopyExistingData) {
memcpy(newBuf, aBuf->get(), *aSize);
}
aBuf->reset(newBuf);
*aSize = newSize;
}
}
void
Stream::Flush(bool aTakeLock)
{
MOZ_RELEASE_ASSERT(mFile && mFile->OpenForWriting());
if (!mBufferPos) {
return;
}
size_t bound = Compression::LZ4::maxCompressedSize(mBufferPos);
EnsureMemory(&mBallast, &mBallastSize, bound, BallastMaxSize(),
DontCopyExistingData);
size_t compressedSize = Compression::LZ4::compress(mBuffer.get(), mBufferPos, mBallast.get());
MOZ_RELEASE_ASSERT(compressedSize != 0);
MOZ_RELEASE_ASSERT((size_t)compressedSize <= bound);
StreamChunkLocation chunk =
mFile->WriteChunk(mBallast.get(), compressedSize, mBufferPos, aTakeLock);
mChunks.append(chunk);
MOZ_ALWAYS_TRUE(++mChunkIndex == mChunks.length());
mBufferPos = 0;
}
/* static */ size_t
Stream::BallastMaxSize()
{
return Compression::LZ4::maxCompressedSize(BUFFER_MAX);
}
///////////////////////////////////////////////////////////////////////////////
// File
///////////////////////////////////////////////////////////////////////////////
// Information in a file index about a chunk.
struct FileIndexChunk
{
uint32_t /* StreamName */ mName;
uint32_t mNameIndex;
StreamChunkLocation mChunk;
FileIndexChunk()
{
PodZero(this);
}
FileIndexChunk(StreamName aName, uint32_t aNameIndex, const StreamChunkLocation& aChunk)
: mName((uint32_t) aName), mNameIndex(aNameIndex), mChunk(aChunk)
{}
};
// We expect to find this at every index in a file.
static const uint64_t MagicValue = 0xd3e7f5fae445b3ac;
// Index of chunks in a file. There is an index at the start of the file
// (which is always empty) and at various places within the file itself.
struct FileIndex
{
// This should match MagicValue.
uint64_t mMagic;
// How many FileIndexChunk instances follow this structure.
uint32_t mNumChunks;
// The location of the next index in the file, or zero.
uint64_t mNextIndexOffset;
explicit FileIndex(uint32_t aNumChunks)
: mMagic(MagicValue), mNumChunks(aNumChunks), mNextIndexOffset(0)
{}
};
bool
File::Open(const char* aName, Mode aMode)
{
MOZ_RELEASE_ASSERT(!mFd);
MOZ_RELEASE_ASSERT(aName);
mMode = aMode;
mFd = DirectOpenFile(aName, mMode == WRITE);
if (OpenForWriting()) {
// Write an empty index at the start of the file.
FileIndex index(0);
DirectWrite(mFd, &index, sizeof(index));
mWriteOffset += sizeof(index);
return true;
}
// Read in every index in the file.
ReadIndexResult result;
do {
result = ReadNextIndex(nullptr);
if (result == ReadIndexResult::InvalidFile) {
return false;
}
} while (result == ReadIndexResult::FoundIndex);
return true;
}
void
File::Close()
{
if (!mFd) {
return;
}
if (OpenForWriting()) {
Flush();
}
Clear();
}
File::ReadIndexResult
File::ReadNextIndex(InfallibleVector<Stream*>* aUpdatedStreams)
{
// Unlike in the Flush() case, we don't have to worry about other threads
// attempting to read data from streams in this file while we are reading
// the new index.
MOZ_ASSERT(OpenForReading());
// Read in the last index to see if there is another one.
DirectSeekFile(mFd, mLastIndexOffset + offsetof(FileIndex, mNextIndexOffset));
uint64_t nextIndexOffset;
if (DirectRead(mFd, &nextIndexOffset, sizeof(nextIndexOffset)) != sizeof(nextIndexOffset)) {
return ReadIndexResult::InvalidFile;
}
if (!nextIndexOffset) {
return ReadIndexResult::EndOfFile;
}
mLastIndexOffset = nextIndexOffset;
FileIndex index(0);
DirectSeekFile(mFd, nextIndexOffset);
if (DirectRead(mFd, &index, sizeof(index)) != sizeof(index)) {
return ReadIndexResult::InvalidFile;
}
if (index.mMagic != MagicValue) {
return ReadIndexResult::InvalidFile;
}
MOZ_RELEASE_ASSERT(index.mNumChunks);
size_t indexBytes = index.mNumChunks * sizeof(FileIndexChunk);
FileIndexChunk* chunks = new FileIndexChunk[index.mNumChunks];
if (DirectRead(mFd, chunks, indexBytes) != indexBytes) {
return ReadIndexResult::InvalidFile;
}
for (size_t i = 0; i < index.mNumChunks; i++) {
const FileIndexChunk& indexChunk = chunks[i];
Stream* stream = OpenStream((StreamName) indexChunk.mName, indexChunk.mNameIndex);
stream->mChunks.append(indexChunk.mChunk);
if (aUpdatedStreams) {
aUpdatedStreams->append(stream);
}
}
delete[] chunks;
return ReadIndexResult::FoundIndex;
}
bool
File::Flush()
{
MOZ_ASSERT(OpenForWriting());
AutoSpinLock lock(mLock);
InfallibleVector<FileIndexChunk> newChunks;
for (auto& vector : mStreams) {
for (const UniquePtr<Stream>& stream : vector) {
if (stream) {
stream->Flush(/* aTakeLock = */ false);
for (size_t i = stream->mFlushedChunks; i < stream->mChunkIndex; i++) {
newChunks.emplaceBack(stream->mName, stream->mNameIndex, stream->mChunks[i]);
}
stream->mFlushedChunks = stream->mChunkIndex;
}
}
}
if (newChunks.empty()) {
return false;
}
// Write the new index information at the end of the file.
uint64_t indexOffset = mWriteOffset;
size_t indexBytes = newChunks.length() * sizeof(FileIndexChunk);
FileIndex index(newChunks.length());
DirectWrite(mFd, &index, sizeof(index));
DirectWrite(mFd, newChunks.begin(), indexBytes);
mWriteOffset += sizeof(index) + indexBytes;
// Update the next index offset for the last index written.
MOZ_RELEASE_ASSERT(sizeof(index.mNextIndexOffset) == sizeof(indexOffset));
DirectSeekFile(mFd, mLastIndexOffset + offsetof(FileIndex, mNextIndexOffset));
DirectWrite(mFd, &indexOffset, sizeof(indexOffset));
DirectSeekFile(mFd, mWriteOffset);
mLastIndexOffset = indexOffset;
return true;
}
StreamChunkLocation
File::WriteChunk(const char* aStart,
size_t aCompressedSize, size_t aDecompressedSize,
bool aTakeLock)
{
Maybe<AutoSpinLock> lock;
if (aTakeLock) {
lock.emplace(mLock);
}
StreamChunkLocation chunk;
chunk.mOffset = mWriteOffset;
chunk.mCompressedSize = aCompressedSize;
chunk.mDecompressedSize = aDecompressedSize;
DirectWrite(mFd, aStart, aCompressedSize);
mWriteOffset += aCompressedSize;
return chunk;
}
void
File::ReadChunk(char* aDest, const StreamChunkLocation& aChunk)
{
AutoSpinLock lock(mLock);
DirectSeekFile(mFd, aChunk.mOffset);
size_t res = DirectRead(mFd, aDest, aChunk.mCompressedSize);
if (res != aChunk.mCompressedSize) {
MOZ_CRASH();
}
}
Stream*
File::OpenStream(StreamName aName, size_t aNameIndex)
{
AutoSpinLock lock(mLock);
auto& vector = mStreams[(size_t)aName];
while (aNameIndex >= vector.length()) {
vector.emplaceBack();
}
UniquePtr<Stream>& stream = vector[aNameIndex];
if (!stream) {
stream.reset(new Stream(this, aName, aNameIndex));
}
return stream.get();
}
} // namespace recordreplay
} // namespace mozilla

277
toolkit/recordreplay/File.h Normal file
View file

@ -0,0 +1,277 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_File_h
#define mozilla_recordreplay_File_h
#include "InfallibleVector.h"
#include "ProcessRecordReplay.h"
#include "SpinLock.h"
#include "mozilla/PodOperations.h"
#include "mozilla/RecordReplay.h"
#include "mozilla/UniquePtr.h"
namespace mozilla {
namespace recordreplay {
// Structure managing file I/O. Each file contains an index for a set of named
// streams, whose contents are compressed and interleaved throughout the file.
// Additionally, we directly manage the file handle and all associated memory.
// This makes it easier to restore memory snapshots without getting confused
// about the state of the file handles which the process has opened. Data
// written and read from files is automatically compressed with LZ4.
//
// Files are used internally for any disk accesses which the record/replay
// infrastructure needs to make. Currently, this is only for accessing the
// recording file.
//
// File is threadsafe for simultaneous read/read and write/write accesses.
// Stream is not threadsafe.
// A location of a chunk of a stream within a file.
struct StreamChunkLocation
{
// Offset into the file of the start of the chunk.
uint64_t mOffset;
// Compressed (stored) size of the chunk.
uint32_t mCompressedSize;
// Decompressed size of the chunk.
uint32_t mDecompressedSize;
inline bool operator == (const StreamChunkLocation& aOther) const {
return mOffset == aOther.mOffset
&& mCompressedSize == aOther.mCompressedSize
&& mDecompressedSize == aOther.mDecompressedSize;
}
};
enum class StreamName
{
Main,
Lock,
Event,
Assert,
Count
};
class File;
class Stream
{
friend class File;
// File this stream belongs to.
File* mFile;
// Prefix name for this stream.
StreamName mName;
// Index which, when combined with mName, uniquely identifies this stream in
// the file.
size_t mNameIndex;
// When writing, all chunks that have been flushed to disk. When reading, all
// chunks in the entire stream.
InfallibleVector<StreamChunkLocation> mChunks;
// Data buffer.
UniquePtr<char[]> mBuffer;
// The maximum number of bytes to buffer before compressing and writing to
// disk, and the maximum number of bytes that can be decompressed at once.
static const size_t BUFFER_MAX = 1024 * 1024;
// The capacity of mBuffer, at most BUFFER_MAX.
size_t mBufferSize;
// During reading, the number of accessible bytes in mBuffer.
size_t mBufferLength;
// The number of bytes read or written from mBuffer.
size_t mBufferPos;
// The number of uncompressed bytes read or written from the stream.
size_t mStreamPos;
// Any buffer available for use when decompressing or compressing data.
UniquePtr<char[]> mBallast;
size_t mBallastSize;
// The number of chunks that have been completely read or written. When
// writing, this equals mChunks.length().
size_t mChunkIndex;
// When writing, the number of chunks in this stream when the file was last
// flushed.
size_t mFlushedChunks;
Stream(File* aFile, StreamName aName, size_t aNameIndex)
: mFile(aFile)
, mName(aName)
, mNameIndex(aNameIndex)
, mBuffer(nullptr)
, mBufferSize(0)
, mBufferLength(0)
, mBufferPos(0)
, mStreamPos(0)
, mBallast(nullptr)
, mBallastSize(0)
, mChunkIndex(0)
, mFlushedChunks(0)
{}
public:
StreamName Name() const { return mName; }
size_t NameIndex() const { return mNameIndex; }
void ReadBytes(void* aData, size_t aSize);
void WriteBytes(const void* aData, size_t aSize);
size_t ReadScalar();
void WriteScalar(size_t aValue);
bool AtEnd();
inline void RecordOrReplayBytes(void* aData, size_t aSize) {
if (IsRecording()) {
WriteBytes(aData, aSize);
} else {
ReadBytes(aData, aSize);
}
}
template <typename T>
inline void RecordOrReplayScalar(T* aPtr) {
if (IsRecording()) {
WriteScalar((size_t)*aPtr);
} else {
*aPtr = (T)ReadScalar();
}
}
template <typename T>
inline void RecordOrReplayValue(T* aPtr) {
RecordOrReplayBytes(aPtr, sizeof(T));
}
// Make sure that a value is the same while replaying as it was while
// recording.
void CheckInput(size_t aValue);
// Add a thread event to this file. Each thread event in a file is followed
// by additional data specific to that event. Generally, CheckInput should be
// used while recording or replaying the data for a thread event so that any
// discrepancies with the recording are found immediately.
inline void RecordOrReplayThreadEvent(ThreadEvent aEvent) {
CheckInput((size_t)aEvent);
}
inline size_t StreamPosition() {
return mStreamPos;
}
private:
enum ShouldCopy {
DontCopyExistingData,
CopyExistingData
};
void EnsureMemory(UniquePtr<char[]>* aBuf, size_t* aSize, size_t aNeededSize, size_t aMaxSize,
ShouldCopy aCopy);
void Flush(bool aTakeLock);
static size_t BallastMaxSize();
};
class File
{
public:
enum Mode {
WRITE,
READ
};
friend class Stream;
private:
// Open file handle, or 0 if closed.
FileHandle mFd;
// Whether this file is open for writing or reading.
Mode mMode;
// When writing, the current offset into the file.
uint64_t mWriteOffset;
// The offset of the last index read or written to the file.
uint64_t mLastIndexOffset;
// All streams in this file, indexed by stream name and name index.
typedef InfallibleVector<UniquePtr<Stream>> StreamVector;
StreamVector mStreams[(size_t) StreamName::Count];
// Lock protecting access to this file.
SpinLock mLock;
// When writing, lock for synchronizing file flushes (writer) with other
// threads writing to streams in this file (readers).
ReadWriteSpinLock mStreamLock;
void Clear() {
mFd = 0;
mMode = READ;
mWriteOffset = 0;
mLastIndexOffset = 0;
for (auto& vector : mStreams) {
vector.clear();
}
PodZero(&mLock);
PodZero(&mStreamLock);
}
public:
File() { Clear(); }
~File() { Close(); }
bool Open(const char* aName, Mode aMode);
void Close();
bool OpenForWriting() const { return mFd && mMode == WRITE; }
bool OpenForReading() const { return mFd && mMode == READ; }
Stream* OpenStream(StreamName aName, size_t aNameIndex);
// Prevent/allow other threads to write to streams in this file.
void PreventStreamWrites() { mStreamLock.WriteLock(); }
void AllowStreamWrites() { mStreamLock.WriteUnlock(); }
// Flush any changes since the last Flush() call to disk, returning whether
// there were such changes.
bool Flush();
enum class ReadIndexResult {
InvalidFile,
EndOfFile,
FoundIndex
};
// Read any data added to the file by a Flush() call. aUpdatedStreams is
// optional and filled in with streams whose contents have changed, and may
// have duplicates.
ReadIndexResult ReadNextIndex(InfallibleVector<Stream*>* aUpdatedStreams);
private:
StreamChunkLocation WriteChunk(const char* aStart,
size_t aCompressedSize, size_t aDecompressedSize,
bool aTakeLock);
void ReadChunk(char* aDest, const StreamChunkLocation& aChunk);
};
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_File_h
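// Hedged sketch of how the File/Stream interface above might be driven while
// recording. WriteExampleRecording and the literal filename are illustrative
// assumptions, not code from the tree.
static bool
WriteExampleRecording()
{
  mozilla::recordreplay::File file;
  if (!file.Open("example-recording.bin", mozilla::recordreplay::File::WRITE)) {
    return false;
  }
  mozilla::recordreplay::Stream* stream =
    file.OpenStream(mozilla::recordreplay::StreamName::Event, /* aNameIndex = */ 0);
  size_t value = 42;
  stream->RecordOrReplayScalar(&value);  // writes 42 while recording
  file.Flush();                          // compress buffers and append an index
  file.Close();
  return true;
}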

View file

@ -0,0 +1,502 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/Assertions.h"
#include "mozilla/Maybe.h"
#include "mozilla/StaticMutex.h"
#include "HashTable.h"
#include "InfallibleVector.h"
#include "ProcessRecordReplay.h"
#include "ProcessRedirect.h"
#include "ValueIndex.h"
#include "PLDHashTable.h"
namespace mozilla {
namespace recordreplay {
// Hash tables frequently incorporate pointer values into the hash numbers
// they compute. Pointer values are not guaranteed to be the same between
// recording and replaying, so hash numbers and iteration order can differ
// between the two runs, which can in turn affect the order in which recorded
// events occur. HashTable stabilization is designed to deal with this
// problem for specific kinds of hash tables (PLHashTable and PLDHashTable)
// which are based on callbacks.
//
// When the table is constructed, if we are recording/replaying then the
// callbacks are replaced with an alternate set that produces consistent hash
// numbers between recording and replay. If during replay the additions and
// removals to the tables occur in the same order that they did during
// recording, then the structure of the tables and the order in which elements
// are visited during iteration will be the same.
//
// Ensuring that hash numbers are consistent is done as follows: for each
// table, we keep track of the keys that are in the table. When computing the
// hash of an arbitrary key, we look for a matching key in the table, using
// that key's hash if found. Otherwise, a new hash is generated from an
// incrementing counter.
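// Hedged, much-simplified sketch of the idea described above (all names here
// are hypothetical; the real implementation below also tracks original hash
// numbers and supports key removal): keys are assigned stable hash numbers
// from a counter in the order they are first seen, so a replay that adds keys
// in the same order computes the same hashes.
static std::unordered_map<const void*, uint32_t> gIllustrativeStableHashes;
static uint32_t gIllustrativeHashCounter;
static uint32_t
IllustrativeStableHash(const void* aKey)
{
  auto iter = gIllustrativeStableHashes.find(aKey);
  if (iter != gIllustrativeStableHashes.end()) {
    return iter->second;
  }
  uint32_t newHash = gIllustrativeHashCounter++;
  gIllustrativeStableHashes.emplace(aKey, newHash);
  return newHash;
}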
typedef uint32_t HashNumber;
class StableHashTableInfo
{
// Magic number for attempting to determine whether we are dealing with an
// actual StableHashTableInfo. Despite our best efforts some hashtables do
// not go through stabilization (e.g. they have static constructors that run
// before record/replay state is initialized).
size_t mMagic;
static const size_t MagicNumber = 0xDEADBEEFDEADBEEF;
// Information about a key in the table: the key pointer, along with the new
// hash number we have generated for the key.
struct KeyInfo {
const void* mKey;
HashNumber mNewHash;
};
// Table mapping original hash numbers (produced by the table's hash
// function) to a vector with all keys sharing that original hash number.
struct HashInfo {
InfallibleVector<KeyInfo> mKeys;
};
typedef std::unordered_map<HashNumber, UniquePtr<HashInfo>> HashToKeyMap;
HashToKeyMap mHashToKey;
// Table mapping key pointers to their original hash number.
typedef std::unordered_map<const void*, HashNumber> KeyToHashMap;
KeyToHashMap mKeyToHash;
// The last key which the hash function was called on, and the new hash
// number which we generated for that key.
const void* mLastKey;
HashNumber mLastNewHash;
// Counter for generating new hash numbers for entries added to the table.
// This increases monotonically, though it is fine if it overflows.
uint32_t mHashGenerator;
// Buffer with executable memory for use in binding functions.
uint8_t* mCallbackStorage;
static const size_t CallbackStorageCapacity = 4096;
// Get an existing key in the table.
KeyInfo* FindKeyInfo(HashNumber aOriginalHash, const void* aKey, HashInfo** aHashInfo = nullptr) {
HashToKeyMap::iterator iter = mHashToKey.find(aOriginalHash);
MOZ_ASSERT(iter != mHashToKey.end());
HashInfo* hashInfo = iter->second.get();
for (KeyInfo& keyInfo : hashInfo->mKeys) {
if (keyInfo.mKey == aKey) {
if (aHashInfo) {
*aHashInfo = hashInfo;
}
return &keyInfo;
}
}
MOZ_CRASH();
}
public:
StableHashTableInfo()
: mMagic(MagicNumber)
, mLastKey(nullptr)
, mLastNewHash(0)
, mHashGenerator(0)
, mCallbackStorage(nullptr)
{
// Use AllocateMemory, as the result will have RWX permissions.
mCallbackStorage = (uint8_t*) AllocateMemory(CallbackStorageCapacity, MemoryKind::Tracked);
}
~StableHashTableInfo() {
MOZ_ASSERT(mHashToKey.empty());
DeallocateMemory(mCallbackStorage, CallbackStorageCapacity, MemoryKind::Tracked);
}
bool AppearsValid() {
return mMagic == MagicNumber;
}
void AddKey(HashNumber aOriginalHash, const void* aKey, HashNumber aNewHash) {
HashToKeyMap::iterator iter = mHashToKey.find(aOriginalHash);
if (iter == mHashToKey.end()) {
iter = mHashToKey.insert(HashToKeyMap::value_type(aOriginalHash, MakeUnique<HashInfo>())).first;
}
HashInfo* hashInfo = iter->second.get();
KeyInfo key;
key.mKey = aKey;
key.mNewHash = aNewHash;
hashInfo->mKeys.append(key);
mKeyToHash.insert(KeyToHashMap::value_type(aKey, aOriginalHash));
}
void RemoveKey(HashNumber aOriginalHash, const void* aKey) {
HashInfo* hashInfo;
KeyInfo* keyInfo = FindKeyInfo(aOriginalHash, aKey, &hashInfo);
hashInfo->mKeys.erase(keyInfo);
if (hashInfo->mKeys.length() == 0) {
mHashToKey.erase(aOriginalHash);
}
mKeyToHash.erase(aKey);
}
HashNumber FindKeyHash(HashNumber aOriginalHash, const void* aKey) {
KeyInfo* info = FindKeyInfo(aOriginalHash, aKey);
return info->mNewHash;
}
// Look for a key in the table with a matching original hash and for which
// aMatch() returns true, returning its new hash number if found.
bool HasMatchingKey(HashNumber aOriginalHash,
const std::function<bool(const void*)>& aMatch,
HashNumber* aNewHash)
{
HashToKeyMap::const_iterator iter = mHashToKey.find(aOriginalHash);
if (iter != mHashToKey.end()) {
HashInfo* hashInfo = iter->second.get();
for (const KeyInfo& keyInfo : hashInfo->mKeys) {
if (aMatch(keyInfo.mKey)) {
*aNewHash = keyInfo.mNewHash;
return true;
}
}
}
return false;
}
HashNumber GetOriginalHashNumber(const void* aKey) {
KeyToHashMap::iterator iter = mKeyToHash.find(aKey);
MOZ_ASSERT(iter != mKeyToHash.end());
return iter->second;
}
class Assembler : public recordreplay::Assembler {
public:
explicit Assembler(StableHashTableInfo& aInfo)
: recordreplay::Assembler(aInfo.mCallbackStorage, CallbackStorageCapacity)
{}
};
// Use the callback storage buffer to create a new function T which has one
// fewer argument than S and calls S with aArgument bound to the last
// argument position. See BindFunctionArgument in ProcessRedirect.h
template <typename S, typename T>
void NewBoundFunction(Assembler& aAssembler, S aFunction, void* aArgument,
size_t aArgumentPosition, T* aTarget) {
void* nfn = BindFunctionArgument(BitwiseCast<void*>(aFunction), aArgument, aArgumentPosition,
aAssembler);
BitwiseCast(nfn, aTarget);
}
// Set the last queried key for this table, and generate a new hash number
// for it.
HashNumber SetLastKey(const void* aKey) {
// Remember the last key queried, so that if it is then added to the table
// we know what hash number to use.
mLastKey = aKey;
mLastNewHash = mHashGenerator++;
return mLastNewHash;
}
bool HasLastKey() {
return !!mLastKey;
}
HashNumber GetLastNewHash(const void* aKey) {
MOZ_ASSERT(aKey == mLastKey);
return mLastNewHash;
}
bool IsEmpty() { return mHashToKey.empty(); }
// Move aOther's contents into this one and clear aOther out. Callbacks for
// the tables are left alone.
void MoveContentsFrom(StableHashTableInfo& aOther) {
mHashToKey = std::move(aOther.mHashToKey);
mKeyToHash = std::move(aOther.mKeyToHash);
mHashGenerator = aOther.mHashGenerator;
aOther.mHashToKey.clear();
aOther.mKeyToHash.clear();
aOther.mHashGenerator = 0;
mLastKey = aOther.mLastKey = nullptr;
mLastNewHash = aOther.mLastNewHash = 0;
}
};
///////////////////////////////////////////////////////////////////////////////
// PLHashTable Stabilization
///////////////////////////////////////////////////////////////////////////////
// For each PLHashTable in the process, a PLHashTableInfo is generated. This
// structure becomes the |allocPriv| for the table, handled by the new
// callbacks given to it.
struct PLHashTableInfo : public StableHashTableInfo
{
// Original callbacks for the table.
PLHashFunction mKeyHash;
PLHashComparator mKeyCompare;
PLHashComparator mValueCompare;
const PLHashAllocOps* mAllocOps;
// Original private value for the table.
void* mAllocPrivate;
PLHashTableInfo(PLHashFunction aKeyHash,
PLHashComparator aKeyCompare, PLHashComparator aValueCompare,
const PLHashAllocOps* aAllocOps, void* aAllocPrivate)
: mKeyHash(aKeyHash),
mKeyCompare(aKeyCompare),
mValueCompare(aValueCompare),
mAllocOps(aAllocOps),
mAllocPrivate(aAllocPrivate)
{}
static PLHashTableInfo* FromPrivate(void* aAllocPrivate) {
PLHashTableInfo* info = reinterpret_cast<PLHashTableInfo*>(aAllocPrivate);
MOZ_RELEASE_ASSERT(info->AppearsValid());
return info;
}
};
static void*
WrapPLHashAllocTable(void* aAllocPrivate, PRSize aSize)
{
PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
return info->mAllocOps
? info->mAllocOps->allocTable(info->mAllocPrivate, aSize)
: malloc(aSize);
}
static void
WrapPLHashFreeTable(void* aAllocPrivate, void* aItem)
{
PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
if (info->mAllocOps) {
info->mAllocOps->freeTable(info->mAllocPrivate, aItem);
} else {
free(aItem);
}
}
static PLHashEntry*
WrapPLHashAllocEntry(void* aAllocPrivate, const void* aKey)
{
PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
if (info->HasLastKey()) {
uint32_t originalHash = info->mKeyHash(aKey);
info->AddKey(originalHash, aKey, info->GetLastNewHash(aKey));
} else {
// A few PLHashTables are manipulated directly by Gecko code, in which case
// the hashes are supplied directly to the table and we don't have a chance
// to modify them. Fortunately, none of these tables are iterated in a way
// that can cause the replay to diverge, so just punt in these cases.
MOZ_ASSERT(info->IsEmpty());
}
return info->mAllocOps
? info->mAllocOps->allocEntry(info->mAllocPrivate, aKey)
: (PLHashEntry*) malloc(sizeof(PLHashEntry));
}
static void
WrapPLHashFreeEntry(void *aAllocPrivate, PLHashEntry *he, PRUintn flag)
{
PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
// Ignore empty tables, due to the raw table manipulation described above.
if (flag == HT_FREE_ENTRY && !info->IsEmpty()) {
uint32_t originalHash = info->GetOriginalHashNumber(he->key);
info->RemoveKey(originalHash, he->key);
}
if (info->mAllocOps) {
info->mAllocOps->freeEntry(info->mAllocPrivate, he, flag);
} else if (flag == HT_FREE_ENTRY) {
free(he);
}
}
static PLHashAllocOps gWrapPLHashAllocOps = {
WrapPLHashAllocTable, WrapPLHashFreeTable,
WrapPLHashAllocEntry, WrapPLHashFreeEntry
};
static uint32_t
PLHashComputeHash(void* aKey, PLHashTableInfo* aInfo)
{
uint32_t originalHash = aInfo->mKeyHash(aKey);
HashNumber newHash;
if (aInfo->HasMatchingKey(originalHash,
[=](const void* aExistingKey) {
return aInfo->mKeyCompare(aKey, aExistingKey);
}, &newHash)) {
return newHash;
}
return aInfo->SetLastKey(aKey);
}
void
GeneratePLHashTableCallbacks(PLHashFunction* aKeyHash,
PLHashComparator* aKeyCompare,
PLHashComparator* aValueCompare,
const PLHashAllocOps** aAllocOps,
void** aAllocPrivate)
{
PLHashTableInfo* info = new PLHashTableInfo(*aKeyHash, *aKeyCompare, *aValueCompare,
*aAllocOps, *aAllocPrivate);
PLHashTableInfo::Assembler assembler(*info);
info->NewBoundFunction(assembler, PLHashComputeHash, info, 1, aKeyHash);
*aAllocOps = &gWrapPLHashAllocOps;
*aAllocPrivate = info;
}
void
DestroyPLHashTableCallbacks(void* aAllocPrivate)
{
PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
delete info;
}
///////////////////////////////////////////////////////////////////////////////
// PLDHashTable Stabilization
///////////////////////////////////////////////////////////////////////////////
// For each PLDHashTable in the process, a PLDHashTableInfo is generated. This
// structure is supplied to its callbacks using bound functions.
struct PLDHashTableInfo : public StableHashTableInfo
{
// Original callbacks for the table.
const PLDHashTableOps* mOps;
// Wrapper callbacks for the table.
PLDHashTableOps mNewOps;
explicit PLDHashTableInfo(const PLDHashTableOps* aOps)
: mOps(aOps)
{
PodZero(&mNewOps);
}
static PLDHashTableInfo* MaybeFromOps(const PLDHashTableOps* aOps) {
PLDHashTableInfo* res = reinterpret_cast<PLDHashTableInfo*>
((uint8_t*)aOps - offsetof(PLDHashTableInfo, mNewOps));
return res->AppearsValid() ? res : nullptr;
}
static PLDHashTableInfo* FromOps(const PLDHashTableOps* aOps) {
PLDHashTableInfo* res = MaybeFromOps(aOps);
MOZ_RELEASE_ASSERT(res);
return res;
}
};
static PLDHashNumber
PLDHashTableComputeHash(const void* aKey, PLDHashTableInfo* aInfo)
{
uint32_t originalHash = aInfo->mOps->hashKey(aKey);
HashNumber newHash;
if (aInfo->HasMatchingKey(originalHash,
[=](const void* aExistingKey) {
return aInfo->mOps->matchEntry((PLDHashEntryHdr*) aExistingKey, aKey);
}, &newHash)) {
return newHash;
}
return aInfo->SetLastKey(aKey);
}
static void
PLDHashTableMoveEntry(PLDHashTable* aTable, const PLDHashEntryHdr* aFrom, PLDHashEntryHdr* aTo,
PLDHashTableInfo* aInfo)
{
aInfo->mOps->moveEntry(aTable, aFrom, aTo);
uint32_t originalHash = aInfo->GetOriginalHashNumber(aFrom);
uint32_t newHash = aInfo->FindKeyHash(originalHash, aFrom);
aInfo->RemoveKey(originalHash, aFrom);
aInfo->AddKey(originalHash, aTo, newHash);
}
static void
PLDHashTableClearEntry(PLDHashTable* aTable, PLDHashEntryHdr* aEntry, PLDHashTableInfo* aInfo)
{
aInfo->mOps->clearEntry(aTable, aEntry);
uint32_t originalHash = aInfo->GetOriginalHashNumber(aEntry);
aInfo->RemoveKey(originalHash, aEntry);
}
static void
PLDHashTableInitEntry(PLDHashEntryHdr* aEntry, const void* aKey, PLDHashTableInfo* aInfo)
{
if (aInfo->mOps->initEntry) {
aInfo->mOps->initEntry(aEntry, aKey);
}
uint32_t originalHash = aInfo->mOps->hashKey(aKey);
aInfo->AddKey(originalHash, aEntry, aInfo->GetLastNewHash(aKey));
}
extern "C" {
MOZ_EXPORT const PLDHashTableOps*
RecordReplayInterface_InternalGeneratePLDHashTableCallbacks(const PLDHashTableOps* aOps)
{
PLDHashTableInfo* info = new PLDHashTableInfo(aOps);
PLDHashTableInfo::Assembler assembler(*info);
info->NewBoundFunction(assembler, PLDHashTableComputeHash, info, 1, &info->mNewOps.hashKey);
info->mNewOps.matchEntry = aOps->matchEntry;
info->NewBoundFunction(assembler, PLDHashTableMoveEntry, info, 3, &info->mNewOps.moveEntry);
info->NewBoundFunction(assembler, PLDHashTableClearEntry, info, 2, &info->mNewOps.clearEntry);
info->NewBoundFunction(assembler, PLDHashTableInitEntry, info, 2, &info->mNewOps.initEntry);
return &info->mNewOps;
}
MOZ_EXPORT const PLDHashTableOps*
RecordReplayInterface_InternalUnwrapPLDHashTableCallbacks(const PLDHashTableOps* aOps)
{
PLDHashTableInfo* info = PLDHashTableInfo::FromOps(aOps);
return info->mOps;
}
MOZ_EXPORT void
RecordReplayInterface_InternalDestroyPLDHashTableCallbacks(const PLDHashTableOps* aOps)
{
// Primordial PLDHashTables used in the copy constructor might not have any ops.
if (!aOps) {
return;
}
// Note: PLDHashTables with static ctors might have been constructed before
// record/replay state was initialized and have their normal ops. Check the
// magic number via MaybeFromOps before destroying the info.
PLDHashTableInfo* info = PLDHashTableInfo::MaybeFromOps(aOps);
delete info;
}
MOZ_EXPORT void
RecordReplayInterface_InternalMovePLDHashTableContents(const PLDHashTableOps* aFirstOps,
const PLDHashTableOps* aSecondOps)
{
PLDHashTableInfo* firstInfo = PLDHashTableInfo::FromOps(aFirstOps);
PLDHashTableInfo* secondInfo = PLDHashTableInfo::FromOps(aSecondOps);
secondInfo->MoveContentsFrom(*firstInfo);
}
} // extern "C"
} // namespace recordreplay
} // namespace mozilla

View file

@ -0,0 +1,27 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_HashTable_h
#define mozilla_recordreplay_HashTable_h
#include "plhash.h"
namespace mozilla {
namespace recordreplay {
// Routines for creating specialized callbacks for PLHashTables that preserve
// iteration order, similar to those for PLDHashTables in RecordReplay.h.
void GeneratePLHashTableCallbacks(PLHashFunction* aKeyHash,
PLHashComparator* aKeyCompare,
PLHashComparator* aValueCompare,
const PLHashAllocOps** aAllocOps,
void** aAllocPrivate);
void DestroyPLHashTableCallbacks(void* aAllocPrivate);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_HashTable_h
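// Hedged sketch of wrapping a PLHashTable's callbacks with the routines
// declared above. NewStablePLHashTable is a hypothetical helper for
// illustration; actual callers in the tree may differ.
static PLHashTable*
NewStablePLHashTable(PRUint32 aNumBuckets, PLHashFunction aKeyHash,
                     PLHashComparator aKeyCompare, PLHashComparator aValueCompare)
{
  const PLHashAllocOps* allocOps = nullptr;
  void* allocPrivate = nullptr;
  mozilla::recordreplay::GeneratePLHashTableCallbacks(&aKeyHash, &aKeyCompare,
                                                      &aValueCompare,
                                                      &allocOps, &allocPrivate);
  // The wrapped callbacks and private data are handed to the table as usual.
  return PL_NewHashTable(aNumBuckets, aKeyHash, aKeyCompare, aValueCompare,
                         allocOps, allocPrivate);
}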

View file

@ -0,0 +1,140 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_InfallibleVector_h
#define mozilla_recordreplay_InfallibleVector_h
#include "mozilla/Vector.h"
namespace mozilla {
namespace recordreplay {
// This file declares two classes, InfallibleVector and StaticInfallibleVector,
// which behave like normal vectors except that all their operations are
// infallible: we will immediately crash if any operation on the underlying
// vector fails.
//
// StaticInfallibleVector is designed for use in static storage, and does not
// have a static constructor or destructor in release builds.
template<typename Outer, typename T, size_t MinInlineCapacity, class AllocPolicy>
class InfallibleVectorOperations
{
typedef Vector<T, MinInlineCapacity, AllocPolicy> InnerVector;
InnerVector& Vector() { return static_cast<Outer*>(this)->Vector(); }
const InnerVector& Vector() const { return static_cast<const Outer*>(this)->Vector(); }
public:
size_t length() const { return Vector().length(); }
bool empty() const { return Vector().empty(); }
T* begin() { return Vector().begin(); }
const T* begin() const { return Vector().begin(); }
T* end() { return Vector().end(); }
const T* end() const { return Vector().end(); }
T& operator[](size_t aIndex) { return Vector()[aIndex]; }
const T& operator[](size_t aIndex) const { return Vector()[aIndex]; }
T& back() { return Vector().back(); }
const T& back() const { return Vector().back(); }
void popBack() { Vector().popBack(); }
T popCopy() { return Vector().popCopy(); }
void erase(T* aT) { Vector().erase(aT); }
void clear() { Vector().clear(); }
void reserve(size_t aRequest) {
if (!Vector().reserve(aRequest)) {
MOZ_CRASH();
}
}
void resize(size_t aNewLength) {
if (!Vector().resize(aNewLength)) {
MOZ_CRASH();
}
}
template<typename U> void append(U&& aU) {
if (!Vector().append(std::forward<U>(aU))) {
MOZ_CRASH();
}
}
template<typename U> void append(const U* aBegin, size_t aLength) {
if (!Vector().append(aBegin, aLength)) {
MOZ_CRASH();
}
}
void appendN(const T& aT, size_t aN) {
if (!Vector().appendN(aT, aN)) {
MOZ_CRASH();
}
}
template<typename... Args> void emplaceBack(Args&&... aArgs) {
if (!Vector().emplaceBack(std::forward<Args>(aArgs)...)) {
MOZ_CRASH();
}
}
template<typename... Args> void infallibleEmplaceBack(Args&&... aArgs) {
Vector().infallibleEmplaceBack(std::forward<Args>(aArgs)...);
}
template<typename U> void insert(T* aP, U&& aVal) {
if (!Vector().insert(aP, std::forward<U>(aVal))) {
MOZ_CRASH();
}
}
};
template<typename T,
size_t MinInlineCapacity = 0,
class AllocPolicy = MallocAllocPolicy>
class InfallibleVector
: public InfallibleVectorOperations<InfallibleVector<T, MinInlineCapacity, AllocPolicy>,
T, MinInlineCapacity, AllocPolicy>
{
typedef Vector<T, MinInlineCapacity, AllocPolicy> InnerVector;
InnerVector mVector;
public:
InnerVector& Vector() { return mVector; }
const InnerVector& Vector() const { return mVector; }
};
template<typename T,
size_t MinInlineCapacity = 0,
class AllocPolicy = MallocAllocPolicy>
class StaticInfallibleVector
: public InfallibleVectorOperations<StaticInfallibleVector<T, MinInlineCapacity, AllocPolicy>,
T, MinInlineCapacity, AllocPolicy>
{
typedef Vector<T, MinInlineCapacity, AllocPolicy> InnerVector;
mutable InnerVector* mVector;
void EnsureVector() const {
if (!mVector) {
// N.B. This class can only be used with alloc policies that have a
// default constructor.
AllocPolicy policy;
void* memory = policy.template pod_malloc<InnerVector>(1);
MOZ_RELEASE_ASSERT(memory);
mVector = new(memory) InnerVector();
}
}
public:
// StaticInfallibleVectors are allocated in static storage and should not have
// constructors. Their memory is initially zero.
InnerVector& Vector() { EnsureVector(); return *mVector; }
const InnerVector& Vector() const { EnsureVector(); return *mVector; }
};
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_InfallibleVector_h
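// Hedged usage sketch for the vectors declared above (SumOfSquares is a
// hypothetical example): every operation crashes on allocation failure
// instead of reporting it to the caller.
static size_t
SumOfSquares(size_t aLimit)
{
  mozilla::recordreplay::InfallibleVector<size_t> squares;
  for (size_t i = 0; i < aLimit; i++) {
    squares.append(i * i);  // crashes rather than failing on OOM
  }
  size_t sum = 0;
  for (size_t value : squares) {
    sum += value;
  }
  return sum;
}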

View file

@ -0,0 +1,236 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Lock.h"
#include "mozilla/StaticMutex.h"
#include "ChunkAllocator.h"
#include "InfallibleVector.h"
#include "SpinLock.h"
#include "Thread.h"
#include <unordered_map>
namespace mozilla {
namespace recordreplay {
// The total number of locks that have been created. Reserved IDs:
// 0: Locks that are not recorded.
// 1: Used by gAtomicLock for atomic accesses.
//
// This is only used while recording, and increments gradually as locks are
// created.
static const size_t gAtomicLockId = 1;
static Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gNumLocks;
struct LockAcquires
{
// List of thread acquire orders for the lock. This is protected by the lock
// itself.
Stream* mAcquires;
// During replay, the next thread id to acquire the lock. Writes to this are
// protected by the lock itself, though reads may occur on other threads.
Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mNextOwner;
static const size_t NoNextOwner = 0;
void ReadAndNotifyNextOwner(Thread* aCurrentThread) {
MOZ_RELEASE_ASSERT(IsReplaying());
if (mAcquires->AtEnd()) {
mNextOwner = NoNextOwner;
} else {
mNextOwner = mAcquires->ReadScalar();
if (mNextOwner != aCurrentThread->Id()) {
Thread::Notify(mNextOwner);
}
}
}
};
// Acquires for each lock, indexed by the lock ID.
static ChunkAllocator<LockAcquires> gLockAcquires;
///////////////////////////////////////////////////////////////////////////////
// Locking Interface
///////////////////////////////////////////////////////////////////////////////
// Table mapping native lock pointers to the associated Lock structure, for
// every recorded lock in existence.
typedef std::unordered_map<void*, Lock*> LockMap;
static LockMap* gLocks;
static ReadWriteSpinLock gLocksLock;
/* static */ void
Lock::New(void* aNativeLock)
{
if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
Destroy(aNativeLock); // Clean up any old lock, as below.
return;
}
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
RecordReplayAssert("CreateLock");
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::CreateLock);
size_t id;
if (IsRecording()) {
id = gNumLocks++;
}
thread->Events().RecordOrReplayScalar(&id);
LockAcquires* info = gLockAcquires.Create(id);
info->mAcquires = gRecordingFile->OpenStream(StreamName::Lock, id);
if (IsReplaying()) {
info->ReadAndNotifyNextOwner(thread);
}
// Tolerate new locks being created with identical pointers, even if there
// was no DestroyLock call for the old one.
Destroy(aNativeLock);
AutoWriteSpinLock ex(gLocksLock);
thread->BeginDisallowEvents();
if (!gLocks) {
gLocks = new LockMap();
}
gLocks->insert(LockMap::value_type(aNativeLock, new Lock(id)));
thread->EndDisallowEvents();
}
/* static */ void
Lock::Destroy(void* aNativeLock)
{
Lock* lock = nullptr;
{
AutoWriteSpinLock ex(gLocksLock);
if (gLocks) {
LockMap::iterator iter = gLocks->find(aNativeLock);
if (iter != gLocks->end()) {
lock = iter->second;
gLocks->erase(iter);
}
}
}
delete lock;
}
/* static */ Lock*
Lock::Find(void* aNativeLock)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
AutoReadSpinLock ex(gLocksLock);
if (gLocks) {
LockMap::iterator iter = gLocks->find(aNativeLock);
if (iter != gLocks->end()) {
// Now that we know the lock is recorded, check whether thread events
// should be generated right now. Doing things in this order avoids
// reentrancy issues when initializing the thread-local state used by
// these calls.
if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
return nullptr;
}
return iter->second;
}
}
return nullptr;
}
void
Lock::Enter(const std::function<void()>& aCallback)
{
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough() && !HasDivergedFromRecording());
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
RecordReplayAssert("Lock %d", (int) mId);
// Include an event in each thread's record when a lock acquire begins. This
// is not required by the replay itself, but it lets us check that the lock
// acquire order is consistent with the recording, so that we fail explicitly
// instead of deadlocking.
Thread* thread = Thread::Current();
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Lock);
thread->Events().CheckInput(mId);
LockAcquires* acquires = gLockAcquires.Get(mId);
if (IsRecording()) {
acquires->mAcquires->WriteScalar(thread->Id());
} else {
// Wait until this thread is next in line to acquire the lock.
while (thread->Id() != acquires->mNextOwner) {
Thread::Wait();
}
// Acquire the lock before updating the next owner.
aCallback();
acquires->ReadAndNotifyNextOwner(thread);
}
}
struct AtomicLock : public detail::MutexImpl
{
using detail::MutexImpl::lock;
using detail::MutexImpl::unlock;
};
// Lock which is held during code sections that run atomically. This is a raw
// mozglue mutex (detail::MutexImpl) rather than an OffTheBooksMutex because
// the latter performs atomic operations during initialization.
static AtomicLock* gAtomicLock = nullptr;
/* static */ void
Lock::InitializeLocks()
{
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
gNumLocks = gAtomicLockId;
gAtomicLock = new AtomicLock();
MOZ_RELEASE_ASSERT(!IsRecording() || gNumLocks == gAtomicLockId + 1);
}
/* static */ void
Lock::LockAquiresUpdated(size_t aLockId)
{
LockAcquires* acquires = gLockAcquires.MaybeGet(aLockId);
if (acquires && acquires->mAcquires && acquires->mNextOwner == LockAcquires::NoNextOwner) {
acquires->ReadAndNotifyNextOwner(Thread::Current());
}
}
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_InternalBeginOrderedAtomicAccess()
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
if (!gInitializationFailureMessage) {
gAtomicLock->lock();
}
}
MOZ_EXPORT void
RecordReplayInterface_InternalEndOrderedAtomicAccess()
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
if (!gInitializationFailureMessage) {
gAtomicLock->unlock();
}
}
} // extern "C"
} // namespace recordreplay
} // namespace mozilla

View file

@ -0,0 +1,68 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Lock_h
#define mozilla_recordreplay_Lock_h
#include "mozilla/PodOperations.h"
#include "mozilla/Types.h"
#include "File.h"
namespace mozilla {
namespace recordreplay {
// Recorded Locks Overview.
//
// Each platform has some types used for native locks (e.g. pthread_mutex_t or
// CRITICAL_SECTION). System APIs which operate on these native locks are
// redirected so that lock behavior can be tracked. If a native lock is
// created when thread events are not passed through, then that native lock is
// recorded, and its acquires will be replayed in the same order in which they
// originally occurred.
// Information about a recorded lock.
class Lock
{
// Unique ID for this lock.
size_t mId;
public:
explicit Lock(size_t aId)
: mId(aId)
{
MOZ_ASSERT(aId);
}
size_t Id() { return mId; }
// When recording, this is called after the lock has been acquired, and
// records the acquire in the lock's acquire order stream. When replaying,
// this is called before the lock has been acquired, and blocks the thread
// until it is next in line to acquire the lock before acquiring it via
// aCallback.
void Enter(const std::function<void()>& aCallback);
// Create a new Lock corresponding to a native lock, with a fresh ID.
static void New(void* aNativeLock);
// Destroy any Lock associated with a native lock.
static void Destroy(void* aNativeLock);
// Get the recorded Lock for a native lock if there is one, otherwise null.
static Lock* Find(void* aNativeLock);
// Initialize locking state.
static void InitializeLocks();
// Note that new data has been read into a lock's acquires stream.
static void LockAquiresUpdated(size_t aLockId);
};
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_Lock_h
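// Hedged sketch of how a redirected native lock-acquire call might use the
// Lock class above, following the Enter() comment. WrappedMutexLock is a
// hypothetical name (assumes <pthread.h>); the real redirection code may
// differ.
static void
WrappedMutexLock(pthread_mutex_t* aMutex)
{
  mozilla::recordreplay::Lock* lock = mozilla::recordreplay::Lock::Find(aMutex);
  if (!lock) {
    pthread_mutex_lock(aMutex);  // not a recorded lock
    return;
  }
  if (mozilla::recordreplay::IsRecording()) {
    pthread_mutex_lock(aMutex);  // acquire first...
    lock->Enter([]() {});        // ...then record the acquire order
  } else {
    // While replaying, Enter() blocks until this thread is next in line and
    // acquires the lock via the callback.
    lock->Enter([=]() { pthread_mutex_lock(aMutex); });
  }
}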

The diff for this file is not shown because of its large size. Load diff

View file

@ -0,0 +1,128 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_MemorySnapshot_h
#define mozilla_recordreplay_MemorySnapshot_h
#include "mozilla/Types.h"
#include "ProcessRecordReplay.h"
namespace mozilla {
namespace recordreplay {
// Memory Snapshots Overview.
//
// As described in ProcessRewind.h, some subset of the checkpoints which are
// reached during execution are saved, so that their state can be restored
// later. Memory snapshots are used to save and restore the contents of all
// heap memory: everything except thread stacks (see ThreadSnapshot.h for
// saving and restoring these) and untracked memory (which is not saved or
// restored, see ProcessRecordReplay.h).
//
// Each memory snapshot is a diff of the heap memory contents compared to the
// next one. See MemorySnapshot.cpp for how diffs are represented and computed.
//
// Rewinding must restore the exact contents of heap memory that existed when
// the target checkpoint was reached. Because of this, memory that is allocated
// at a point when a checkpoint is saved will never actually be returned to the
// system. We instead keep a set of free blocks that are unused at the current
// point of execution and are available to satisfy new allocations.
// Make sure that a block of memory in a fixed allocation is already allocated.
void CheckFixedMemory(void* aAddress, size_t aSize);
// After marking a block of memory in a fixed allocation as non-writable,
// restore writability to any dirty pages in the range.
void RestoreWritableFixedMemory(void* aAddress, size_t aSize);
// Allocate memory, trying to use a specific address if provided but only if
// it is free.
void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind);
// Note a range of memory that was just allocated from the system, and the
// kind of memory allocation that was performed.
void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, MemoryKind aKind);
// Initialize the memory snapshots system.
void InitializeMemorySnapshots();
// Take the first heap memory snapshot.
void TakeFirstMemorySnapshot();
// Take a differential heap memory snapshot compared to the last one,
// associated with the last saved checkpoint.
void TakeDiffMemorySnapshot();
// Restore all heap memory to its state when the most recent checkpoint was
// saved. This requires no checkpoints to have been saved after this one.
void RestoreMemoryToLastSavedCheckpoint();
// Restore all heap memory to its state at a checkpoint where a complete diff
// was saved vs. the following saved checkpoint. This requires that no
// tracked heap memory has been changed since the last saved checkpoint.
void RestoreMemoryToLastSavedDiffCheckpoint();
// Erase all information from the last diff snapshot taken, so that tracked
// heap memory changes are with respect to the previous checkpoint.
void EraseLastSavedDiffMemorySnapshot();
// Set whether to allow changes to tracked heap memory at this point. If such
// changes occur when they are not allowed then the process will crash.
void SetMemoryChangesAllowed(bool aAllowed);
struct MOZ_RAII AutoDisallowMemoryChanges
{
AutoDisallowMemoryChanges() { SetMemoryChangesAllowed(false); }
~AutoDisallowMemoryChanges() { SetMemoryChangesAllowed(true); }
};
// After a SEGV on the specified address, check if the violation occurred due
// to the memory having been write protected by the snapshot mechanism. This
// function returns whether the fault has been handled and execution may
// continue.
bool HandleDirtyMemoryFault(uint8_t* aAddress);
// For debugging, note a point where we hit an unrecoverable failure and try
// to make things easier for the debugger.
void UnrecoverableSnapshotFailure();
// After rewinding, mark all memory that has been allocated since the snapshot
// was taken as free.
void FixupFreeRegionsAfterRewind();
// Set whether to allow intentionally crashing in this process via the
// RecordReplayDirective method.
void SetAllowIntentionalCrashes(bool aAllowed);
// When WANT_COUNTDOWN_THREAD is defined (see MemorySnapshot.cpp), set a count
// that, after a thread consumes it, causes the thread to report a fatal error.
// This is used for debugging and is a workaround for lldb often being unable
// to interrupt a running process.
void StartCountdown(size_t aCount);
// Per StartCountdown, set a countdown and remove it on destruction.
struct MOZ_RAII AutoCountdown
{
explicit AutoCountdown(size_t aCount);
~AutoCountdown();
};
// Initialize the thread consuming the countdown.
void InitializeCountdownThread();
// This is an alternative to memmove/memcpy that can be called in areas where
// faults in write protected memory are not allowed. It's hard to avoid dynamic
// code loading when calling memmove/memcpy directly.
void MemoryMove(void* aDst, const void* aSrc, size_t aSize);
// Similarly, zero out a range of memory without doing anything weird with
// dynamic code loading.
void MemoryZero(void* aDst, size_t aSize);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_MemorySnapshot_h
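// Hedged usage sketch for the RAII helpers above (IllustrativeGuardedSection
// is a hypothetical example): tracked heap memory must not be written while
// an AutoDisallowMemoryChanges guard is live, and a countdown can be armed
// around a suspect region of code for debugging.
static void
IllustrativeGuardedSection()
{
  mozilla::recordreplay::AutoDisallowMemoryChanges disallow;  // crash on tracked writes
  mozilla::recordreplay::AutoCountdown countdown(1000000);    // fatal error after the count
  // ... code that should only touch untracked state ...
}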

View file

@ -0,0 +1,79 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Monitor_h
#define mozilla_recordreplay_Monitor_h
#include "mozilla/PlatformConditionVariable.h"
namespace mozilla {
namespace recordreplay {
// Simple wrapper around mozglue mutexes and condvars. This is a lighter weight
// abstraction than mozilla::Monitor and has simpler interactions with the
// record/replay system.
class Monitor : public detail::MutexImpl
{
public:
Monitor()
: detail::MutexImpl(Behavior::DontPreserve)
{}
void Lock() { detail::MutexImpl::lock(); }
void Unlock() { detail::MutexImpl::unlock(); }
void Wait() { mCondVar.wait(*this); }
void Notify() { mCondVar.notify_one(); }
void NotifyAll() { mCondVar.notify_all(); }
void WaitUntil(TimeStamp aTime) {
AutoEnsurePassThroughThreadEvents pt;
mCondVar.wait_for(*this, aTime - TimeStamp::Now());
}
private:
detail::ConditionVariableImpl mCondVar;
};
// RAII class to lock a monitor.
struct MOZ_RAII MonitorAutoLock
{
explicit MonitorAutoLock(Monitor& aMonitor)
: mMonitor(aMonitor)
{
mMonitor.Lock();
}
~MonitorAutoLock()
{
mMonitor.Unlock();
}
private:
Monitor& mMonitor;
};
// RAII class to unlock a monitor.
struct MOZ_RAII MonitorAutoUnlock
{
explicit MonitorAutoUnlock(Monitor& aMonitor)
: mMonitor(aMonitor)
{
mMonitor.Unlock();
}
~MonitorAutoUnlock()
{
mMonitor.Lock();
}
private:
Monitor& mMonitor;
};
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_Monitor_h
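// Hedged usage sketch for Monitor and its RAII helpers above; gExampleMonitor,
// gExampleReady, and the two functions are hypothetical.
static mozilla::recordreplay::Monitor gExampleMonitor;
static bool gExampleReady;
static void
WaitUntilExampleReady()
{
  mozilla::recordreplay::MonitorAutoLock lock(gExampleMonitor);
  while (!gExampleReady) {
    gExampleMonitor.Wait();  // blocks, releasing the monitor while waiting
  }
}
static void
SignalExampleReady()
{
  mozilla::recordreplay::MonitorAutoLock lock(gExampleMonitor);
  gExampleReady = true;
  gExampleMonitor.Notify();
}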

View file

@ -0,0 +1,671 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProcessRecordReplay.h"
#include "ipc/ChildInternal.h"
#include "mozilla/Compression.h"
#include "mozilla/Maybe.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StackWalk.h"
#include "mozilla/StaticMutex.h"
#include "DirtyMemoryHandler.h"
#include "Lock.h"
#include "MemorySnapshot.h"
#include "ProcessRedirect.h"
#include "ProcessRewind.h"
#include "Trigger.h"
#include "ValueIndex.h"
#include "WeakPointer.h"
#include "pratom.h"
#include <fcntl.h>
#include <unistd.h>
namespace mozilla {
namespace recordreplay {
MOZ_NEVER_INLINE void
BusyWait()
{
static volatile int value = 1;
while (value) {}
}
///////////////////////////////////////////////////////////////////////////////
// Basic interface
///////////////////////////////////////////////////////////////////////////////
File* gRecordingFile;
const char* gSnapshotMemoryPrefix;
const char* gSnapshotStackPrefix;
char* gInitializationFailureMessage;
static void DumpRecordingAssertions();
bool gInitialized;
ProcessKind gProcessKind;
char* gRecordingFilename;
// Current process ID.
static int gPid;
// Whether to spew record/replay messages to stderr.
static bool gSpewEnabled;
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_Initialize(int aArgc, char* aArgv[])
{
// Parse command line options for the process kind and recording file.
Maybe<ProcessKind> processKind;
Maybe<char*> recordingFile;
for (int i = 0; i < aArgc; i++) {
if (!strcmp(aArgv[i], gProcessKindOption)) {
MOZ_RELEASE_ASSERT(processKind.isNothing() && i + 1 < aArgc);
processKind.emplace((ProcessKind) atoi(aArgv[i + 1]));
}
if (!strcmp(aArgv[i], gRecordingFileOption)) {
MOZ_RELEASE_ASSERT(recordingFile.isNothing() && i + 1 < aArgc);
recordingFile.emplace(aArgv[i + 1]);
}
}
MOZ_RELEASE_ASSERT(processKind.isSome() && recordingFile.isSome());
gProcessKind = processKind.ref();
gRecordingFilename = strdup(recordingFile.ref());
switch (processKind.ref()) {
case ProcessKind::Recording:
gIsRecording = gIsRecordingOrReplaying = true;
fprintf(stderr, "RECORDING %d %s\n", getpid(), recordingFile.ref());
break;
case ProcessKind::Replaying:
gIsReplaying = gIsRecordingOrReplaying = true;
fprintf(stderr, "REPLAYING %d %s\n", getpid(), recordingFile.ref());
break;
case ProcessKind::MiddlemanRecording:
case ProcessKind::MiddlemanReplaying:
gIsMiddleman = true;
fprintf(stderr, "MIDDLEMAN %d %s\n", getpid(), recordingFile.ref());
break;
default:
MOZ_CRASH("Bad ProcessKind");
}
if (IsRecordingOrReplaying() && TestEnv("WAIT_AT_START")) {
BusyWait();
}
if (IsMiddleman() && TestEnv("MIDDLEMAN_WAIT_AT_START")) {
BusyWait();
}
gPid = getpid();
if (TestEnv("RECORD_REPLAY_SPEW")) {
gSpewEnabled = true;
}
EarlyInitializeRedirections();
if (!IsRecordingOrReplaying()) {
return;
}
gSnapshotMemoryPrefix = mktemp(strdup("/tmp/SnapshotMemoryXXXXXX"));
gSnapshotStackPrefix = mktemp(strdup("/tmp/SnapshotStackXXXXXX"));
InitializeCurrentTime();
gRecordingFile = new File();
if (!gRecordingFile->Open(recordingFile.ref(), IsRecording() ? File::WRITE : File::READ)) {
gInitializationFailureMessage = strdup("Bad recording file");
return;
}
if (!InitializeRedirections()) {
MOZ_RELEASE_ASSERT(gInitializationFailureMessage);
return;
}
Thread::InitializeThreads();
Thread* thread = Thread::GetById(MainThreadId);
MOZ_ASSERT(thread->Id() == MainThreadId);
thread->BindToCurrent();
thread->SetPassThrough(true);
if (IsReplaying() && TestEnv("DUMP_RECORDING")) {
DumpRecordingAssertions();
}
InitializeTriggers();
InitializeWeakPointers();
InitializeMemorySnapshots();
Thread::SpawnAllThreads();
InitializeCountdownThread();
SetupDirtyMemoryHandler();
// Don't create a stylo thread pool when recording or replaying.
putenv((char*) "STYLO_THREADS=1");
thread->SetPassThrough(false);
Lock::InitializeLocks();
InitializeRewindState();
gInitialized = true;
}
MOZ_EXPORT size_t
RecordReplayInterface_InternalRecordReplayValue(size_t aValue)
{
MOZ_ASSERT(IsRecordingOrReplaying());
if (AreThreadEventsPassedThrough()) {
return aValue;
}
EnsureNotDivergedFromRecording();
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
RecordReplayAssert("Value");
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Value);
thread->Events().RecordOrReplayValue(&aValue);
return aValue;
}
MOZ_EXPORT void
RecordReplayInterface_InternalRecordReplayBytes(void* aData, size_t aSize)
{
MOZ_ASSERT(IsRecordingOrReplaying());
if (AreThreadEventsPassedThrough()) {
return;
}
EnsureNotDivergedFromRecording();
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
RecordReplayAssert("Bytes %d", (int) aSize);
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Bytes);
thread->Events().CheckInput(aSize);
thread->Events().RecordOrReplayBytes(aData, aSize);
}
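// Hypothetical usage sketch, assuming the public wrappers in mfbt/RecordReplay.h
// (RecordReplayValue, RecordReplayBytes) forward to the internal functions
// above: a caller records a non-deterministic value so replay observes the same
// result. ReadHardwareTimestamp is an illustrative placeholder, not a real API.
//
//   size_t ticks = recordreplay::RecordReplayValue(ReadHardwareTimestamp());
//   recordreplay::RecordReplayBytes(buffer, nbytes);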
MOZ_EXPORT void
RecordReplayInterface_InternalInvalidateRecording(const char* aWhy)
{
if (IsRecording()) {
child::ReportFatalError("Recording invalidated: %s", aWhy);
} else {
child::ReportFatalError("Recording invalidated while replaying: %s", aWhy);
}
Unreachable();
}
} // extern "C"
// How many recording endpoints have been flushed to the recording.
static size_t gNumEndpoints;
void
FlushRecording()
{
MOZ_RELEASE_ASSERT(IsRecording());
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
// Save the endpoint of the recording.
js::ExecutionPoint endpoint = navigation::GetRecordingEndpoint();
Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
endpointStream->WriteScalar(++gNumEndpoints);
endpointStream->WriteBytes(&endpoint, sizeof(endpoint));
gRecordingFile->PreventStreamWrites();
gRecordingFile->Flush();
child::NotifyFlushedRecording();
gRecordingFile->AllowStreamWrites();
}
// Try to load another recording index, returning whether one was found.
static bool
LoadNextRecordingIndex()
{
Thread::WaitForIdleThreads();
InfallibleVector<Stream*> updatedStreams;
File::ReadIndexResult result = gRecordingFile->ReadNextIndex(&updatedStreams);
if (result == File::ReadIndexResult::InvalidFile) {
MOZ_CRASH("Bad recording file");
}
bool found = result == File::ReadIndexResult::FoundIndex;
if (found) {
for (Stream* stream : updatedStreams) {
if (stream->Name() == StreamName::Lock) {
Lock::LockAquiresUpdated(stream->NameIndex());
}
}
}
Thread::ResumeIdleThreads();
return found;
}
bool
HitRecordingEndpoint()
{
MOZ_RELEASE_ASSERT(IsReplaying());
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
// The debugger will call this method in a loop, so we don't have to do
// anything fancy to try to get the most up to date endpoint. As long as we
// can make some progress in attempting to find a later endpoint, we can
// return control to the debugger.
// Check if there is a new endpoint in the endpoint data stream.
Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
if (!endpointStream->AtEnd()) {
js::ExecutionPoint endpoint;
size_t index = endpointStream->ReadScalar();
endpointStream->ReadBytes(&endpoint, sizeof(endpoint));
navigation::SetRecordingEndpoint(index, endpoint);
return true;
}
// Check if there is more data in the recording.
if (LoadNextRecordingIndex()) {
return true;
}
// OK, we hit the most up to date endpoint in the recording.
return false;
}
void
HitEndOfRecording()
{
MOZ_RELEASE_ASSERT(IsReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
if (Thread::CurrentIsMainThread()) {
// Load more data from the recording. The debugger is not allowed to let us
// go past the recording endpoint, so there should be more data.
bool found = LoadNextRecordingIndex();
MOZ_RELEASE_ASSERT(found);
} else {
// Non-main threads may wait until more recording data is loaded by the
// main thread.
Thread::Wait();
}
}
bool
SpewEnabled()
{
return gSpewEnabled;
}
void
InternalPrint(const char* aFormat, va_list aArgs)
{
char buf1[2048];
VsprintfLiteral(buf1, aFormat, aArgs);
char buf2[2048];
SprintfLiteral(buf2, "Spew[%d]: %s", gPid, buf1);
DirectPrint(buf2);
}
///////////////////////////////////////////////////////////////////////////////
// Record/Replay Assertions
///////////////////////////////////////////////////////////////////////////////
struct StackWalkData
{
char* mBuf;
size_t mSize;
StackWalkData(char* aBuf, size_t aSize)
: mBuf(aBuf), mSize(aSize)
{}
void append(const char* aText) {
size_t len = strlen(aText);
// Leave room for the null terminator written by strcpy.
if (len < mSize) {
strcpy(mBuf, aText);
mBuf += len;
mSize -= len;
}
}
};
static void
StackWalkCallback(uint32_t aFrameNumber, void* aPC, void* aSP, void* aClosure)
{
StackWalkData* data = (StackWalkData*) aClosure;
MozCodeAddressDetails details;
MozDescribeCodeAddress(aPC, &details);
data->append(" ### ");
data->append(details.function[0] ? details.function : "???");
}
static void
SetCurrentStackString(const char* aAssertion, char* aBuf, size_t aSize)
{
size_t frameCount = 12;
// Locking operations usually have extra stack goop.
if (!strcmp(aAssertion, "Lock 1")) {
frameCount += 8;
} else if (!strncmp(aAssertion, "Lock ", 5)) {
frameCount += 4;
}
StackWalkData data(aBuf, aSize);
MozStackWalk(StackWalkCallback, /* aSkipFrames = */ 2, frameCount, &data);
}
// For debugging.
char*
PrintCurrentStackString()
{
AutoEnsurePassThroughThreadEvents pt;
char* buf = new char[1000];
SetCurrentStackString("", buf, 1000);
return buf;
}
static inline bool
AlwaysCaptureEventStack(const char* aText)
{
return false;
}
// Bit included in assertion stream when the assertion is a text assert, rather
// than a byte sequence.
static const size_t AssertionBit = 1;
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_InternalRecordReplayAssert(const char* aFormat, va_list aArgs)
{
#ifdef INCLUDE_RECORD_REPLAY_ASSERTIONS
if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
return;
}
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
// Record an assertion string consisting of the name of the assertion and
// stack information about the current point of execution.
char text[1024];
VsprintfLiteral(text, aFormat, aArgs);
if (IsRecording() && (thread->ShouldCaptureEventStacks() || AlwaysCaptureEventStack(text))) {
AutoPassThroughThreadEvents pt;
SetCurrentStackString(text, text + strlen(text), sizeof(text) - strlen(text));
}
size_t textLen = strlen(text);
if (IsRecording()) {
thread->Asserts().WriteScalar(thread->Events().StreamPosition());
if (thread->IsMainThread()) {
thread->Asserts().WriteScalar(*ExecutionProgressCounter());
}
thread->Asserts().WriteScalar((textLen << 1) | AssertionBit);
thread->Asserts().WriteBytes(text, textLen);
} else {
// While replaying, both the assertion's name and the current position in
// the thread's events need to match up with what was recorded. The stack
// portion of the assertion text does not need to match; it is only used to
// help track down the reason for a mismatch.
bool match = true;
size_t streamPos = thread->Asserts().ReadScalar();
if (streamPos != thread->Events().StreamPosition()) {
match = false;
}
size_t progress = 0;
if (thread->IsMainThread()) {
progress = thread->Asserts().ReadScalar();
if (progress != *ExecutionProgressCounter()) {
match = false;
}
}
size_t assertLen = thread->Asserts().ReadScalar() >> 1;
char* buffer = thread->TakeBuffer(assertLen + 1);
thread->Asserts().ReadBytes(buffer, assertLen);
buffer[assertLen] = 0;
if (assertLen < textLen || memcmp(buffer, text, textLen) != 0) {
match = false;
}
if (!match) {
for (int i = Thread::NumRecentAsserts - 1; i >= 0; i--) {
if (thread->RecentAssert(i).mText) {
Print("Thread %d Recent %d: %s [%d]\n",
(int) thread->Id(), (int) i,
thread->RecentAssert(i).mText, (int) thread->RecentAssert(i).mPosition);
}
}
{
AutoPassThroughThreadEvents pt;
SetCurrentStackString(text, text + strlen(text), sizeof(text) - strlen(text));
}
child::ReportFatalError("Assertion Mismatch: Thread %d\n"
"Recorded: %s [%d,%d]\n"
"Replayed: %s [%d,%d]\n",
(int) thread->Id(), buffer, (int) streamPos, (int) progress, text,
(int) thread->Events().StreamPosition(),
(int) (thread->IsMainThread() ? *ExecutionProgressCounter() : 0));
Unreachable();
}
thread->RestoreBuffer(buffer);
// Push this assert onto the recent assertions in the thread.
free(thread->RecentAssert(Thread::NumRecentAsserts - 1).mText);
for (size_t i = Thread::NumRecentAsserts - 1; i >= 1; i--) {
thread->RecentAssert(i) = thread->RecentAssert(i - 1);
}
thread->RecentAssert(0).mText = strdup(text);
thread->RecentAssert(0).mPosition = thread->Events().StreamPosition();
}
#endif // INCLUDE_RECORD_REPLAY_ASSERTIONS
}
MOZ_EXPORT void
RecordReplayInterface_InternalRecordReplayAssertBytes(const void* aData, size_t aSize)
{
#ifdef INCLUDE_RECORD_REPLAY_ASSERTIONS
RecordReplayAssert("AssertBytes");
if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
return;
}
MOZ_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
if (IsRecording()) {
thread->Asserts().WriteScalar(thread->Events().StreamPosition());
thread->Asserts().WriteScalar(aSize << 1);
thread->Asserts().WriteBytes(aData, aSize);
} else {
bool match = true;
size_t streamPos = thread->Asserts().ReadScalar();
if (streamPos != thread->Events().StreamPosition()) {
match = false;
}
size_t oldSize = thread->Asserts().ReadScalar() >> 1;
if (oldSize != aSize) {
match = false;
}
char* buffer = thread->TakeBuffer(oldSize);
thread->Asserts().ReadBytes(buffer, oldSize);
if (match && memcmp(buffer, aData, oldSize) != 0) {
match = false;
}
if (!match) {
// On a byte mismatch, print out some of the mismatched bytes, up to a
// cutoff in case there are many mismatched bytes.
if (oldSize == aSize) {
static const size_t MAX_MISMATCHES = 100;
size_t mismatches = 0;
for (size_t i = 0; i < aSize; i++) {
if (((char*)aData)[i] != buffer[i]) {
Print("Position %d: %d %d\n", (int) i, (int) buffer[i], (int) ((char*)aData)[i]);
if (++mismatches == MAX_MISMATCHES) {
break;
}
}
}
if (mismatches == MAX_MISMATCHES) {
Print("Position ...\n");
}
}
child::ReportFatalError("Byte Comparison Check Failed: Position %d %d Length %d %d\n",
(int) streamPos, (int) thread->Events().StreamPosition(),
(int) oldSize, (int) aSize);
Unreachable();
}
thread->RestoreBuffer(buffer);
}
#endif // INCLUDE_RECORD_REPLAY_ASSERTIONS
}
MOZ_EXPORT void
RecordReplayRust_Assert(const uint8_t* aBuffer)
{
RecordReplayAssert("%s", (const char*) aBuffer);
}
MOZ_EXPORT void
RecordReplayRust_BeginPassThroughThreadEvents()
{
BeginPassThroughThreadEvents();
}
MOZ_EXPORT void
RecordReplayRust_EndPassThroughThreadEvents()
{
EndPassThroughThreadEvents();
}
} // extern "C"
static void
DumpRecordingAssertions()
{
Thread* thread = Thread::Current();
for (size_t id = MainThreadId; id <= MaxRecordedThreadId; id++) {
Stream* asserts = gRecordingFile->OpenStream(StreamName::Assert, id);
if (asserts->AtEnd()) {
continue;
}
fprintf(stderr, "Thread Assertions %d:\n", (int) id);
while (!asserts->AtEnd()) {
(void) asserts->ReadScalar();
size_t shiftedLen = asserts->ReadScalar();
size_t assertLen = shiftedLen >> 1;
char* buffer = thread->TakeBuffer(assertLen + 1);
asserts->ReadBytes(buffer, assertLen);
buffer[assertLen] = 0;
if (shiftedLen & AssertionBit) {
fprintf(stderr, "%s\n", buffer);
}
thread->RestoreBuffer(buffer);
}
}
fprintf(stderr, "Done with assertions, exiting...\n");
_exit(0);
}
static ValueIndex* gGenericThings;
static StaticMutexNotRecorded gGenericThingsMutex;
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_InternalRegisterThing(void* aThing)
{
if (AreThreadEventsPassedThrough()) {
return;
}
AutoOrderedAtomicAccess at;
StaticMutexAutoLock lock(gGenericThingsMutex);
if (!gGenericThings) {
gGenericThings = new ValueIndex();
}
if (gGenericThings->Contains(aThing)) {
gGenericThings->Remove(aThing);
}
gGenericThings->Insert(aThing);
}
MOZ_EXPORT void
RecordReplayInterface_InternalUnregisterThing(void* aThing)
{
StaticMutexAutoLock lock(gGenericThingsMutex);
if (gGenericThings) {
gGenericThings->Remove(aThing);
}
}
MOZ_EXPORT size_t
RecordReplayInterface_InternalThingIndex(void* aThing)
{
if (!aThing) {
return 0;
}
StaticMutexAutoLock lock(gGenericThingsMutex);
size_t index = 0;
if (gGenericThings) {
gGenericThings->MaybeGetIndex(aThing, &index);
}
return index;
}
MOZ_EXPORT const char*
RecordReplayInterface_InternalVirtualThingName(void* aThing)
{
void* vtable = *(void**)aThing;
const char* name = SymbolNameRaw(vtable);
return name ? name : "(unknown)";
}
} // extern "C"
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,395 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ProcessRecordReplay_h
#define mozilla_recordreplay_ProcessRecordReplay_h
#include "mozilla/PodOperations.h"
#include "mozilla/RecordReplay.h"
#include <algorithm>
namespace mozilla {
namespace recordreplay {
// Record/Replay Internal API
//
// See mfbt/RecordReplay.h for the main record/replay public API and a high
// level description of the record/replay system.
//
// This directory contains files used for recording, replaying, and rewinding a
// process. The ipc subdirectory contains files used for IPC between a
// replaying and middleman process, and between a middleman and chrome process.
// ID of an event in a thread's event stream. Each ID in the stream is followed
// by data associated with the event (see File::RecordOrReplayThreadEvent).
enum class ThreadEvent : uint32_t
{
// Spawned another thread.
CreateThread,
// Created a recorded lock.
CreateLock,
// Acquired a recorded lock.
Lock,
// Wait for a condition variable with a timeout.
WaitForCvarUntil,
// Called RecordReplayValue.
Value,
// Called RecordReplayBytes.
Bytes,
// Executed a nested callback (see Callback.h).
ExecuteCallback,
// Finished executing nested callbacks in a library API (see Callback.h).
CallbacksFinished,
// Restoring a data pointer used in a callback (see Callback.h).
RestoreCallbackData,
// Executed a trigger within a call to ExecuteTriggers.
ExecuteTrigger,
// Finished executing triggers within a call to ExecuteTriggers.
ExecuteTriggersFinished,
// Encoded information about an argument/rval used by a graphics call.
GraphicsArgument,
GraphicsRval,
// The start of event IDs for redirected call events. Event IDs after this
// point are platform specific.
CallStart
};
class File;
// File used during recording and replay.
extern File* gRecordingFile;
// Whether record/replay state has finished initialization.
extern bool gInitialized;
// If we failed to initialize, any associated message.
extern char* gInitializationFailureMessage;
// Whether record/replay assertions should be performed.
//#ifdef DEBUG
#define INCLUDE_RECORD_REPLAY_ASSERTIONS 1
//#endif
// Flush any new recording data to disk.
void FlushRecording();
// Called when any thread hits the end of its event stream.
void HitEndOfRecording();
// Called when the main thread hits the latest recording endpoint it knows
// about.
bool HitRecordingEndpoint();
// Possible directives to give via the RecordReplayDirective function.
enum class Directive
{
// Crash at the next use of MaybeCrash.
CrashSoon = 1,
// Irrevocably crash if CrashSoon has ever been used on the process.
MaybeCrash = 2,
// Always save temporary checkpoints when stepping around in the debugger.
AlwaysSaveTemporaryCheckpoints = 3,
// Mark all future checkpoints as major checkpoints in the middleman.
AlwaysMarkMajorCheckpoints = 4
};
// Get the process kind and recording file specified at the command line.
// These are available in the middleman as well as while recording/replaying.
extern ProcessKind gProcessKind;
extern char* gRecordingFilename;
///////////////////////////////////////////////////////////////////////////////
// Helper Functions
///////////////////////////////////////////////////////////////////////////////
// Wait indefinitely for a debugger to be attached.
void BusyWait();
static inline void Unreachable() { MOZ_CRASH("Unreachable"); }
// Get the symbol name for a function pointer address, if available.
const char* SymbolNameRaw(void* aAddress);
static inline bool
MemoryContains(void* aBase, size_t aSize, void* aPtr, size_t aPtrSize = 1)
{
MOZ_ASSERT(aPtrSize);
return (uint8_t*) aPtr >= (uint8_t*) aBase
&& (uint8_t*) aPtr + aPtrSize <= (uint8_t*) aBase + aSize;
}
static inline bool
MemoryIntersects(void* aBase0, size_t aSize0, void* aBase1, size_t aSize1)
{
MOZ_ASSERT(aSize0 && aSize1);
return MemoryContains(aBase0, aSize0, aBase1)
|| MemoryContains(aBase0, aSize0, (uint8_t*) aBase1 + aSize1 - 1)
|| MemoryContains(aBase1, aSize1, aBase0);
}
static const size_t PageSize = 4096;
static inline uint8_t*
PageBase(void* aAddress)
{
return (uint8_t*)aAddress - ((size_t)aAddress % PageSize);
}
static inline size_t
RoundupSizeToPageBoundary(size_t aSize)
{
if (aSize % PageSize) {
return aSize + PageSize - (aSize % PageSize);
}
return aSize;
}
static inline bool
TestEnv(const char* env)
{
const char* value = getenv(env);
return value && value[0];
}
// Check for membership in a vector.
template <typename Vector, typename Entry>
inline bool
VectorContains(const Vector& aVector, const Entry& aEntry)
{
return std::find(aVector.begin(), aVector.end(), aEntry) != aVector.end();
}
// Add or remove a unique entry to an unsorted vector.
template <typename Vector, typename Entry>
inline void
VectorAddOrRemoveEntry(Vector& aVector, const Entry& aEntry, bool aAdding)
{
for (Entry& existing : aVector) {
if (existing == aEntry) {
MOZ_RELEASE_ASSERT(!aAdding);
aVector.erase(&existing);
return;
}
}
MOZ_RELEASE_ASSERT(aAdding);
aVector.append(aEntry);
}
bool SpewEnabled();
void InternalPrint(const char* aFormat, va_list aArgs);
#define MOZ_MakeRecordReplayPrinter(aName, aSpewing) \
static inline void \
aName(const char* aFormat, ...) \
{ \
if ((IsRecordingOrReplaying() || IsMiddleman()) && (!aSpewing || SpewEnabled())) { \
va_list ap; \
va_start(ap, aFormat); \
InternalPrint(aFormat, ap); \
va_end(ap); \
} \
}
// Print information about record/replay state. Printing is independent from
// the recording and will be printed by any recording, replaying, or middleman
// process. Spew is only printed when enabled via the RECORD_REPLAY_SPEW
// environment variable.
MOZ_MakeRecordReplayPrinter(Print, false)
MOZ_MakeRecordReplayPrinter(PrintSpew, true)
#undef MOZ_MakeRecordReplayPrinter
///////////////////////////////////////////////////////////////////////////////
// Profiling
///////////////////////////////////////////////////////////////////////////////
void InitializeCurrentTime();
// Get a current timestamp, in microseconds.
double CurrentTime();
#define ForEachTimerKind(Macro) \
Macro(Default)
enum class TimerKind {
#define DefineTimerKind(aKind) aKind,
ForEachTimerKind(DefineTimerKind)
#undef DefineTimerKind
Count
};
struct AutoTimer
{
explicit AutoTimer(TimerKind aKind);
~AutoTimer();
private:
TimerKind mKind;
double mStart;
};
void DumpTimers();
///////////////////////////////////////////////////////////////////////////////
// Memory Management
///////////////////////////////////////////////////////////////////////////////
// In cases where memory is tracked and should be saved/restored with
// checkpoints, malloc and other standard library functions suffice to allocate
// memory in the record/replay system. The routines below are used for handling
// redirections for the raw system calls underlying the standard libraries, and
// for cases where allocated memory should be untracked: the contents are
// ignored when saving/restoring checkpoints.
// Different kinds of memory used in the system.
enum class MemoryKind {
// Memory whose contents are saved/restored with checkpoints.
Tracked,
// All remaining memory kinds refer to untracked memory.
// Memory not fitting into one of the categories below.
Generic,
// Memory used for thread snapshots.
ThreadSnapshot,
// Memory used by various parts of the memory snapshot system.
TrackedRegions,
FreeRegions,
DirtyPageSet,
SortedDirtyPageSet,
PageCopy,
// Memory used for navigation state.
Navigation,
Count
};
// Allocate or deallocate a block of memory of a particular kind. Allocated
// memory is initially zeroed.
void* AllocateMemory(size_t aSize, MemoryKind aKind);
void DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind);
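// Hypothetical usage sketch: allocate an untracked scratch page whose contents
// are ignored when checkpoints are saved and restored. The size and kind here
// are illustrative.
//
//   void* scratch = AllocateMemory(PageSize, MemoryKind::Generic);
//   // ... use scratch without its contents being saved/restored ...
//   DeallocateMemory(scratch, PageSize, MemoryKind::Generic);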
// Allocation policy for managing memory of a particular kind.
template <MemoryKind Kind>
class AllocPolicy
{
public:
template <typename T>
T* maybe_pod_calloc(size_t aNumElems) {
if (aNumElems & tl::MulOverflowMask<sizeof(T)>::value) {
MOZ_CRASH();
}
// Note: AllocateMemory always returns zeroed memory.
return static_cast<T*>(AllocateMemory(aNumElems * sizeof(T), Kind));
}
template <typename T>
void free_(T* aPtr, size_t aSize) {
DeallocateMemory(aPtr, aSize * sizeof(T), Kind);
}
template <typename T>
T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
T* res = maybe_pod_calloc<T>(aNewSize);
memcpy(res, aPtr, std::min(aOldSize, aNewSize) * sizeof(T));
free_<T>(aPtr, aOldSize);
return res;
}
template <typename T>
T* maybe_pod_malloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
template <typename T>
T* pod_malloc(size_t aNumElems) { return maybe_pod_malloc<T>(aNumElems); }
template <typename T>
T* pod_calloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
template <typename T>
T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
}
void reportAllocOverflow() const {}
MOZ_MUST_USE bool checkSimulatedOOM() const {
return true;
}
};
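// Hypothetical usage sketch: attribute a container's backing memory to a
// specific MemoryKind by parameterizing a vector type on the policy above. The
// element type and kind are illustrative.
//
//   mozilla::Vector<uint8_t, 0, AllocPolicy<MemoryKind::ThreadSnapshot>> buf;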
///////////////////////////////////////////////////////////////////////////////
// Redirection Bypassing
///////////////////////////////////////////////////////////////////////////////
// The functions below bypass any redirections and give access to the system
// even if events are not passed through in the current thread. These are
// implemented in the various platform ProcessRedirect*.cpp files, and will
// crash on errors which can't be handled internally.
// Generic typedef for a system file handle.
typedef size_t FileHandle;
// Allocate/deallocate a block of memory directly from the system.
void* DirectAllocateMemory(void* aAddress, size_t aSize);
void DirectDeallocateMemory(void* aAddress, size_t aSize);
// Give a block of memory R or RX access.
void DirectWriteProtectMemory(void* aAddress, size_t aSize, bool aExecutable,
bool aIgnoreFailures = false);
// Give a block of memory RW or RWX access.
void DirectUnprotectMemory(void* aAddress, size_t aSize, bool aExecutable,
bool aIgnoreFailures = false);
// Open an existing file for reading or a new file for writing, clobbering any
// existing file.
FileHandle DirectOpenFile(const char* aFilename, bool aWriting);
// Seek to an offset within a file open for reading.
void DirectSeekFile(FileHandle aFd, uint64_t aOffset);
// Close or delete a file.
void DirectCloseFile(FileHandle aFd);
void DirectDeleteFile(const char* aFilename);
// Append data to a file open for writing, blocking until the write completes.
void DirectWrite(FileHandle aFd, const void* aData, size_t aSize);
// Print a string directly to stderr.
void DirectPrint(const char* aString);
// Read data from a file, blocking until the read completes.
size_t DirectRead(FileHandle aFd, void* aData, size_t aSize);
// Create a new pipe.
void DirectCreatePipe(FileHandle* aWriteFd, FileHandle* aReadFd);
// Spawn a new thread.
void DirectSpawnThread(void (*aFunction)(void*), void* aArgument);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ProcessRecordReplay_h

Просмотреть файл

@ -0,0 +1,686 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProcessRedirect.h"
#include "InfallibleVector.h"
#include "mozilla/Sprintf.h"
#include <dlfcn.h>
#include <string.h>
namespace {
#include "udis86/udis86.c"
#include "udis86/decode.c"
#include "udis86/itab.c"
} // anonymous namespace
namespace mozilla {
namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Library API Redirections
///////////////////////////////////////////////////////////////////////////////
// Redirecting system library APIs requires delicacy. We have to patch the code
// so that whenever control reaches the beginning of the library API's symbol,
// we will end up jumping to an address of our choice instead. This has to be
// done without corrupting the instructions of any functions in the library,
// which principally means ensuring that there are no internal jumps into the
// code segments we have patched.
//
// The patching we do here might fail: it isn't possible to redirect an
// arbitrary symbol within an arbitrary block of code. We are doing a best
// effort sort of thing, and any failures will be noted for reporting without
// touching the original code at all.
// Keep track of the jumps we know about which could affect the validity of a
// code patch.
static StaticInfallibleVector<std::pair<uint8_t*,uint8_t*>> gInternalJumps;
// A jump to patch in at the end of redirecting. To avoid issues with calling
// redirected functions before all redirections have been installed
// (particularly due to locks being taken while checking for internal jump
// targets), all modification of the original code is delayed until after no
// further system calls are needed.
struct JumpPatch
{
uint8_t* mStart;
uint8_t* mTarget;
bool mShort;
JumpPatch(uint8_t* aStart, uint8_t* aTarget, bool aShort)
: mStart(aStart), mTarget(aTarget), mShort(aShort)
{}
};
static StaticInfallibleVector<JumpPatch> gJumpPatches;
static void
AddJumpPatch(uint8_t* aStart, uint8_t* aTarget, bool aShort)
{
gInternalJumps.emplaceBack(aStart, aTarget);
gJumpPatches.emplaceBack(aStart, aTarget, aShort);
}
// A range of instructions to clobber at the end of redirecting.
struct ClobberPatch
{
uint8_t* mStart;
uint8_t* mEnd;
ClobberPatch(uint8_t* aStart, uint8_t* aEnd)
: mStart(aStart), mEnd(aEnd)
{}
};
static StaticInfallibleVector<ClobberPatch> gClobberPatches;
static void
AddClobberPatch(uint8_t* aStart, uint8_t* aEnd)
{
if (aStart < aEnd) {
gClobberPatches.emplaceBack(aStart, aEnd);
}
}
static uint8_t*
SymbolBase(uint8_t* aPtr)
{
Dl_info info;
if (!dladdr(aPtr, &info)) {
MOZ_CRASH();
}
return static_cast<uint8_t*>(info.dli_saddr);
}
// Use Udis86 to decode a single instruction, returning the number of bytes
// consumed.
static size_t
DecodeInstruction(uint8_t* aIp, ud_t* aUd)
{
ud_init(aUd);
ud_set_input_buffer(aUd, aIp, MaximumInstructionLength);
ud_set_mode(aUd, 64);
size_t nbytes = ud_decode(aUd);
MOZ_RELEASE_ASSERT(nbytes && nbytes <= MaximumInstructionLength);
return nbytes;
}
// If it is unsafe to patch new instructions into [aIpStart, aIpEnd) then
// return an instruction from which a new search can be started.
static uint8_t*
MaybeInternalJumpTarget(uint8_t* aIpStart, uint8_t* aIpEnd)
{
// The start and end have to be associated with the same symbol, as otherwise
// a jump could come into the start of the later symbol.
const char* startName = SymbolNameRaw(aIpStart);
const char* endName = SymbolNameRaw(aIpEnd - 1);
if (strcmp(startName, endName)) {
return SymbolBase(aIpEnd - 1);
}
// Look for any internal jumps from outside the patch range into the middle
// of the patch range.
for (auto jump : gInternalJumps) {
if (!(jump.first >= aIpStart && jump.first < aIpEnd) &&
jump.second > aIpStart && jump.second < aIpEnd) {
return jump.second;
}
}
// Treat patched regions of code as if they had internal jumps.
for (auto patch : gJumpPatches) {
uint8_t* end = patch.mStart + (patch.mShort ? ShortJumpBytes : JumpBytesClobberRax);
if (MemoryIntersects(aIpStart, aIpEnd - aIpStart, patch.mStart, end - patch.mStart)) {
return end;
}
}
for (auto patch : gClobberPatches) {
if (MemoryIntersects(aIpStart, aIpEnd - aIpStart, patch.mStart, patch.mEnd - patch.mStart)) {
return patch.mEnd;
}
}
if ((size_t)(aIpEnd - aIpStart) > ShortJumpBytes) {
// Manually annotate functions which might have backedges that interfere
// with redirecting the initial bytes of the function. Ideally we would
// find these backedges with some binary analysis, but this is easier said
// than done, especially since there doesn't seem to be a standard way to
// determine the extent of a symbol's code on OSX. Use strstr to avoid
// issues with goo in the symbol names.
if ((strstr(startName, "CTRunGetGlyphs") &&
!strstr(startName, "CTRunGetGlyphsPtr")) ||
(strstr(startName, "CTRunGetPositions") &&
!strstr(startName, "CTRunGetPositionsPtr")) ||
(strstr(startName, "CTRunGetStringIndices") &&
!strstr(startName, "CTRunGetStringIndicesPtr")) ||
strstr(startName, "CGColorSpaceCreateDeviceRGB") ||
// For these functions, there is a syscall near the beginning which
// other system threads might be inside.
strstr(startName, "__workq_kernreturn") ||
strstr(startName, "kevent64")) {
return aIpEnd - 1;
}
}
return nullptr;
}
// Any reasons why redirection failed.
static StaticInfallibleVector<char*> gRedirectFailures;
static void
RedirectFailure(const char* aFormat, ...)
{
va_list ap;
va_start(ap, aFormat);
char buf[4096];
VsprintfLiteral(buf, aFormat, ap);
va_end(ap);
gRedirectFailures.emplaceBack(strdup(buf));
}
static void
UnknownInstruction(const char* aName, uint8_t* aIp, size_t aNbytes)
{
char buf[4096];
char* ptr = buf;
for (size_t i = 0; i < aNbytes; i++) {
int written = snprintf(ptr, sizeof(buf) - (ptr - buf), " %d", (int) aIp[i]);
ptr += written;
}
RedirectFailure("Unknown instruction in %s:%s", aName, buf);
}
// Try to emit instructions to |aAssembler| with equivalent behavior to any
// special jump or ip-dependent instruction at |aIp|, returning true if the
// instruction was copied.
static bool
CopySpecialInstruction(uint8_t* aIp, ud_t* aUd, size_t aNbytes, Assembler& aAssembler)
{
aAssembler.NoteOriginalInstruction(aIp);
if (aUd->pfx_seg) {
return false;
}
ud_mnemonic_code mnemonic = ud_insn_mnemonic(aUd);
if (mnemonic == UD_Icall || mnemonic == UD_Ijmp || (mnemonic >= UD_Ijo && mnemonic <= UD_Ijg)) {
MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 1));
const ud_operand* op = ud_insn_opr(aUd, 0);
if (op->type == UD_OP_JIMM) {
// Call or jump relative to rip.
uint8_t* target = aIp + aNbytes;
switch (op->size) {
case 8: target += op->lval.sbyte; break;
case 32: target += op->lval.sdword; break;
default: return false;
}
gInternalJumps.emplaceBack(nullptr, target);
if (mnemonic == UD_Icall) {
aAssembler.MoveImmediateToRax(target);
aAssembler.CallRax();
} else if (mnemonic == UD_Ijmp) {
aAssembler.Jump(target);
} else {
aAssembler.ConditionalJump(aUd->primary_opcode, target);
}
return true;
}
if (op->type == UD_OP_MEM && op->base == UD_R_RIP && !op->index && op->offset == 32) {
// jmp *$offset32(%rip)
uint8_t* addr = aIp + aNbytes + op->lval.sdword;
aAssembler.MoveImmediateToRax(addr);
aAssembler.LoadRax(8);
aAssembler.JumpToRax();
return true;
}
}
if (mnemonic == UD_Imov || mnemonic == UD_Ilea) {
MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 2));
const ud_operand* dst = ud_insn_opr(aUd, 0);
const ud_operand* src = ud_insn_opr(aUd, 1);
if (dst->type == UD_OP_REG &&
src->type == UD_OP_MEM && src->base == UD_R_RIP && !src->index && src->offset == 32) {
// mov/lea $offset32(%rip), reg
int reg = Assembler::NormalizeRegister(dst->base);
if (!reg) {
return false;
}
uint8_t* addr = aIp + aNbytes + src->lval.sdword;
if (reg != UD_R_RAX) {
aAssembler.PushRax();
}
aAssembler.MoveImmediateToRax(addr);
if (mnemonic == UD_Imov) {
aAssembler.LoadRax(src->size / 8);
}
if (reg != UD_R_RAX) {
aAssembler.MoveRaxToRegister(reg);
aAssembler.PopRax();
}
return true;
}
if (dst->type == UD_OP_MEM && dst->base == UD_R_RIP && !dst->index && dst->offset == 32 &&
src->type == UD_OP_REG && mnemonic == UD_Imov) {
// movl reg, $offset32(%rip)
int reg = Assembler::NormalizeRegister(src->base);
if (!reg) {
return false;
}
uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
aAssembler.PushRax();
aAssembler.PushRbx();
aAssembler.MoveRegisterToRax(reg);
aAssembler.PushRax();
aAssembler.PopRbx();
aAssembler.MoveImmediateToRax(addr);
aAssembler.StoreRbxToRax(src->size / 8);
aAssembler.PopRbx();
aAssembler.PopRax();
return true;
}
}
if (mnemonic == UD_Icmp) {
MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 2));
const ud_operand* dst = ud_insn_opr(aUd, 0);
const ud_operand* src = ud_insn_opr(aUd, 1);
if (dst->type == UD_OP_MEM && dst->base == UD_R_RIP && !dst->index && dst->offset == 32 &&
src->type == UD_OP_IMM && src->size == 8) {
// cmp $literal8, $offset32(%rip)
uint8_t value = src->lval.ubyte;
uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
aAssembler.PushRax();
aAssembler.MoveImmediateToRax(addr);
aAssembler.LoadRax(dst->size / 8);
aAssembler.CompareValueWithRax(value, dst->size / 8);
aAssembler.PopRax();
return true;
}
if (dst->type == UD_OP_REG &&
src->type == UD_OP_MEM && src->base == UD_R_RIP && !src->index && src->offset == 32) {
// cmpq $offset32(%rip), reg
int reg = Assembler::NormalizeRegister(dst->base);
if (!reg) {
return false;
}
uint8_t* addr = aIp + aNbytes + src->lval.sdword;
aAssembler.PushRax();
aAssembler.MoveRegisterToRax(reg);
aAssembler.PushRax();
aAssembler.MoveImmediateToRax(addr);
aAssembler.LoadRax(8);
aAssembler.CompareRaxWithTopOfStack();
aAssembler.PopRax();
aAssembler.PopRax();
return true;
}
}
return false;
}
// Copy an instruction to aAssembler, returning the number of bytes used by the
// instruction.
static size_t
CopyInstruction(const char* aName, uint8_t* aIp, Assembler& aAssembler)
{
// Use Udis86 to decode a single instruction.
ud_t ud;
size_t nbytes = DecodeInstruction(aIp, &ud);
// Check for a special cased instruction.
if (CopySpecialInstruction(aIp, &ud, nbytes, aAssembler)) {
return nbytes;
}
// Don't copy call and jump instructions. We should have special cased these,
// and these may not behave correctly after a naive copy if their behavior is
// relative to the instruction pointer.
ud_mnemonic_code_t mnemonic = ud_insn_mnemonic(&ud);
if (mnemonic == UD_Icall || (mnemonic >= UD_Ijo && mnemonic <= UD_Ijmp)) {
UnknownInstruction(aName, aIp, nbytes);
return nbytes;
}
// Don't copy instructions which have the instruction pointer as an operand.
// We should have special cased these, and as above these will not behave
// correctly after being naively copied due to their dependence on the
// instruction pointer.
for (size_t i = 0;; i++) {
const ud_operand_t* op = ud_insn_opr(&ud, i);
if (!op) {
break;
}
switch (op->type) {
case UD_OP_MEM:
if (op->index == UD_R_RIP) {
UnknownInstruction(aName, aIp, nbytes);
return nbytes;
}
MOZ_FALLTHROUGH;
case UD_OP_REG:
if (op->base == UD_R_RIP) {
UnknownInstruction(aName, aIp, nbytes);
return nbytes;
}
break;
default:
break;
}
}
aAssembler.CopyInstruction(aIp, nbytes);
return nbytes;
}
// Copy all instructions containing bytes in the range [aIpStart, aIpEnd) to
// the given assembler, returning the address of the first instruction not
// copied (i.e. the fallthrough instruction from the copied range).
static uint8_t*
CopyInstructions(const char* aName, uint8_t* aIpStart, uint8_t* aIpEnd,
Assembler& aAssembler)
{
MOZ_RELEASE_ASSERT(!MaybeInternalJumpTarget(aIpStart, aIpEnd));
uint8_t* ip = aIpStart;
while (ip < aIpEnd) {
ip += CopyInstruction(aName, ip, aAssembler);
}
return ip;
}
// Get the instruction pointer to use as the address of the base function for a
// redirection.
static uint8_t*
FunctionStartAddress(Redirection& aRedirection)
{
uint8_t* addr = static_cast<uint8_t*>(dlsym(RTLD_DEFAULT, aRedirection.mName));
if (!addr)
return nullptr;
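// dlsym may resolve to a stub of the form |jmp *disp32(%rip)| (FF 25 imm32);
// if so, follow the indirection to find the actual function entry point.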
if (addr[0] == 0xFF && addr[1] == 0x25) {
return *(uint8_t**)(addr + 6 + *reinterpret_cast<int32_t*>(addr + 2));
}
return addr;
}
// Set up a redirection: overwrite the machine code for its base function, and
// fill in its original function, to satisfy the function pointer behaviors
// described in the Redirection structure. aAssembler is used to allocate
// executable memory for use in the redirection.
static void
Redirect(Redirection& aRedirection, Assembler& aAssembler, bool aFirstPass)
{
// The patching we do here might fail: it isn't possible to redirect an
// arbitrary instruction pointer within an arbitrary block of code. This code
// is doing a best effort sort of thing, and on failure it will crash safely.
// The main thing we want to avoid is corrupting the code so that it has been
// redirected but might crash or behave incorrectly when executed.
uint8_t* functionStart = aRedirection.mBaseFunction;
uint8_t* ro = functionStart;
if (!functionStart) {
if (aFirstPass) {
PrintSpew("Could not find symbol %s for redirecting.\n", aRedirection.mName);
}
return;
}
if (aRedirection.mOriginalFunction != aRedirection.mBaseFunction) {
// We already redirected this function.
MOZ_RELEASE_ASSERT(!aFirstPass);
return;
}
// First, see if we can overwrite JumpBytesClobberRax bytes of instructions
// at the base function with a direct jump to the new function. Rax is never
// live at the start of a function and we can emit a jump to an arbitrary
// location with fewer instruction bytes on x64 if we clobber it.
//
// This will work if there are no extraneous jump targets within the region
// of memory we are overwriting. If there are, we will corrupt the behavior
// of those jumps if we patch the memory.
uint8_t* extent = ro + JumpBytesClobberRax;
if (!MaybeInternalJumpTarget(ro, extent)) {
// Given code instructions for the base function as follows (AA are
// instructions we will end up copying, -- are instructions that will never
// be inspected or modified):
//
// base function: AA--
//
// Transform the code into:
//
// base function: J0--
// generated code: AAJ1
//
// Where J0 jumps to the new function, the original function is at AA, and
// J1 jumps to the point after J0.
// Set the new function to the start of the generated code.
aRedirection.mOriginalFunction = aAssembler.Current();
// Copy AA into generated code.
ro = CopyInstructions(aRedirection.mName, ro, extent, aAssembler);
// Emit jump J1.
aAssembler.Jump(ro);
// Emit jump J0.
AddJumpPatch(functionStart, aRedirection.mNewFunction, /* aShort = */ false);
AddClobberPatch(functionStart + JumpBytesClobberRax, ro);
return;
}
// We don't have enough space to patch in a long jump to an arbitrary
// instruction. Attempt to find another region of code that is long enough
// for two long jumps, has no internal jump targets, and is within range of
// the base function for a short jump.
//
// Given code instructions for the base function, with formatting as above:
//
// base function: AA--BBBB--
//
// Transform the code into:
//
// base function: J0--J1J2--
// generated code: AAJ3 BBBBJ4
//
// With the original function at AA, the jump targets are as follows:
//
// J0: short jump to J2
// J1: jump to BBBB
// J2: jump to the new function
// J3: jump to the point after J0
// J4: jump to the point after J2
// Skip this during the first pass; we don't want to patch a jump in over the
// initial bytes of a function we haven't redirected yet.
if (aFirstPass) {
return;
}
// The original symbol must have enough bytes to insert a short jump.
MOZ_RELEASE_ASSERT(!MaybeInternalJumpTarget(ro, ro + ShortJumpBytes));
// Copy AA into generated code.
aRedirection.mOriginalFunction = aAssembler.Current();
uint8_t* nro = CopyInstructions(aRedirection.mName, ro, ro + ShortJumpBytes, aAssembler);
// Emit jump J3.
aAssembler.Jump(nro);
// Keep advancing the instruction pointer until we get to a region that is
// large enough for two long jump patches.
ro = SymbolBase(extent);
while (true) {
extent = ro + JumpBytesClobberRax * 2;
uint8_t* target = MaybeInternalJumpTarget(ro, extent);
if (target) {
ro = target;
continue;
}
break;
}
// Copy BBBB into generated code.
uint8_t* firstJumpTarget = aAssembler.Current();
uint8_t* afterip = CopyInstructions(aRedirection.mName, ro, extent, aAssembler);
// Emit jump J4.
aAssembler.Jump(afterip);
// Emit jump J1.
AddJumpPatch(ro, firstJumpTarget, /* aShort = */ false);
// Emit jump J2.
AddJumpPatch(ro + JumpBytesClobberRax, aRedirection.mNewFunction, /* aShort = */ false);
AddClobberPatch(ro + 2 * JumpBytesClobberRax, afterip);
// Emit jump J0.
AddJumpPatch(functionStart, ro + JumpBytesClobberRax, /* aShort = */ true);
AddClobberPatch(functionStart + ShortJumpBytes, nro);
}
void
EarlyInitializeRedirections()
{
for (size_t i = 0;; i++) {
Redirection& redirection = gRedirections[i];
if (!redirection.mName) {
break;
}
MOZ_ASSERT(!redirection.mBaseFunction);
MOZ_ASSERT(redirection.mNewFunction);
MOZ_ASSERT(!redirection.mOriginalFunction);
redirection.mBaseFunction = FunctionStartAddress(redirection);
redirection.mOriginalFunction = redirection.mBaseFunction;
if (redirection.mBaseFunction && IsRecordingOrReplaying()) {
// We will get confused if we try to redirect the same address in multiple places.
for (size_t j = 0; j < i; j++) {
if (gRedirections[j].mBaseFunction == redirection.mBaseFunction) {
PrintSpew("Redirection %s shares the same address as %s, skipping.\n",
redirection.mName, gRedirections[j].mName);
redirection.mBaseFunction = nullptr;
break;
}
}
}
}
}
bool
InitializeRedirections()
{
MOZ_ASSERT(IsRecordingOrReplaying());
{
Assembler assembler;
for (size_t i = 0;; i++) {
Redirection& redirection = gRedirections[i];
if (!redirection.mName) {
break;
}
Redirect(redirection, assembler, /* aFirstPass = */ true);
}
for (size_t i = 0;; i++) {
Redirection& redirection = gRedirections[i];
if (!redirection.mName) {
break;
}
Redirect(redirection, assembler, /* aFirstPass = */ false);
}
}
// Don't install redirections if we had any failures.
if (!gRedirectFailures.empty()) {
size_t len = 4096;
gInitializationFailureMessage = new char[4096];
gInitializationFailureMessage[--len] = 0;
char* ptr = gInitializationFailureMessage;
for (char* reason : gRedirectFailures) {
size_t n = snprintf(ptr, len, "%s\n", reason);
if (n >= len) {
break;
}
ptr += n;
len -= n;
}
return false;
}
// Remove write protection from all patched regions, so that we don't call
// into the system while we are in the middle of redirecting.
for (const JumpPatch& patch : gJumpPatches) {
UnprotectExecutableMemory(patch.mStart, patch.mShort ? ShortJumpBytes : JumpBytesClobberRax);
}
for (const ClobberPatch& patch : gClobberPatches) {
UnprotectExecutableMemory(patch.mStart, patch.mEnd - patch.mStart);
}
// Do the actual patching of executable code for the functions we are
// redirecting.
for (const JumpPatch& patch : gJumpPatches) {
if (patch.mShort) {
Assembler::PatchShortJump(patch.mStart, patch.mTarget);
} else {
Assembler::PatchJumpClobberRax(patch.mStart, patch.mTarget);
}
}
for (const ClobberPatch& patch : gClobberPatches) {
for (uint8_t* ip = patch.mStart; ip < patch.mEnd; ip++) {
Assembler::PatchClobber(ip);
}
}
return true;
}
///////////////////////////////////////////////////////////////////////////////
// Utility
///////////////////////////////////////////////////////////////////////////////
Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gMemoryLeakBytes;
void*
BindFunctionArgument(void* aFunction, void* aArgument, size_t aArgumentPosition,
Assembler& aAssembler)
{
void* res = aAssembler.Current();
// On x64 the argument will be in a register, so to add an extra argument for
// the callee we just need to fill in the appropriate register for the
// argument position with the bound argument value.
aAssembler.MoveImmediateToRax(aArgument);
switch (aArgumentPosition) {
case 1: aAssembler.MoveRaxToRegister(UD_R_RSI); break;
case 2: aAssembler.MoveRaxToRegister(UD_R_RDX); break;
case 3: aAssembler.MoveRaxToRegister(UD_R_RCX); break;
default: MOZ_CRASH();
}
// Jump to the function that was bound.
aAssembler.Jump(aFunction);
return res;
}
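// Hypothetical usage sketch (names are placeholders): produce a thunk that
// supplies |closure| as the second argument of |MyCallback| before jumping to
// it, for handing to an API that only accepts a bare function pointer. Assumes
// an Assembler |assembler| with executable memory is available.
//
//   void* thunk = BindFunctionArgument(BitwiseCast<void*>(MyCallback), closure,
//                                      /* aArgumentPosition = */ 1, assembler);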
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,774 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ProcessRedirect_h
#define mozilla_recordreplay_ProcessRedirect_h
#include "Assembler.h"
#include "Callback.h"
#include "CallFunction.h"
#include "ProcessRecordReplay.h"
#include "ProcessRewind.h"
#include "Thread.h"
#include "ipc/Channel.h"
#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Casting.h"
#include <errno.h>
namespace mozilla {
namespace recordreplay {
// Redirections Overview.
//
// The vast majority of recording and replaying is done through function
// redirections. When the record/replay system is initialized, a set of system
// library API functions have their machine code modified so that when that API
// is called it redirects control to a custom record/replay function with the
// same signature. Machine code is also generated that contains any overwritten
// instructions in the API, and which may be called to get the API's original
// behavior before it was redirected.
//
// In the usual case, a record/replay function redirection does the following
// standard steps:
//
// 1. If events are being passed through, the original function is called and
// its results returned to the caller, as if the redirection was not there
// at all.
//
// 2. If events are not passed through and we are recording, the original
// function is called, and then an event is recorded for the current thread
// along with any outputs produced by the call.
//
// 3. If events are not passed through and we are replaying, the original
// function is *not* called, but rather the event and outputs are read from
// the recording and sent back to the caller.
//
// Macros are provided below to streamline this process. Redirections do not
// need to adhere to this protocol, however, and can have whatever behaviors
// that are necessary for reliable record/replay.
//
// Some platforms need additional redirection techniques for handling different
// features of that platform. See the individual ProcessRedirect*.cpp files for
// descriptions of these.
//
// The main advantage of using redirections is that Gecko code does not need to
// be modified at all to take advantage of them. Redirected APIs should be
// functions that are directly called by Gecko code and are part of system
// libraries. These APIs are well defined, well documented by the platform, and
// stable. The main maintenance burden going forward is in handling new library
// APIs that were not previously called by Gecko.
//
// The main risk with using function redirections is that the set of redirected
// functions is incomplete. If a library API is not redirected then it might
// behave differently between recording and replaying, or it might crash while
// replaying.
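// A minimal sketch of what a redirection following the standard steps looks
// like, using the macros defined later in this header. The function name
// |FooRead|, its call event ID |CallEvent_FooRead|, and its signature are
// assumptions for illustration only, not an actual redirection in the tree.
//
//   static ssize_t
//   RR_FooRead(int aFd, void* aBuf, size_t aCount)
//   {
//     // Performs steps 1-3: pass through, or call the original and record,
//     // or skip the original and replay its outputs from the recording.
//     RecordReplayFunction(FooRead, ssize_t, aFd, aBuf, aCount);
//     events.RecordOrReplayValue(&rval);
//     if (rval > 0) {
//       events.RecordOrReplayBytes(aBuf, rval);
//     }
//     return rval;
//   }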
///////////////////////////////////////////////////////////////////////////////
// Function Redirections
///////////////////////////////////////////////////////////////////////////////
// Information about a system library API function which is being redirected.
struct Redirection
{
// Name of the function being redirected.
const char* mName;
// Address of the function which is being redirected. The code for this
// function is modified so that attempts to call this function will instead
// call mNewFunction.
uint8_t* mBaseFunction;
// Function with the same signature as mBaseFunction, which may have
// different behavior for recording/replaying the call.
uint8_t* mNewFunction;
// Function with the same signature and original behavior as
// mBaseFunction.
uint8_t* mOriginalFunction;
};
// All platform specific redirections, indexed by the call event.
extern Redirection gRedirections[];
// Do early initialization of redirections. This is done in both
// recording/replaying and middleman processes, and allows OriginalCall() to
// work in either case.
void EarlyInitializeRedirections();
// Set up all platform specific redirections, or fail and set
// gInitializationFailureMessage.
bool InitializeRedirections();
// Generic type for a system error code.
typedef ssize_t ErrorType;
// Functions for saving or restoring system error codes.
static inline ErrorType SaveError() { return errno; }
static inline void RestoreError(ErrorType aError) { errno = aError; }
// Specify the default ABI to use by the record/replay macros below.
#define DEFAULTABI
// Define CallFunction(...) for all supported ABIs.
DefineAllCallFunctions(DEFAULTABI)
// Get the address of the original function for a call event ID.
static inline void*
OriginalFunction(size_t aCallId)
{
return gRedirections[aCallId].mOriginalFunction;
}
#define TokenPaste(aFirst, aSecond) aFirst ## aSecond
// Call the original function for a call event ID with a particular ABI and any
// number of arguments.
#define OriginalCallABI(aName, aReturnType, aABI, ...) \
TokenPaste(CallFunction, aABI) <aReturnType> \
(OriginalFunction(CallEvent_ ##aName), ##__VA_ARGS__)
// Call the original function for a call event ID with the default ABI.
#define OriginalCall(aName, aReturnType, ...) \
OriginalCallABI(aName, aReturnType, DEFAULTABI, ##__VA_ARGS__)
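// Illustrative sketch (the call event name is an assumption): calling through
// to the original, unredirected behavior of a redirected API, e.g. from inside
// its own redirection while recording.
//
//   ssize_t rv = OriginalCall(FooRead, ssize_t, aFd, aBuf, aCount);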
// State for a function redirection which performs the standard steps (see the
// comment at the start of this file). This should not be created directly, but
// rather through one of the macros below.
struct AutoRecordReplayFunctionVoid
{
// The current thread, or null if events are being passed through.
Thread* mThread;
// Any system error generated by the call which was redirected.
ErrorType mError;
protected:
// Information about the call being recorded.
size_t mCallId;
const char* mCallName;
public:
AutoRecordReplayFunctionVoid(size_t aCallId, const char* aCallName)
: mThread(AreThreadEventsPassedThrough() ? nullptr : Thread::Current()),
mError(0), mCallId(aCallId), mCallName(aCallName)
{
if (mThread) {
// Calling any redirection which performs the standard steps will cause
// debugger operations that have diverged from the recording to fail.
EnsureNotDivergedFromRecording();
MOZ_ASSERT(!AreThreadEventsDisallowed());
// Pass through events in case we are calling the original function.
mThread->SetPassThrough(true);
}
}
~AutoRecordReplayFunctionVoid()
{
if (mThread) {
// Restore any error saved or replayed earlier to the system.
RestoreError(mError);
}
}
// Begin recording or replaying data for the call. This must be called before
// destruction if mThread is non-null.
inline void StartRecordReplay() {
MOZ_ASSERT(mThread);
// Save any system error in case we want to record/replay it.
mError = SaveError();
// Stop the pass-through of events that was started in the constructor.
mThread->SetPassThrough(false);
// Add an event for the thread.
RecordReplayAssert("%s", mCallName);
ThreadEvent ev = (ThreadEvent)((uint32_t)ThreadEvent::CallStart + mCallId);
mThread->Events().RecordOrReplayThreadEvent(ev);
}
};
// State for a function redirection that performs the standard steps and also
// returns a value.
template <typename ReturnType>
struct AutoRecordReplayFunction : AutoRecordReplayFunctionVoid
{
// The value which this function call should return.
ReturnType mRval;
AutoRecordReplayFunction(size_t aCallId, const char* aCallName)
: AutoRecordReplayFunctionVoid(aCallId, aCallName)
{}
};
// Macros for recording or replaying a function that performs the standard
// steps. These macros should be used near the start of the body of a
// redirection function, and will fall through only if events are not
// passed through and the outputs of the function need to be recorded or
// replayed.
//
// These macros define an AutoRecordReplayFunction local |rrf| with state for
// the redirection, and additional locals |events| and (if the function has a
// return value) |rval| for convenient access.
// Record/replay a function that returns a value and has a particular ABI.
#define RecordReplayFunctionABI(aName, aReturnType, aABI, ...) \
AutoRecordReplayFunction<aReturnType> rrf(CallEvent_ ##aName, #aName); \
if (!rrf.mThread) { \
return OriginalCallABI(aName, aReturnType, aABI, ##__VA_ARGS__); \
} \
if (IsRecording()) { \
rrf.mRval = OriginalCallABI(aName, aReturnType, aABI, ##__VA_ARGS__); \
} \
rrf.StartRecordReplay(); \
Stream& events = rrf.mThread->Events(); \
(void) events; \
aReturnType& rval = rrf.mRval
// Record/replay a function that returns a value and has the default ABI.
#define RecordReplayFunction(aName, aReturnType, ...) \
RecordReplayFunctionABI(aName, aReturnType, DEFAULTABI, ##__VA_ARGS__)
// Record/replay a function that has no return value and has a particular ABI.
#define RecordReplayFunctionVoidABI(aName, aABI, ...) \
AutoRecordReplayFunctionVoid rrf(CallEvent_ ##aName, #aName); \
if (!rrf.mThread) { \
OriginalCallABI(aName, void, aABI, ##__VA_ARGS__); \
return; \
} \
if (IsRecording()) { \
OriginalCallABI(aName, void, aABI, ##__VA_ARGS__); \
} \
rrf.StartRecordReplay(); \
Stream& events = rrf.mThread->Events(); \
(void) events
// Record/replay a function that has no return value and has the default ABI.
#define RecordReplayFunctionVoid(aName, ...) \
RecordReplayFunctionVoidABI(aName, DEFAULTABI, ##__VA_ARGS__)
// The following macros are used for functions that do not record an error and
// take or return values of specified types.
//
// aAT == aArgumentType
// aRT == aReturnType
#define RRFunctionTypes0(aName, aRT) \
static aRT DEFAULTABI \
RR_ ##aName () \
{ \
RecordReplayFunction(aName, aRT); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes1(aName, aRT, aAT0) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0) \
{ \
RecordReplayFunction(aName, aRT, a0); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes2(aName, aRT, aAT0, aAT1) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1) \
{ \
RecordReplayFunction(aName, aRT, a0, a1); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes3(aName, aRT, aAT0, aAT1, aAT2) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes4(aName, aRT, aAT0, aAT1, aAT2, aAT3) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes5(aName, aRT, aAT0, aAT1, aAT2, aAT3, \
aAT4) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes6(aName, aRT, aAT0, aAT1, aAT2, aAT3, \
aAT4, aAT5) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4, \
aAT5 a5) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes7(aName, aRT, aAT0, aAT1, aAT2, aAT3, \
aAT4, aAT5, aAT6) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4, \
aAT5 a5, aAT6 a6) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes8(aName, aRT, aAT0, aAT1, aAT2, aAT3, \
aAT4, aAT5, aAT6, aAT7) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4, \
aAT5 a5, aAT6 a6, aAT7 a7) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes9(aName, aRT, aAT0, aAT1, aAT2, aAT3, \
aAT4, aAT5, aAT6, aAT7, aAT8) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4, \
aAT5 a5, aAT6 a6, aAT7 a7, aAT8 a8) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7, a8); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypes10(aName, aRT, aAT0, aAT1, aAT2, aAT3, \
aAT4, aAT5, aAT6, aAT7, aAT8, aAT9) \
static aRT DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4, \
aAT5 a5, aAT6 a6, aAT7 a7, aAT8 a8, aAT9 a9) \
{ \
RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); \
events.RecordOrReplayValue(&rval); \
return rval; \
}
#define RRFunctionTypesVoid1(aName, aAT0) \
static void DEFAULTABI \
RR_ ##aName (aAT0 a0) \
{ \
RecordReplayFunctionVoid(aName, a0); \
}
#define RRFunctionTypesVoid2(aName, aAT0, aAT1) \
static void DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1) \
{ \
RecordReplayFunctionVoid(aName, a0, a1); \
}
#define RRFunctionTypesVoid3(aName, aAT0, aAT1, aAT2) \
static void DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2) \
{ \
RecordReplayFunctionVoid(aName, a0, a1, a2); \
}
#define RRFunctionTypesVoid4(aName, aAT0, aAT1, aAT2, aAT3) \
static void DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3) \
{ \
RecordReplayFunctionVoid(aName, a0, a1, a2, a3); \
}
#define RRFunctionTypesVoid5(aName, aAT0, aAT1, aAT2, aAT3, aAT4) \
static void DEFAULTABI \
RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4) \
{ \
RecordReplayFunctionVoid(aName, a0, a1, a2, a3, a4); \
}
// The following macros are used for functions that take and return scalar
// values (not a struct or a floating point) and do not record an error
// anywhere.
#define RRFunction0(aName) \
RRFunctionTypes0(aName, size_t)
#define RRFunction1(aName) \
RRFunctionTypes1(aName, size_t, size_t)
#define RRFunction2(aName) \
RRFunctionTypes2(aName, size_t, size_t, size_t)
#define RRFunction3(aName) \
RRFunctionTypes3(aName, size_t, size_t, size_t, size_t)
#define RRFunction4(aName) \
RRFunctionTypes4(aName, size_t, size_t, size_t, size_t, size_t)
#define RRFunction5(aName) \
RRFunctionTypes5(aName, size_t, size_t, size_t, size_t, size_t, size_t)
#define RRFunction6(aName) \
RRFunctionTypes6(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
#define RRFunction7(aName) \
RRFunctionTypes7(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
#define RRFunction8(aName) \
RRFunctionTypes8(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
size_t)
#define RRFunction9(aName) \
RRFunctionTypes9(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
size_t, size_t)
#define RRFunction10(aName) \
RRFunctionTypes10(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
size_t, size_t, size_t)
// The following macros are used for functions that take scalar arguments and
// do not return a value or record an error anywhere.
#define RRFunctionVoid0(aName) \
static void DEFAULTABI \
RR_ ##aName () \
{ \
RecordReplayFunctionVoid(aName); \
}
#define RRFunctionVoid1(aName) \
RRFunctionTypesVoid1(aName, size_t)
#define RRFunctionVoid2(aName) \
RRFunctionTypesVoid2(aName, size_t, size_t)
#define RRFunctionVoid3(aName) \
RRFunctionTypesVoid3(aName, size_t, size_t, size_t)
#define RRFunctionVoid4(aName) \
RRFunctionTypesVoid4(aName, size_t, size_t, size_t, size_t)
#define RRFunctionVoid5(aName) \
RRFunctionTypesVoid5(aName, size_t, size_t, size_t, size_t, size_t)
// The following macros are used for functions that return a signed integer
// value and record an error if the return value is negative.
#define RRFunctionNegError0(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName () \
{ \
RecordReplayFunction(aName, ssize_t); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
#define RRFunctionNegError1(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName (size_t a0) \
{ \
RecordReplayFunction(aName, ssize_t, a0); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
#define RRFunctionNegError2(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName (size_t a0, size_t a1) \
{ \
RecordReplayFunction(aName, ssize_t, a0, a1); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
#define RRFunctionNegError3(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName (size_t a0, size_t a1, size_t a2) \
{ \
RecordReplayFunction(aName, ssize_t, a0, a1, a2); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
#define RRFunctionNegError4(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3) \
{ \
RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
#define RRFunctionNegError5(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3, \
size_t a4) \
{ \
RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3, a4); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
#define RRFunctionNegError6(aName) \
static ssize_t DEFAULTABI \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3, \
size_t a4, size_t a5) \
{ \
RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3, a4, a5); \
RecordOrReplayHadErrorNegative(rrf); \
return rval; \
}
// The following macros are used for functions that return an integer
// value and record an error if the return value is zero.
#define RRFunctionZeroError0(aName) \
static size_t __stdcall \
RR_ ##aName () \
{ \
RecordReplayFunction(aName, size_t); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError1(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0) \
{ \
RecordReplayFunction(aName, size_t, a0); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroErrorABI2(aName, aABI) \
static size_t aABI \
RR_ ##aName (size_t a0, size_t a1) \
{ \
RecordReplayFunctionABI(aName, size_t, aABI, a0, a1); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError2(aName) RRFunctionZeroErrorABI2(aName, DEFAULTABI)
#define RRFunctionZeroError3(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0, size_t a1, size_t a2) \
{ \
RecordReplayFunction(aName, size_t, a0, a1, a2); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError4(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3) \
{ \
RecordReplayFunction(aName, size_t, a0, a1, a2, a3); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError5(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3, \
size_t a4) \
{ \
RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError6(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3, \
size_t a4, size_t a5) \
{ \
RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4, a5); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError7(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3, \
size_t a4, size_t a5, size_t a6) \
{ \
RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4, a5, a6); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
#define RRFunctionZeroError8(aName) \
static size_t __stdcall \
RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3, \
size_t a4, size_t a5, size_t a6, size_t a7) \
{ \
RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4, a5, a6, a7); \
RecordOrReplayHadErrorZero(rrf); \
return rval; \
}
// Recording template for functions which are used for inter-thread
// synchronization and must be replayed in the original order they executed in.
#define RecordReplayOrderedFunction(aName, aReturnType, aFailureRval, aFormals, ...) \
static aReturnType DEFAULTABI \
RR_ ## aName aFormals \
{ \
BeginOrderedEvent(); /* This is a noop if !mThread */ \
RecordReplayFunction(aName, aReturnType, __VA_ARGS__); \
EndOrderedEvent(); \
events.RecordOrReplayValue(&rval); \
if (rval == aFailureRval) { \
events.RecordOrReplayValue(&rrf.mError); \
} \
return rval; \
}
///////////////////////////////////////////////////////////////////////////////
// Callback Redirections
///////////////////////////////////////////////////////////////////////////////
// Below are helpers for use in handling a common callback pattern used within
// redirections: the system is passed a pointer to a Gecko callback, and a
// pointer to some opaque Gecko data which the system will pass to the callback
// when invoking it.
//
// This pattern may be handled by replacing the Gecko callback with a callback
// wrapper (see Callback.h), and replacing the opaque Gecko data with a pointer
// to a CallbackWrapperData structure, which contains both the original Gecko
// callback to use and the data which should be passed to it.
//
// The RecordReplayCallback is used early in the callback wrapper to save and
// restore both the Gecko callback and its opaque data pointer.
struct CallbackWrapperData
{
void* mFunction;
void* mData;
template <typename FunctionType>
CallbackWrapperData(FunctionType aFunction, void* aData)
: mFunction(BitwiseCast<void*>(aFunction)), mData(aData)
{}
};
// This class should not be used directly, but rather through the macro below.
template <typename FunctionType>
struct AutoRecordReplayCallback
{
FunctionType mFunction;
AutoRecordReplayCallback(void** aDataArgument, size_t aCallbackId)
: mFunction(nullptr)
{
MOZ_ASSERT(IsRecordingOrReplaying());
if (IsRecording()) {
CallbackWrapperData* wrapperData = (CallbackWrapperData*) *aDataArgument;
mFunction = (FunctionType) wrapperData->mFunction;
*aDataArgument = wrapperData->mData;
BeginCallback(aCallbackId);
}
SaveOrRestoreCallbackData((void**)&mFunction);
SaveOrRestoreCallbackData(aDataArgument);
}
~AutoRecordReplayCallback() {
if (IsRecording()) {
EndCallback();
}
}
};
// Macro for using AutoRecordReplayCallback.
#define RecordReplayCallback(aFunctionType, aDataArgument) \
AutoRecordReplayCallback<aFunctionType> rrc(aDataArgument, CallbackEvent_ ##aFunctionType)
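// Illustrative sketch (for exposition only): a hypothetical wrapper for a
// callback of type |void (*)(void*)|. MyCallbackType and
// CallbackEvent_MyCallbackType are hypothetical names. While recording, the
// system invokes the wrapper with a CallbackWrapperData* in aData, and the
// macro swaps the original callback and its data back in before the call.
#if 0
typedef void (*MyCallbackType)(void*);

static void
MyCallbackWrapper(void* aData)
{
  RecordReplayCallback(MyCallbackType, &aData);
  rrc.mFunction(aData);
}
#endif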
///////////////////////////////////////////////////////////////////////////////
// Redirection Helpers
///////////////////////////////////////////////////////////////////////////////
// Read/write a success code (where zero is failure) and errno value on failure.
template <typename T>
static inline bool
RecordOrReplayHadErrorZero(AutoRecordReplayFunction<T>& aRrf)
{
aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mRval);
if (aRrf.mRval == 0) {
aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mError);
return true;
}
return false;
}
// Read/write a success code (where negative values are failure) and errno value on failure.
template <typename T>
static inline bool
RecordOrReplayHadErrorNegative(AutoRecordReplayFunction<T>& aRrf)
{
aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mRval);
if (aRrf.mRval < 0) {
aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mError);
return true;
}
return false;
}
extern Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gMemoryLeakBytes;
// For allocating memory in redirections that will never be reclaimed. This is
// done for simplicity. If the amount of leaked memory from redirected calls
// grows too large then steps can be taken to more closely emulate the library
// behavior.
template <typename T>
static inline T*
NewLeakyArray(size_t aSize)
{
gMemoryLeakBytes += aSize * sizeof(T);
return new T[aSize];
}
///////////////////////////////////////////////////////////////////////////////
// Other Redirection Interfaces
///////////////////////////////////////////////////////////////////////////////
// Given an argument function aFunction, generate code for a new function that
// takes one fewer argument than aFunction and then calls aFunction with all
// its arguments and the aArgument value in the last argument position.
//
// i.e. if aFunction has the signature: size_t (*)(void*, void*, void*);
//
// Then BindFunctionArgument(aFunction, aArgument, 2) produces this function:
//
// size_t result(void* a0, void* a1) {
// return aFunction(a0, a1, aArgument);
// }
//
// Supported positions for the bound argument are 1, 2, and 3.
void*
BindFunctionArgument(void* aFunction, void* aArgument, size_t aArgumentPosition,
Assembler& aAssembler);
} // recordreplay
} // mozilla
#endif // mozilla_recordreplay_ProcessRedirect_h

The diff between the files is not shown because of its large size. Load diff

View file

@@ -0,0 +1,324 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProcessRewind.h"
#include "nsString.h"
#include "ipc/ChildInternal.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/StaticMutex.h"
#include "InfallibleVector.h"
#include "MemorySnapshot.h"
#include "Monitor.h"
#include "ProcessRecordReplay.h"
#include "ThreadSnapshot.h"
namespace mozilla {
namespace recordreplay {
// Information about the current rewinding state. The contents of this structure
// are in untracked memory.
struct RewindInfo
{
// The most recent checkpoint which was encountered.
CheckpointId mLastCheckpoint;
// Whether this is the active child process. See the comment under
// 'Child Roles' in ParentIPC.cpp.
bool mIsActiveChild;
// Checkpoints which have been saved. This includes only entries from
// mShouldSaveCheckpoints, plus all temporary checkpoints.
InfallibleVector<SavedCheckpoint, 1024, AllocPolicy<MemoryKind::Generic>> mSavedCheckpoints;
// Unsorted list of checkpoints which the middleman has instructed us to
// save. All those equal to or prior to mLastCheckpoint will have been saved.
InfallibleVector<size_t, 1024, AllocPolicy<MemoryKind::Generic>> mShouldSaveCheckpoints;
};
static RewindInfo* gRewindInfo;
// Lock for managing pending main thread callbacks.
static Monitor* gMainThreadCallbackMonitor;
// Callbacks to execute on the main thread, in FIFO order. Protected by
// gMainThreadCallbackMonitor.
static StaticInfallibleVector<std::function<void()>> gMainThreadCallbacks;
void
InitializeRewindState()
{
MOZ_RELEASE_ASSERT(gRewindInfo == nullptr);
void* memory = AllocateMemory(sizeof(RewindInfo), MemoryKind::Generic);
gRewindInfo = new(memory) RewindInfo();
gMainThreadCallbackMonitor = new Monitor();
}
static bool
CheckpointPrecedes(const CheckpointId& aFirst, const CheckpointId& aSecond)
{
return aFirst.mNormal < aSecond.mNormal || aFirst.mTemporary < aSecond.mTemporary;
}
void
RestoreCheckpointAndResume(const CheckpointId& aCheckpoint)
{
MOZ_RELEASE_ASSERT(IsReplaying());
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(aCheckpoint == gRewindInfo->mLastCheckpoint ||
CheckpointPrecedes(aCheckpoint, gRewindInfo->mLastCheckpoint));
// Make sure we don't lose pending main thread callbacks due to rewinding.
{
MonitorAutoLock lock(*gMainThreadCallbackMonitor);
MOZ_RELEASE_ASSERT(gMainThreadCallbacks.empty());
}
Thread::WaitForIdleThreads();
double start = CurrentTime();
// Rewind heap memory to the target checkpoint, which must have been saved.
CheckpointId newCheckpoint = gRewindInfo->mSavedCheckpoints.back().mCheckpoint;
RestoreMemoryToLastSavedCheckpoint();
while (CheckpointPrecedes(aCheckpoint, newCheckpoint)) {
gRewindInfo->mSavedCheckpoints.back().ReleaseContents();
gRewindInfo->mSavedCheckpoints.popBack();
RestoreMemoryToLastSavedDiffCheckpoint();
newCheckpoint = gRewindInfo->mSavedCheckpoints.back().mCheckpoint;
}
MOZ_RELEASE_ASSERT(newCheckpoint == aCheckpoint);
FixupFreeRegionsAfterRewind();
double end = CurrentTime();
PrintSpew("Restore #%d:%d -> #%d:%d %.2fs\n",
(int) gRewindInfo->mLastCheckpoint.mNormal,
(int) gRewindInfo->mLastCheckpoint.mTemporary,
(int) newCheckpoint.mNormal,
(int) newCheckpoint.mTemporary,
(end - start) / 1000000.0);
// Finally, let threads restore themselves to their stacks at the checkpoint
// we are rewinding to.
RestoreAllThreads(gRewindInfo->mSavedCheckpoints.back());
Unreachable();
}
void
SetSaveCheckpoint(size_t aCheckpoint, bool aSave)
{
MOZ_RELEASE_ASSERT(aCheckpoint > gRewindInfo->mLastCheckpoint.mNormal);
VectorAddOrRemoveEntry(gRewindInfo->mShouldSaveCheckpoints, aCheckpoint, aSave);
}
bool
NewCheckpoint(bool aTemporary)
{
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(IsReplaying() || !aTemporary);
navigation::BeforeCheckpoint();
// Get the ID of the new checkpoint.
CheckpointId checkpoint = gRewindInfo->mLastCheckpoint.NextCheckpoint(aTemporary);
// Save all checkpoints the middleman tells us to, and temporary checkpoints
// (which the middleman never knows about).
bool save = aTemporary
|| VectorContains(gRewindInfo->mShouldSaveCheckpoints, checkpoint.mNormal);
bool reachedCheckpoint = true;
if (save) {
Thread::WaitForIdleThreads();
PrintSpew("Starting checkpoint...\n");
double start = CurrentTime();
// Record either the first or a subsequent diff memory snapshot.
if (gRewindInfo->mSavedCheckpoints.empty()) {
TakeFirstMemorySnapshot();
} else {
TakeDiffMemorySnapshot();
}
gRewindInfo->mSavedCheckpoints.emplaceBack(checkpoint);
double end = CurrentTime();
// Save all thread stacks for the checkpoint. If we rewind here from a
// later point of execution then this will return false.
if (SaveAllThreads(gRewindInfo->mSavedCheckpoints.back())) {
PrintSpew("Saved checkpoint #%d:%d %.2fs\n",
(int) checkpoint.mNormal, (int) checkpoint.mTemporary,
(end - start) / 1000000.0);
} else {
PrintSpew("Restored checkpoint #%d:%d\n",
(int) checkpoint.mNormal, (int) checkpoint.mTemporary);
reachedCheckpoint = false;
// After restoring, make sure all threads have updated their stacks
// before letting any of them resume execution. Threads might have
// pointers into each others' stacks.
WaitForIdleThreadsToRestoreTheirStacks();
}
Thread::ResumeIdleThreads();
}
gRewindInfo->mLastCheckpoint = checkpoint;
navigation::AfterCheckpoint(checkpoint);
return reachedCheckpoint;
}
static bool gRecordingDiverged;
static bool gUnhandledDivergeAllowed;
void
DivergeFromRecording()
{
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(IsReplaying());
gRecordingDiverged = true;
gUnhandledDivergeAllowed = true;
}
extern "C" {
MOZ_EXPORT bool
RecordReplayInterface_InternalHasDivergedFromRecording()
{
return Thread::CurrentIsMainThread() && gRecordingDiverged;
}
} // extern "C"
void
DisallowUnhandledDivergeFromRecording()
{
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
gUnhandledDivergeAllowed = false;
}
void
EnsureNotDivergedFromRecording()
{
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
if (HasDivergedFromRecording()) {
MOZ_RELEASE_ASSERT(gUnhandledDivergeAllowed);
PrintSpew("Unhandled recording divergence, restoring checkpoint...\n");
RestoreCheckpointAndResume(gRewindInfo->mSavedCheckpoints.back().mCheckpoint);
Unreachable();
}
}
bool
HasSavedCheckpoint()
{
return gRewindInfo && !gRewindInfo->mSavedCheckpoints.empty();
}
CheckpointId
GetLastSavedCheckpoint()
{
MOZ_RELEASE_ASSERT(HasSavedCheckpoint());
return gRewindInfo->mSavedCheckpoints.back().mCheckpoint;
}
static bool gMainThreadShouldPause = false;
bool
MainThreadShouldPause()
{
return gMainThreadShouldPause;
}
void
PauseMainThreadAndServiceCallbacks()
{
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(!gRecordingDiverged);
// Whether there is a PauseMainThreadAndServiceCallbacks frame on the stack.
static bool gMainThreadIsPaused = false;
if (gMainThreadIsPaused) {
return;
}
gMainThreadIsPaused = true;
MonitorAutoLock lock(*gMainThreadCallbackMonitor);
// Loop and invoke callbacks until one of them unpauses this thread.
while (gMainThreadShouldPause) {
if (!gMainThreadCallbacks.empty()) {
std::function<void()> callback = gMainThreadCallbacks[0];
gMainThreadCallbacks.erase(&gMainThreadCallbacks[0]);
{
MonitorAutoUnlock unlock(*gMainThreadCallbackMonitor);
AutoDisallowThreadEvents disallow;
callback();
}
} else {
gMainThreadCallbackMonitor->Wait();
}
}
// As for RestoreCheckpointAndResume, we shouldn't resume the main thread while
// it still has callbacks to execute.
MOZ_RELEASE_ASSERT(gMainThreadCallbacks.empty());
// If we diverge from the recording the only way we can get back to resuming
// normal execution is to rewind to a checkpoint prior to the divergence.
MOZ_RELEASE_ASSERT(!gRecordingDiverged);
gMainThreadIsPaused = false;
}
void
PauseMainThreadAndInvokeCallback(const std::function<void()>& aCallback)
{
{
MonitorAutoLock lock(*gMainThreadCallbackMonitor);
gMainThreadShouldPause = true;
gMainThreadCallbacks.append(aCallback);
gMainThreadCallbackMonitor->Notify();
}
if (Thread::CurrentIsMainThread()) {
PauseMainThreadAndServiceCallbacks();
}
}
void
ResumeExecution()
{
MonitorAutoLock lock(*gMainThreadCallbackMonitor);
gMainThreadShouldPause = false;
gMainThreadCallbackMonitor->Notify();
}
void
SetIsActiveChild(bool aActive)
{
gRewindInfo->mIsActiveChild = aActive;
}
bool
IsActiveChild()
{
return gRewindInfo->mIsActiveChild;
}
} // namespace recordreplay
} // namespace mozilla

View file

@@ -0,0 +1,184 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ProcessRewind_h
#define mozilla_recordreplay_ProcessRewind_h
#include "mozilla/RecordReplay.h"
#include <functional>
namespace mozilla {
namespace recordreplay {
// This file is responsible for keeping track of and managing the current point
// of execution when replaying an execution, and in allowing the process to
// rewind its state to an earlier point of execution.
///////////////////////////////////////////////////////////////////////////////
// Checkpoints Overview.
//
// Checkpoints are reached periodically by the main thread of a recording or
// replaying process. Checkpoints must be reached at consistent points between
// different executions of the recording. Currently they are taken after XPCOM
// initialization and every time compositor updates are performed. Each
// checkpoint has an ID, which monotonically increases during the execution.
// Checkpoints form a basis for identifying a particular point in execution,
// and in allowing replaying processes to rewind themselves.
//
// A subset of checkpoints are saved: the contents of each thread's stack is
// copied, along with enough information to restore the contents of heap memory
// at the checkpoint.
//
// Saved checkpoints are in part represented as diffs vs the following
// saved checkpoint. This requires some different handling for the most recent
// saved checkpoint (whose diff has not been computed) and earlier saved
// checkpoints. See MemorySnapshot.h and Thread.h for more on how saved
// checkpoints are represented.
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Controlling a Replaying Process.
//
// 1. While performing the replay, execution proceeds until the main thread
// hits either a breakpoint or a checkpoint.
//
// 2. The main thread then calls a hook (JS::replay::hooks.hitBreakpointReplay
// or gAfterCheckpointHook), which may decide to pause the main thread and
// give it a callback to invoke using PauseMainThreadAndInvokeCallback.
//
// 3. Now that the main thread is paused, the replay message loop thread
// (see ChildIPC.h) can give it additional callbacks to invoke using
// PauseMainThreadAndInvokeCallback.
//
// 4. These callbacks can inspect the paused state, diverge from the recording
// by calling DivergeFromRecording, and eventually can unpause the main
// thread and allow execution to resume by calling ResumeExecution
// (if DivergeFromRecording was not called) or RestoreCheckpointAndResume.
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Recording Divergence.
//
// Callbacks invoked while debugging (during step 3 of the above comment) might
// try to interact with the system, triggering thread events and attempting to
// replay behaviors that never occurred while recording.
//
// To allow these callbacks the freedom to operate without bringing down the
// entire replay, the DivergeFromRecording API is provided; see RecordReplay.h.
// After this is called, some thread events will happen as if events were
// passed through, but other events that require interacting with the system
// will trigger an unhandled divergence from the recording via
// EnsureNotDivergedFromRecording, causing the process to rewind to the most
// recent saved checkpoint. The debugger will recognize this rewind and play
// back in a way that restores the state when DivergeFromRecording() was
// called, but without performing the later operation that triggered the
// rewind.
///////////////////////////////////////////////////////////////////////////////
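// Illustrative sketch (for exposition only): a callback driven from the replay
// message loop thread might follow the steps above like this.
// InspectPausedState is a hypothetical name; the other calls are declared
// later in this header.
#if 0
static void
OnHitBreakpoint()
{
  PauseMainThreadAndInvokeCallback([]() {
    DivergeFromRecording();
    InspectPausedState(); // May trigger an unhandled divergence and rewind.
    // Having diverged, the only way to resume normal execution is to restore
    // an earlier saved checkpoint.
    RestoreCheckpointAndResume(GetLastSavedCheckpoint());
  });
}
#endif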
// The ID of a checkpoint in a child process. Checkpoints are either normal or
// temporary. Normal checkpoints occur at the same point in the recording and
// all replays, while temporary checkpoints are not used while recording and
// may be at different points in different replays.
struct CheckpointId
{
// ID of the most recent normal checkpoint. Normal checkpoints are numbered in
// sequence starting at FirstCheckpointId.
size_t mNormal;
// Special IDs for normal checkpoints.
static const size_t Invalid = 0;
static const size_t First = 1;
// How many temporary checkpoints have been generated since the most recent
// normal checkpoint, zero if this represents the normal checkpoint itself.
size_t mTemporary;
explicit CheckpointId(size_t aNormal = Invalid, size_t aTemporary = 0)
: mNormal(aNormal), mTemporary(aTemporary)
{}
inline bool operator==(const CheckpointId& o) const {
return mNormal == o.mNormal && mTemporary == o.mTemporary;
}
inline bool operator!=(const CheckpointId& o) const {
return mNormal != o.mNormal || mTemporary != o.mTemporary;
}
CheckpointId NextCheckpoint(bool aTemporary) const {
return CheckpointId(aTemporary ? mNormal : mNormal + 1,
aTemporary ? mTemporary + 1 : 0);
}
};
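// For illustration, NextCheckpoint advances IDs as in the following sequence,
// using the #normal:temporary notation from the spew output in
// ProcessRewind.cpp:
//
//   CheckpointId c(CheckpointId::First);            // #1:0
//   c = c.NextCheckpoint(/* aTemporary = */ true);  // #1:1
//   c = c.NextCheckpoint(/* aTemporary = */ true);  // #1:2
//   c = c.NextCheckpoint(/* aTemporary = */ false); // #2:0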
// Initialize state needed for rewinding.
void InitializeRewindState();
// Set whether this process should save a particular checkpoint.
void SetSaveCheckpoint(size_t aCheckpoint, bool aSave);
// Invoke a callback on the main thread, and pause it until ResumeExecution or
// RestoreCheckpointAndResume are called. When the main thread is not paused,
// this must be called on the main thread itself. When the main thread is
// already paused, this may be called from any thread.
void PauseMainThreadAndInvokeCallback(const std::function<void()>& aCallback);
// Return whether the main thread should be paused. This does not necessarily
// mean it is paused, but it will pause at the earliest opportunity.
bool MainThreadShouldPause();
// Pause the current main thread and service any callbacks until the thread no
// longer needs to pause.
void PauseMainThreadAndServiceCallbacks();
// Return whether any checkpoints have been saved.
bool HasSavedCheckpoint();
// Get the ID of the most recent saved checkpoint.
CheckpointId GetLastSavedCheckpoint();
// When paused at a breakpoint or at a checkpoint, restore a checkpoint that
// was saved earlier and resume execution.
void RestoreCheckpointAndResume(const CheckpointId& aCheckpoint);
// When paused at a breakpoint or at a checkpoint, unpause and proceed with
// execution.
void ResumeExecution();
// Allow execution after this point to diverge from the recording. Execution
// will remain diverged until an earlier checkpoint is restored.
//
// If an unhandled divergence occurs (see the 'Recording Divergence' comment
// in ProcessRewind.h) then the process rewinds to the most recent saved
// checkpoint.
void DivergeFromRecording();
// After a call to DivergeFromRecording(), this may be called to prevent future
// unhandled divergence from causing earlier checkpoints to be restored
// (the process will immediately crash instead). This state lasts until a new
// call to DivergeFromRecording, or to an explicit restore of an earlier
// checkpoint.
void DisallowUnhandledDivergeFromRecording();
// Make sure that execution has not diverged from the recording after a call to
// DivergeFromRecording, by rewinding to the last saved checkpoint if so.
void EnsureNotDivergedFromRecording();
// Access the flag for whether this is the active child process.
void SetIsActiveChild(bool aActive);
bool IsActiveChild();
// Note a checkpoint at the current execution position. This checkpoint will be
// saved if either (a) it is temporary, or (b) the middleman has instructed
// this process to save this normal checkpoint. This method returns true if the
// checkpoint was just saved, and false if it was just restored.
bool NewCheckpoint(bool aTemporary);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ProcessRewind_h

View file

@@ -0,0 +1,175 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_SpinLock_h
#define mozilla_recordreplay_SpinLock_h
#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/GuardObjects.h"
#include <sched.h>
namespace mozilla {
namespace recordreplay {
// This file provides a couple of primitive lock implementations that are
// implemented using atomic operations. Using these locks does not write to any
// heap locations other than the lock's members, nor will it call any system
// locking APIs. These locks are used in places where reentrance into APIs
// needs to be avoided, or where writes to heap memory are not allowed.
// A basic spin lock.
class SpinLock
{
public:
inline void Lock();
inline void Unlock();
private:
Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mLocked;
};
// A basic read/write spin lock. This lock permits either multiple readers and
// no writers, or one writer.
class ReadWriteSpinLock
{
public:
inline void ReadLock();
inline void ReadUnlock();
inline void WriteLock();
inline void WriteUnlock();
private:
SpinLock mLock; // Protects mReaders.
int32_t mReaders; // -1 when in use for writing.
};
// RAII class to lock a spin lock.
struct MOZ_RAII AutoSpinLock
{
explicit AutoSpinLock(SpinLock& aLock)
: mLock(aLock)
{
mLock.Lock();
}
~AutoSpinLock()
{
mLock.Unlock();
}
private:
SpinLock& mLock;
};
// RAII class to lock a read/write spin lock for reading.
struct AutoReadSpinLock
{
explicit AutoReadSpinLock(ReadWriteSpinLock& aLock)
: mLock(aLock)
{
mLock.ReadLock();
}
~AutoReadSpinLock()
{
mLock.ReadUnlock();
}
private:
ReadWriteSpinLock& mLock;
};
// RAII class to lock a read/write spin lock for writing.
struct AutoWriteSpinLock
{
explicit AutoWriteSpinLock(ReadWriteSpinLock& aLock)
: mLock(aLock)
{
mLock.WriteLock();
}
~AutoWriteSpinLock()
{
mLock.WriteUnlock();
}
private:
ReadWriteSpinLock& mLock;
};
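// Illustrative sketch (for exposition only): guarding a shared table with
// these RAII helpers. gTableLock, gTable, aIndex and aValue are hypothetical
// names.
#if 0
static ReadWriteSpinLock gTableLock;

static size_t
LookupEntry(size_t aIndex)
{
  AutoReadSpinLock guard(gTableLock); // Multiple readers may hold the lock.
  return gTable[aIndex];
}

static void
UpdateEntry(size_t aIndex, size_t aValue)
{
  AutoWriteSpinLock guard(gTableLock); // Writers get exclusive access.
  gTable[aIndex] = aValue;
}
#endif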
///////////////////////////////////////////////////////////////////////////////
// Inline definitions
///////////////////////////////////////////////////////////////////////////////
// Try to yield execution to another thread.
static inline void
ThreadYield()
{
sched_yield();
}
inline void
SpinLock::Lock()
{
while (mLocked.exchange(true)) {
ThreadYield();
}
}
inline void
SpinLock::Unlock()
{
DebugOnly<bool> rv = mLocked.exchange(false);
MOZ_ASSERT(rv);
}
inline void
ReadWriteSpinLock::ReadLock()
{
while (true) {
AutoSpinLock ex(mLock);
if (mReaders != -1) {
mReaders++;
return;
}
}
}
inline void
ReadWriteSpinLock::ReadUnlock()
{
AutoSpinLock ex(mLock);
MOZ_ASSERT(mReaders > 0);
mReaders--;
}
inline void
ReadWriteSpinLock::WriteLock()
{
while (true) {
AutoSpinLock ex(mLock);
if (mReaders == 0) {
mReaders = -1;
return;
}
}
}
inline void
ReadWriteSpinLock::WriteUnlock()
{
AutoSpinLock ex(mLock);
MOZ_ASSERT(mReaders == -1);
mReaders = 0;
}
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_SpinLock_h

View file

@@ -0,0 +1,366 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_SplayTree_h
#define mozilla_recordreplay_SplayTree_h
#include "mozilla/Types.h"
#include "ProcessRecordReplay.h"
//#define ENABLE_COHERENCY_CHECKS
namespace mozilla {
namespace recordreplay {
/*
* Class which represents a splay tree with nodes allocated from an alloc
* policy.
*
* Splay trees are balanced binary search trees for which search, insert and
* remove are all amortized O(log n).
*
* T indicates the type of tree elements, L has a Lookup type and a static
* 'ssize_t compare(const L::Lookup&, const T&)' method ordering the elements.
*/
template <class T, class L, class AllocPolicy, size_t ChunkPages>
class SplayTree
{
struct Node {
T mItem;
Node* mLeft;
Node* mRight;
Node* mParent;
explicit Node(const T& aItem)
: mItem(aItem), mLeft(nullptr), mRight(nullptr), mParent(nullptr)
{}
};
AllocPolicy mAlloc;
Node* mRoot;
Node* mFreeList;
SplayTree(const SplayTree&) = delete;
SplayTree& operator=(const SplayTree&) = delete;
public:
explicit SplayTree(const AllocPolicy& aAlloc = AllocPolicy())
: mAlloc(aAlloc), mRoot(nullptr), mFreeList(nullptr)
{}
bool empty() const {
return !mRoot;
}
void clear() {
while (mRoot) {
remove(mRoot);
}
}
Maybe<T> maybeLookup(const typename L::Lookup& aLookup, bool aRemove = false) {
if (!mRoot) {
return Nothing();
}
Node* last = lookup(aLookup);
splay(last);
checkCoherency(mRoot, nullptr);
Maybe<T> res;
if (L::compare(aLookup, last->mItem) == 0) {
res = Some(last->mItem);
if (aRemove) {
remove(last);
}
}
return res;
}
// Lookup an item which matches aLookup, or the closest item less than it.
Maybe<T> lookupClosestLessOrEqual(const typename L::Lookup& aLookup, bool aRemove = false) {
if (!mRoot) {
return Nothing();
}
Node* last = lookup(aLookup);
Node* search = last;
while (search && L::compare(aLookup, search->mItem) < 0) {
search = search->mParent;
}
Maybe<T> res = search ? Some(search->mItem) : Nothing();
if (aRemove && search) {
remove(search);
} else {
splay(last);
}
checkCoherency(mRoot, nullptr);
return res;
}
void insert(const typename L::Lookup& aLookup, const T& aValue) {
MOZ_RELEASE_ASSERT(L::compare(aLookup, aValue) == 0);
Node* element = allocateNode(aValue);
if (!mRoot) {
mRoot = element;
return;
}
Node* last = lookup(aLookup);
ssize_t cmp = L::compare(aLookup, last->mItem);
Node** parentPointer;
if (cmp < 0) {
parentPointer = &last->mLeft;
} else if (cmp > 0) {
parentPointer = &last->mRight;
} else {
// The lookup matches an existing entry in the tree. Place it to the left
// of the element just looked up.
if (!last->mLeft) {
parentPointer = &last->mLeft;
} else {
last = last->mLeft;
while (last->mRight) {
last = last->mRight;
}
parentPointer = &last->mRight;
}
}
MOZ_RELEASE_ASSERT(!*parentPointer);
*parentPointer = element;
element->mParent = last;
splay(element);
checkCoherency(mRoot, nullptr);
}
class Iter {
friend class SplayTree;
SplayTree* mTree;
Node* mNode;
bool mRemoved;
Iter(SplayTree* aTree, Node* aNode)
: mTree(aTree), mNode(aNode), mRemoved(false)
{}
public:
const T& ref() {
return mNode->mItem;
}
bool done() {
return !mNode;
}
Iter& operator++() {
MOZ_RELEASE_ASSERT(!mRemoved);
if (mNode->mRight) {
mNode = mNode->mRight;
while (mNode->mLeft) {
mNode = mNode->mLeft;
}
} else {
while (true) {
Node* cur = mNode;
mNode = mNode->mParent;
if (!mNode || mNode->mLeft == cur) {
break;
}
}
}
return *this;
}
void removeEntry() {
mTree->remove(mNode);
mRemoved = true;
}
};
Iter begin() {
Node* node = mRoot;
while (node && node->mLeft) {
node = node->mLeft;
}
return Iter(this, node);
}
private:
// Lookup an item matching aLookup, or the closest node to it.
Node* lookup(const typename L::Lookup& aLookup) const {
MOZ_RELEASE_ASSERT(mRoot);
Node* node = mRoot;
Node* parent;
do {
parent = node;
ssize_t c = L::compare(aLookup, node->mItem);
if (c == 0) {
return node;
}
node = (c < 0) ? node->mLeft : node->mRight;
} while (node);
return parent;
}
void remove(Node* aNode) {
splay(aNode);
MOZ_RELEASE_ASSERT(aNode && aNode == mRoot);
// Find another node which can be swapped in for the root: either the
// rightmost child of the root's left, or the leftmost child of the
// root's right.
Node* swap;
Node* swapChild;
if (mRoot->mLeft) {
swap = mRoot->mLeft;
while (swap->mRight) {
swap = swap->mRight;
}
swapChild = swap->mLeft;
} else if (mRoot->mRight) {
swap = mRoot->mRight;
while (swap->mLeft) {
swap = swap->mLeft;
}
swapChild = swap->mRight;
} else {
freeNode(mRoot);
mRoot = nullptr;
return;
}
// The selected node has at most one child, in swapChild. Detach it
// from the subtree by replacing it with that child.
if (swap == swap->mParent->mLeft) {
swap->mParent->mLeft = swapChild;
} else {
swap->mParent->mRight = swapChild;
}
if (swapChild) {
swapChild->mParent = swap->mParent;
}
mRoot->mItem = swap->mItem;
freeNode(swap);
checkCoherency(mRoot, nullptr);
}
size_t NodesPerChunk() const {
return ChunkPages * PageSize / sizeof(Node);
}
Node* allocateNode(const T& aValue) {
if (!mFreeList) {
Node* nodeArray = mAlloc.template pod_malloc<Node>(NodesPerChunk());
for (size_t i = 0; i < NodesPerChunk() - 1; i++) {
nodeArray[i].mLeft = &nodeArray[i + 1];
}
mFreeList = nodeArray;
}
Node* node = mFreeList;
mFreeList = node->mLeft;
new(node) Node(aValue);
return node;
}
void freeNode(Node* aNode) {
aNode->mLeft = mFreeList;
mFreeList = aNode;
}
void splay(Node* aNode) {
// Rotate the element until it is at the root of the tree. Performing
// the rotations in this fashion preserves the amortized balancing of
// the tree.
MOZ_RELEASE_ASSERT(aNode);
while (aNode != mRoot) {
Node* parent = aNode->mParent;
if (parent == mRoot) {
// Zig rotation.
rotate(aNode);
MOZ_RELEASE_ASSERT(aNode == mRoot);
return;
}
Node* grandparent = parent->mParent;
if ((parent->mLeft == aNode) == (grandparent->mLeft == parent)) {
// Zig-zig rotation.
rotate(parent);
rotate(aNode);
} else {
// Zig-zag rotation.
rotate(aNode);
rotate(aNode);
}
}
}
void rotate(Node* aNode) {
// Rearrange nodes so that node becomes the parent of its current
// parent, while preserving the sortedness of the tree.
Node* parent = aNode->mParent;
if (parent->mLeft == aNode) {
// x y
// y c ==> a x
// a b b c
parent->mLeft = aNode->mRight;
if (aNode->mRight) {
aNode->mRight->mParent = parent;
}
aNode->mRight = parent;
} else {
MOZ_RELEASE_ASSERT(parent->mRight == aNode);
// x y
// a y ==> x c
// b c a b
parent->mRight = aNode->mLeft;
if (aNode->mLeft) {
aNode->mLeft->mParent = parent;
}
aNode->mLeft = parent;
}
aNode->mParent = parent->mParent;
parent->mParent = aNode;
if (Node* grandparent = aNode->mParent) {
if (grandparent->mLeft == parent) {
grandparent->mLeft = aNode;
} else {
grandparent->mRight = aNode;
}
} else {
mRoot = aNode;
}
}
#ifdef ENABLE_COHERENCY_CHECKS
Node* checkCoherency(Node* aNode, Node* aMinimum) {
if (!aNode) {
MOZ_RELEASE_ASSERT(!mRoot);
return nullptr;
}
MOZ_RELEASE_ASSERT(aNode->mParent || aNode == mRoot);
MOZ_RELEASE_ASSERT(!aMinimum || L::compare(L::getLookup(aMinimum->mItem), aNode->mItem) <= 0);
if (aNode->mLeft) {
MOZ_RELEASE_ASSERT(aNode->mLeft->mParent == aNode);
Node* leftMaximum = checkCoherency(aNode->mLeft, aMinimum);
MOZ_RELEASE_ASSERT(L::compare(L::getLookup(leftMaximum->mItem), aNode->mItem) <= 0);
}
if (aNode->mRight) {
MOZ_RELEASE_ASSERT(aNode->mRight->mParent == aNode);
return checkCoherency(aNode->mRight, aNode);
}
return aNode;
}
#else
inline void checkCoherency(Node* aNode, Node* aMinimum) {}
#endif
};
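// Illustrative sketch (for exposition only): a minimal element type and lookup
// policy L for this tree. Entry and EntryLookup are hypothetical names;
// getLookup is only needed when ENABLE_COHERENCY_CHECKS is defined. A negative
// compare result means the lookup orders before the element.
#if 0
struct Entry {
  size_t mKey;
  size_t mValue;
};

struct EntryLookup {
  typedef size_t Lookup;
  static ssize_t compare(const Lookup& aKey, const Entry& aEntry) {
    return (aKey < aEntry.mKey) ? -1 : (aKey > aEntry.mKey) ? 1 : 0;
  }
  static Lookup getLookup(const Entry& aEntry) {
    return aEntry.mKey;
  }
};

// e.g. SplayTree<Entry, EntryLookup, AllocPolicy<MemoryKind::Generic>, 4>
#endif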
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_SplayTree_h

View file

@@ -0,0 +1,609 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Thread.h"
#include "ipc/ChildIPC.h"
#include "mozilla/Atomics.h"
#include "mozilla/Maybe.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/ThreadLocal.h"
#include "ChunkAllocator.h"
#include "MemorySnapshot.h"
#include "ProcessRewind.h"
#include "SpinLock.h"
#include "ThreadSnapshot.h"
namespace mozilla {
namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Thread Organization
///////////////////////////////////////////////////////////////////////////////
static MOZ_THREAD_LOCAL(Thread*) gTlsThreadKey;
/* static */ Monitor* Thread::gMonitor;
/* static */ Thread*
Thread::Current()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread* thread = gTlsThreadKey.get();
if (!thread && IsReplaying()) {
// Disable system threads when replaying.
WaitForeverNoIdle();
}
return thread;
}
/* static */ bool
Thread::CurrentIsMainThread()
{
Thread* thread = Current();
return thread && thread->IsMainThread();
}
void
Thread::BindToCurrent()
{
MOZ_ASSERT(!mStackBase);
gTlsThreadKey.set(this);
mNativeId = pthread_self();
size_t size = pthread_get_stacksize_np(mNativeId);
uint8_t* base = (uint8_t*)pthread_get_stackaddr_np(mNativeId) - size;
// Lock if we will be notifying later on. We don't do this for the main
// thread because we haven't initialized enough state yet that we can use
// a monitor.
Maybe<MonitorAutoLock> lock;
if (mId != MainThreadId) {
lock.emplace(*gMonitor);
}
mStackBase = base;
mStackSize = size;
// Notify WaitUntilInitialized if it is waiting for this thread to start.
if (mId != MainThreadId) {
gMonitor->NotifyAll();
}
}
// All threads, indexed by the thread ID.
static Thread* gThreads;
/* static */ Thread*
Thread::GetById(size_t aId)
{
MOZ_ASSERT(aId);
MOZ_ASSERT(aId <= MaxThreadId);
return &gThreads[aId];
}
/* static */ Thread*
Thread::GetByNativeId(NativeThreadId aNativeId)
{
for (size_t id = MainThreadId; id <= MaxRecordedThreadId; id++) {
Thread* thread = GetById(id);
if (thread->mNativeId == aNativeId) {
return thread;
}
}
return nullptr;
}
/* static */ Thread*
Thread::GetByStackPointer(void* aSp)
{
if (!gThreads) {
return nullptr;
}
for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
Thread* thread = &gThreads[i];
if (MemoryContains(thread->mStackBase, thread->mStackSize, aSp)) {
return thread;
}
}
return nullptr;
}
/* static */ void
Thread::InitializeThreads()
{
gThreads = new Thread[MaxThreadId + 1];
for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
Thread* thread = &gThreads[i];
PodZero(thread);
new(thread) Thread();
thread->mId = i;
if (i <= MaxRecordedThreadId) {
thread->mEvents = gRecordingFile->OpenStream(StreamName::Event, i);
thread->mAsserts = gRecordingFile->OpenStream(StreamName::Assert, i);
}
DirectCreatePipe(&thread->mNotifyfd, &thread->mIdlefd);
}
if (!gTlsThreadKey.init()) {
MOZ_CRASH();
}
}
/* static */ void
Thread::WaitUntilInitialized(Thread* aThread)
{
MonitorAutoLock lock(*gMonitor);
while (!aThread->mStackBase) {
gMonitor->Wait();
}
}
/* static */ void
Thread::ThreadMain(void* aArgument)
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread* thread = (Thread*) aArgument;
MOZ_ASSERT(thread->mId > MainThreadId);
thread->BindToCurrent();
while (true) {
// Wait until this thread has been given a start routine.
while (true) {
{
MonitorAutoLock lock(*gMonitor);
if (thread->mStart) {
break;
}
}
Wait();
}
{
Maybe<AutoPassThroughThreadEvents> pt;
if (!thread->IsRecordedThread())
pt.emplace();
thread->mStart(thread->mStartArg);
}
MonitorAutoLock lock(*gMonitor);
// Clear the start routine to indicate to other threads that this one has
// finished executing.
thread->mStart = nullptr;
thread->mStartArg = nullptr;
// Notify any other thread waiting for this to finish in JoinThread.
gMonitor->NotifyAll();
}
}
/* static */ void
Thread::SpawnAllThreads()
{
MOZ_ASSERT(AreThreadEventsPassedThrough());
InitializeThreadSnapshots(MaxRecordedThreadId + 1);
gMonitor = new Monitor();
// All Threads are spawned up front. This allows threads to be scanned
// (e.g. in ReplayUnlock) without worrying about racing with other threads
// being spawned.
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
SpawnThread(GetById(i));
}
}
// The number of non-recorded threads that have been spawned.
static Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gNumNonRecordedThreads;
/* static */ Thread*
Thread::SpawnNonRecordedThread(Callback aStart, void* aArgument)
{
if (IsMiddleman() || gInitializationFailureMessage) {
DirectSpawnThread(aStart, aArgument);
return nullptr;
}
size_t id = MaxRecordedThreadId + ++gNumNonRecordedThreads;
MOZ_RELEASE_ASSERT(id <= MaxThreadId);
Thread* thread = GetById(id);
thread->mStart = aStart;
thread->mStartArg = aArgument;
SpawnThread(thread);
return thread;
}
/* static */ void
Thread::SpawnThread(Thread* aThread)
{
DirectSpawnThread(ThreadMain, aThread);
WaitUntilInitialized(aThread);
}
/* static */ NativeThreadId
Thread::StartThread(Callback aStart, void* aArgument, bool aNeedsJoin)
{
MOZ_ASSERT(IsRecordingOrReplaying());
MOZ_ASSERT(!AreThreadEventsPassedThrough());
MOZ_ASSERT(!AreThreadEventsDisallowed());
EnsureNotDivergedFromRecording();
Thread* thread = Thread::Current();
RecordReplayAssert("StartThread");
MonitorAutoLock lock(*gMonitor);
size_t id = 0;
if (IsRecording()) {
// Look for an idle thread.
for (id = MainThreadId + 1; id <= MaxRecordedThreadId; id++) {
Thread* targetThread = Thread::GetById(id);
if (!targetThread->mStart && !targetThread->mNeedsJoin) {
break;
}
}
if (id >= MaxRecordedThreadId) {
child::ReportFatalError("Too many threads");
}
MOZ_RELEASE_ASSERT(id <= MaxRecordedThreadId);
}
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::CreateThread);
thread->Events().RecordOrReplayScalar(&id);
Thread* targetThread = GetById(id);
// Block until the thread is ready for a new start routine.
while (targetThread->mStart) {
MOZ_RELEASE_ASSERT(IsReplaying());
gMonitor->Wait();
}
targetThread->mStart = aStart;
targetThread->mStartArg = aArgument;
targetThread->mNeedsJoin = aNeedsJoin;
// Notify the thread in case it is waiting for a start routine under
// ThreadMain.
Notify(id);
return targetThread->mNativeId;
}
void
Thread::Join()
{
MOZ_ASSERT(!AreThreadEventsPassedThrough());
EnsureNotDivergedFromRecording();
while (true) {
MonitorAutoLock lock(*gMonitor);
if (!mStart) {
MOZ_RELEASE_ASSERT(mNeedsJoin);
mNeedsJoin = false;
break;
}
gMonitor->Wait();
}
}
///////////////////////////////////////////////////////////////////////////////
// Thread Buffers
///////////////////////////////////////////////////////////////////////////////
char*
Thread::TakeBuffer(size_t aSize)
{
MOZ_ASSERT(mBuffer != (char*) 0x1);
if (aSize > mBufferCapacity) {
mBufferCapacity = aSize;
mBuffer = (char*) realloc(mBuffer, aSize);
}
char* buf = mBuffer;
// Poison the buffer in case this thread tries to use it again reentrantly.
mBuffer = (char*) 0x1;
return buf;
}
void
Thread::RestoreBuffer(char* aBuf)
{
MOZ_ASSERT(mBuffer == (char*) 0x1);
mBuffer = aBuf;
}
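// Illustrative sketch (for exposition only): a redirection needing scratch
// space would typically use the per-thread buffer as follows. aCount is a
// hypothetical name.
#if 0
  Thread* thread = Thread::Current();
  char* buf = thread->TakeBuffer(aCount);
  // ... use |buf| while recording or replaying the call's output ...
  thread->RestoreBuffer(buf);
#endif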
///////////////////////////////////////////////////////////////////////////////
// Thread Public API Accessors
///////////////////////////////////////////////////////////////////////////////
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_InternalBeginPassThroughThreadEvents()
{
MOZ_ASSERT(IsRecordingOrReplaying());
if (!gInitializationFailureMessage) {
Thread::Current()->SetPassThrough(true);
}
}
MOZ_EXPORT void
RecordReplayInterface_InternalEndPassThroughThreadEvents()
{
MOZ_ASSERT(IsRecordingOrReplaying());
if (!gInitializationFailureMessage) {
Thread::Current()->SetPassThrough(false);
}
}
MOZ_EXPORT bool
RecordReplayInterface_InternalAreThreadEventsPassedThrough()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread* thread = Thread::Current();
return !thread || thread->PassThroughEvents();
}
MOZ_EXPORT void
RecordReplayInterface_InternalBeginDisallowThreadEvents()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread::Current()->BeginDisallowEvents();
}
MOZ_EXPORT void
RecordReplayInterface_InternalEndDisallowThreadEvents()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread::Current()->EndDisallowEvents();
}
MOZ_EXPORT bool
RecordReplayInterface_InternalAreThreadEventsDisallowed()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread* thread = Thread::Current();
return thread && thread->AreEventsDisallowed();
}
MOZ_EXPORT void
RecordReplayInterface_InternalBeginCaptureEventStacks()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread::Current()->BeginCaptureEventStacks();
}
MOZ_EXPORT void
RecordReplayInterface_InternalEndCaptureEventStacks()
{
MOZ_ASSERT(IsRecordingOrReplaying());
Thread::Current()->EndCaptureEventStacks();
}
} // extern "C"
///////////////////////////////////////////////////////////////////////////////
// Thread Coordination
///////////////////////////////////////////////////////////////////////////////
// Whether all threads should attempt to idle.
static Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> gThreadsShouldIdle;
// Whether all threads are considered to be idle.
static Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> gThreadsAreIdle;
/* static */ void
Thread::WaitForIdleThreads()
{
MOZ_RELEASE_ASSERT(CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!gThreadsShouldIdle);
MOZ_RELEASE_ASSERT(!gThreadsAreIdle);
gThreadsShouldIdle = true;
MonitorAutoLock lock(*gMonitor);
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
GetById(i)->mUnrecordedWaitNotified = false;
}
while (true) {
bool done = true;
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
Thread* thread = GetById(i);
if (!thread->mIdle) {
done = false;
if (thread->mUnrecordedWaitCallback && !thread->mUnrecordedWaitNotified) {
// Set this flag before releasing the idle lock. Otherwise it's
// possible the thread could call NotifyUnrecordedWait while we
// aren't holding the lock, and we would set the flag afterwards
// without first invoking the callback.
thread->mUnrecordedWaitNotified = true;
// Release the idle lock here to avoid any risk of deadlock.
{
MonitorAutoUnlock unlock(*gMonitor);
AutoPassThroughThreadEvents pt;
thread->mUnrecordedWaitCallback();
}
// Releasing the global lock means that we need to start over
// checking whether there are any idle threads. By marking this
// thread as having been notified we have made progress, however.
done = true;
i = MainThreadId;
}
}
}
if (done) {
break;
}
MonitorAutoUnlock unlock(*gMonitor);
WaitNoIdle();
}
gThreadsAreIdle = true;
}
/* static */ void
Thread::ResumeIdleThreads()
{
MOZ_RELEASE_ASSERT(CurrentIsMainThread());
MOZ_RELEASE_ASSERT(gThreadsAreIdle);
gThreadsAreIdle = false;
MOZ_RELEASE_ASSERT(gThreadsShouldIdle);
gThreadsShouldIdle = false;
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
Notify(i);
}
}
void
Thread::NotifyUnrecordedWait(const std::function<void()>& aCallback)
{
MonitorAutoLock lock(*gMonitor);
if (mUnrecordedWaitCallback) {
// Per the documentation for NotifyUnrecordedWait, we need to call the
// routine after a notify, even if the routine has been called already
// since the main thread started to wait for idle replay threads.
mUnrecordedWaitNotified = false;
} else {
MOZ_RELEASE_ASSERT(!mUnrecordedWaitNotified);
}
mUnrecordedWaitCallback = aCallback;
// The main thread might be able to make progress now by calling the routine
// if it is waiting for idle replay threads.
if (gThreadsShouldIdle) {
Notify(MainThreadId);
}
}
/* static */ void
Thread::MaybeWaitForCheckpointSave()
{
MonitorAutoLock lock(*gMonitor);
while (gThreadsShouldIdle) {
MonitorAutoUnlock unlock(*gMonitor);
Wait();
}
}
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_NotifyUnrecordedWait(const std::function<void()>& aCallback)
{
Thread::Current()->NotifyUnrecordedWait(aCallback);
}
MOZ_EXPORT void
RecordReplayInterface_MaybeWaitForCheckpointSave()
{
Thread::MaybeWaitForCheckpointSave();
}
} // extern "C"
/* static */ void
Thread::WaitNoIdle()
{
Thread* thread = Current();
uint8_t data = 0;
size_t read = DirectRead(thread->mIdlefd, &data, 1);
MOZ_RELEASE_ASSERT(read == 1);
}
/* static */ void
Thread::Wait()
{
Thread* thread = Current();
MOZ_ASSERT(!thread->mIdle);
MOZ_ASSERT(thread->IsRecordedThread() && !thread->PassThroughEvents());
if (thread->IsMainThread()) {
WaitNoIdle();
return;
}
// The state saved for a thread needs to match up with the most recent
// point at which it became idle, so that when the main thread saves the
// stacks from all threads it saves those stacks at the right point.
// SaveThreadState might trigger thread events, so make sure they are
// passed through.
thread->SetPassThrough(true);
int stackSeparator = 0;
if (!SaveThreadState(thread->Id(), &stackSeparator)) {
// We just restored a checkpoint, notify the main thread since it is waiting
// for all threads to restore their stacks.
Notify(MainThreadId);
}
thread->mIdle = true;
if (gThreadsShouldIdle) {
// Notify the main thread that we just became idle.
Notify(MainThreadId);
}
do {
// Do the actual waiting for another thread to notify this one.
WaitNoIdle();
// Rewind this thread if the main thread told us to do so. The main
// thread is responsible for rewinding its own stack.
if (ShouldRestoreThreadStack(thread->Id())) {
RestoreThreadStack(thread->Id());
Unreachable();
}
} while (gThreadsShouldIdle);
thread->mIdle = false;
thread->SetPassThrough(false);
}
/* static */ void
Thread::WaitForever()
{
while (true) {
Wait();
}
Unreachable();
}
/* static */ void
Thread::WaitForeverNoIdle()
{
FileHandle writeFd, readFd;
DirectCreatePipe(&writeFd, &readFd);
while (true) {
uint8_t data;
DirectRead(readFd, &data, 1);
}
}
/* static */ void
Thread::Notify(size_t aId)
{
uint8_t data = 0;
DirectWrite(GetById(aId)->mNotifyfd, &data, 1);
}
} // namespace recordreplay
} // namespace mozilla

View file

@@ -0,0 +1,322 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Thread_h
#define mozilla_recordreplay_Thread_h
#include "mozilla/Atomics.h"
#include "File.h"
#include "Lock.h"
#include "Monitor.h"
#include <pthread.h>
#include <setjmp.h>
namespace mozilla {
namespace recordreplay {
// Threads Overview.
//
// The main thread and each thread that is spawned when thread events are not
// passed through have their behavior recorded.
//
// While recording, each recorded thread has an associated Thread object which
// can be fetched with Thread::Current and stores the thread's ID, its file for
// storing events that occur in the thread, and some other thread local state.
// Otherwise, threads are spawned and destroyed as usual for the process.
//
// While rewinding, the same Thread structure exists for each recorded thread.
// Several additional changes are needed to facilitate rewinding and IPC:
//
// 1. All recorded threads are spawned early on, before any checkpoint has been
// reached. These threads idle until the process calls the system's thread
// creation API, and then they run with the start routine the process
// provided. After the start routine finishes they idle indefinitely,
// potentially running new start routines if their thread ID is reused. This
// allows the process to rewind itself without needing to spawn or destroy
// any threads.
//
// 2. Some additional number of threads are spawned for use by the IPC and
// memory snapshot mechanisms. These have associated Thread
// structures but are not recorded and always pass through thread events.
//
// 3. All recorded threads must be able to enter a particular blocking
// state, under Thread::Wait, when requested by the main thread calling
// WaitForIdleThreads. For most recorded threads this happens when the
// thread attempts to take a recorded lock and blocks in Lock::Wait.
// The only exception is for JS helper threads, which never take recorded
// locks. For these threads, NotifyUnrecordedWait and
// MaybeWaitForCheckpointSave must be used to enter this state.
//
// 4. Once all recorded threads are idle, the main thread is able to record
// memory snapshots and thread stacks for later rewinding. Additional
// threads created for #2 above do not idle and do not have their state
// included in snapshots, but they are designed to avoid interfering with
// the main thread while it is taking or restoring a checkpoint.
// The ID used by the process main thread.
static const size_t MainThreadId = 1;
// The maximum ID usable by recorded threads.
static const size_t MaxRecordedThreadId = 70;
// The maximum number of threads which are not recorded but need a Thread so
// that they can participate in e.g. Wait/Notify calls.
static const size_t MaxNumNonRecordedThreads = 12;
static const size_t MaxThreadId = MaxRecordedThreadId + MaxNumNonRecordedThreads;
typedef pthread_t NativeThreadId;
// Information about the execution state of a thread.
class Thread
{
public:
// Signature for the start function of a thread.
typedef void (*Callback)(void*);
// Number of recent assertions remembered.
static const size_t NumRecentAsserts = 128;
struct RecentAssertInfo {
char* mText;
size_t mPosition;
};
private:
// Monitor used to protect various thread information (see Thread.h) and to
// wait on or signal progress for a thread.
static Monitor* gMonitor;
// Thread ID in the recording, fixed at creation.
size_t mId;
// Whether to pass events in the thread through without recording/replaying.
// This is only used by the associated thread.
bool mPassThroughEvents;
// How many times events have been disallowed in this thread; trying to
// record/replay a thread event will crash while this is non-zero. This is
// only used by the associated thread.
size_t mDisallowEvents;
// How many times event stack capturing has been enabled in this thread;
// stack information is captured for events while recording whenever this is
// non-zero. This is only used by the associated thread.
size_t mCaptureEventStacks;
// Start routine and argument which the thread is currently executing. This
// is cleared after the routine finishes and another start routine may be
// assigned to the thread. mNeedsJoin specifies whether the thread must be
// joined before it is completely dead and can be reused. This is protected
// by the thread monitor.
Callback mStart;
void* mStartArg;
bool mNeedsJoin;
// ID for this thread used by the system.
NativeThreadId mNativeId;
// Streams with events and assertions for the thread. These are only used by
// the associated thread.
Stream* mEvents;
Stream* mAsserts;
// Recent assertions that have been encountered, for debugging.
RecentAssertInfo mRecentAsserts[NumRecentAsserts];
// Buffer for general use. This is only used by the associated thread.
char* mBuffer;
size_t mBufferCapacity;
// Stack boundary of the thread, protected by the thread monitor.
uint8_t* mStackBase;
size_t mStackSize;
// File descriptor to block on when the thread is idle, fixed at creation.
FileHandle mIdlefd;
// File descriptor to notify to wake the thread up, fixed at creation.
FileHandle mNotifyfd;
// Whether the thread is waiting on idlefd.
Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mIdle;
// Any callback which should be invoked so the thread can make progress,
// and whether the callback has been invoked yet while the main thread is
// waiting for threads to become idle. Protected by the thread monitor.
std::function<void()> mUnrecordedWaitCallback;
bool mUnrecordedWaitNotified;
public:
///////////////////////////////////////////////////////////////////////////////
// Public Routines
///////////////////////////////////////////////////////////////////////////////
// Accessors for some members that never change.
size_t Id() { return mId; }
NativeThreadId NativeId() { return mNativeId; }
Stream& Events() { return *mEvents; }
Stream& Asserts() { return *mAsserts; }
uint8_t* StackBase() { return mStackBase; }
size_t StackSize() { return mStackSize; }
inline bool IsMainThread() { return mId == MainThreadId; }
inline bool IsRecordedThread() { return mId <= MaxRecordedThreadId; }
inline bool IsNonMainRecordedThread() { return IsRecordedThread() && !IsMainThread(); }
// Access the flag for whether this thread is passing events through.
void SetPassThrough(bool aPassThrough) {
MOZ_RELEASE_ASSERT(mPassThroughEvents == !aPassThrough);
mPassThroughEvents = aPassThrough;
}
bool PassThroughEvents() {
return mPassThroughEvents;
}
// Access the counter for whether events are disallowed in this thread.
void BeginDisallowEvents() {
mDisallowEvents++;
}
void EndDisallowEvents() {
MOZ_RELEASE_ASSERT(mDisallowEvents);
mDisallowEvents--;
}
bool AreEventsDisallowed() {
return mDisallowEvents != 0;
}
// Access the counter for whether event stacks are captured while recording.
void BeginCaptureEventStacks() {
mCaptureEventStacks++;
}
void EndCaptureEventStacks() {
MOZ_RELEASE_ASSERT(mCaptureEventStacks);
mCaptureEventStacks--;
}
bool ShouldCaptureEventStacks() {
return mCaptureEventStacks != 0;
}
// Access the array of recent assertions in the thread.
RecentAssertInfo& RecentAssert(size_t i) {
MOZ_ASSERT(i < NumRecentAsserts);
return mRecentAsserts[i];
}
// Access a thread local buffer of a guaranteed size. The buffer must be
// restored before it can be taken again.
char* TakeBuffer(size_t aSize);
void RestoreBuffer(char* aBuf);
// The actual start routine at the root of all recorded threads, and of all
// threads when replaying.
static void ThreadMain(void* aArgument);
// Bind this Thread to the current system thread, setting Thread::Current()
// and some other basic state.
void BindToCurrent();
// Initialize thread state.
static void InitializeThreads();
// Get the current thread, or null if this is a system thread.
static Thread* Current();
// Helper to test if this is the process main thread.
static bool CurrentIsMainThread();
// Lookup a Thread by various methods.
static Thread* GetById(size_t aId);
static Thread* GetByNativeId(NativeThreadId aNativeId);
static Thread* GetByStackPointer(void* aSp);
// Spawn all non-main recorded threads used for recording/replaying.
static void SpawnAllThreads();
// Spawn the specified thread.
static void SpawnThread(Thread* aThread);
// Spawn a non-recorded thread with the specified start routine/argument.
static Thread* SpawnNonRecordedThread(Callback aStart, void* aArgument);
// Wait until a thread has initialized its stack and other state.
static void WaitUntilInitialized(Thread* aThread);
// Start an existing thread, for use when the process has called a thread
// creation system API when events were not passed through. The return value
// is the native ID of the result.
static NativeThreadId StartThread(Callback aStart, void* aArgument, bool aNeedsJoin);
// Wait until this thread finishes executing its start routine.
void Join();
///////////////////////////////////////////////////////////////////////////////
// Thread Coordination
///////////////////////////////////////////////////////////////////////////////
// Basic API for threads to coordinate activity with each other, for use
// during replay. Each Notify() on a thread ID will cause that thread to
// return from one call to Wait(). Thus, if a thread Wait()'s and then
// another thread Notify()'s its ID, the first thread will wake up afterward.
// Similarly, if a thread Notify()'s another thread which is not waiting,
// that second thread will return from its next Wait() without needing
// another Notify().
//
// If the main thread has called WaitForIdleThreads, then calling
// Wait() will put this thread in the desired idle state. WaitNoIdle() will
// never cause the thread to enter the idle state, and should be used
// carefully to avoid deadlocks with the main thread.
static void Wait();
static void WaitNoIdle();
static void Notify(size_t aId);
// Wait indefinitely, until the process is rewound.
static void WaitForever();
// Wait indefinitely, without allowing this thread to be rewound.
static void WaitForeverNoIdle();
// See RecordReplay.h.
void NotifyUnrecordedWait(const std::function<void()>& aCallback);
static void MaybeWaitForCheckpointSave();
// Wait for all other threads to enter the idle state necessary for saving
// or restoring a checkpoint. This may only be called on the main thread.
static void WaitForIdleThreads();
// After WaitForIdleThreads(), the main thread will call this to allow
// other threads to resume execution.
static void ResumeIdleThreads();
};
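// Illustrative sketch (not part of the original patch) of the coordination
// API above: a recorded thread blocks until a caller-supplied condition
// holds, and another thread wakes it by its recorded ID after making the
// condition true. |aCondition| and |aWaiterId| are hypothetical.
static inline void
ExampleWaitUntil(const std::function<bool()>& aCondition)
{
  // Wait() can also return when the main thread saves a checkpoint and then
  // resumes idle threads, so the condition is re-checked in a loop.
  while (!aCondition()) {
    Thread::Wait();
  }
}

static inline void
ExampleNotifyWaiter(size_t aWaiterId)
{
  // Safe even if the waiter has not reached Wait() yet: a pending
  // notification lets its next Wait() return immediately.
  Thread::Notify(aWaiterId);
}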
// This uses a stack pointer instead of TLS to make sure events are passed
// through, in order to avoid thorny reentrance issues.
class AutoEnsurePassThroughThreadEventsUseStackPointer
{
Thread* mThread;
bool mPassedThrough;
public:
AutoEnsurePassThroughThreadEventsUseStackPointer()
: mThread(Thread::GetByStackPointer(this))
, mPassedThrough(!mThread || mThread->PassThroughEvents())
{
if (!mPassedThrough) {
mThread->SetPassThrough(true);
}
}
~AutoEnsurePassThroughThreadEventsUseStackPointer()
{
if (!mPassedThrough) {
mThread->SetPassThrough(false);
}
}
};
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_Thread_h

Просмотреть файл

@ -0,0 +1,315 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ThreadSnapshot.h"
#include "MemorySnapshot.h"
#include "SpinLock.h"
#include "Thread.h"
namespace mozilla {
namespace recordreplay {
#define QuoteString(aString) #aString
#define ExpandAndQuote(aMacro) QuoteString(aMacro)
#define THREAD_STACK_TOP_SIZE 2048
// Information about a thread's state, for use in saving or restoring checkpoints.
// The contents of this structure are in preserved memory.
struct ThreadState {
// Whether this thread should update its state when no longer idle. This is
// only used for non-main threads.
size_t /* bool */ mShouldRestore;
// Register state, as stored by setjmp and restored by longjmp. Saved when a
// non-main thread idles or the main thread begins to save all thread states.
// When |mShouldRestore| is set, this is the register state to restore.
jmp_buf mRegisters; // jmp_buf is 148 bytes
uint32_t mPadding;
// Top of the stack, set as for |mRegisters|. Stack pointer information is
// actually included in |mRegisters| as well, but jmp_buf is opaque.
void* mStackPointer;
// Contents of the top of the stack, set as for |mRegisters|. This captures
// parts of the stack that might mutate between the state being saved and the
// thread actually idling or making a copy of its complete stack.
uint8_t mStackTop[THREAD_STACK_TOP_SIZE];
size_t mStackTopBytes;
// Stack contents to copy to |mStackPointer|, non-nullptr if |mShouldRestore| is set.
uint8_t* mStackContents;
// Length of |mStackContents|.
size_t mStackBytes;
};
// For each non-main thread, whether that thread should update its stack and
// state when it is no longer idle. This also stores restore info for the
// main thread, which immediately updates its state when restoring checkpoints.
static ThreadState* gThreadState;
void
InitializeThreadSnapshots(size_t aNumThreads)
{
gThreadState = (ThreadState*) AllocateMemory(aNumThreads * sizeof(ThreadState),
MemoryKind::ThreadSnapshot);
jmp_buf buf;
if (setjmp(buf) == 0) {
longjmp(buf, 1);
}
ThreadYield();
}
static void
ClearThreadState(ThreadState* aInfo)
{
MOZ_RELEASE_ASSERT(aInfo->mShouldRestore);
DeallocateMemory(aInfo->mStackContents, aInfo->mStackBytes, MemoryKind::ThreadSnapshot);
aInfo->mShouldRestore = false;
aInfo->mStackContents = nullptr;
aInfo->mStackBytes = 0;
}
extern "C" {
extern int
SaveThreadStateOrReturnFromRestore(ThreadState* aInfo, int (*aSetjmpArg)(jmp_buf),
int* aStackSeparator);
#define THREAD_REGISTERS_OFFSET 8
#define THREAD_STACK_POINTER_OFFSET 160
#define THREAD_STACK_TOP_OFFSET 168
#define THREAD_STACK_TOP_BYTES_OFFSET 2216
#define THREAD_STACK_CONTENTS_OFFSET 2224
#define THREAD_STACK_BYTES_OFFSET 2232
__asm(
"_SaveThreadStateOrReturnFromRestore:"
// On Unix/x64, the first integer arg is in %rdi. Move this into a
// callee save register so that setjmp/longjmp will save/restore it even
// though the rest of the stack is incoherent after the longjmp.
"push %rbx;"
"movq %rdi, %rbx;"
// Update |aInfo->mStackPointer|. Everything above this on the stack will be
// restored after getting here from longjmp.
"movq %rsp, " ExpandAndQuote(THREAD_STACK_POINTER_OFFSET) "(%rbx);"
// Compute the number of bytes to store on the stack top.
"subq %rsp, %rdx;" // rdx is the third arg reg
// Bounds check against the size of the stack top buffer.
"cmpl $" ExpandAndQuote(THREAD_STACK_TOP_SIZE) ", %edx;"
"jg SaveThreadStateOrReturnFromRestore_crash;"
// Store the number of bytes written to the stack top buffer.
"movq %rdx, " ExpandAndQuote(THREAD_STACK_TOP_BYTES_OFFSET) "(%rbx);"
// Load the start of the stack top buffer and the stack pointer.
"movq %rsp, %r8;"
"movq %rbx, %r9;"
"addq $" ExpandAndQuote(THREAD_STACK_TOP_OFFSET) ", %r9;"
"jmp SaveThreadStateOrReturnFromRestore_copyTopRestart;"
// Fill in the stack top buffer.
"SaveThreadStateOrReturnFromRestore_copyTopRestart:"
"testq %rdx, %rdx;"
"je SaveThreadStateOrReturnFromRestore_copyTopDone;"
"movl 0(%r8), %ecx;"
"movl %ecx, 0(%r9);"
"addq $4, %r8;"
"addq $4, %r9;"
"subq $4, %rdx;"
"jmp SaveThreadStateOrReturnFromRestore_copyTopRestart;"
"SaveThreadStateOrReturnFromRestore_copyTopDone:"
// Call setjmp, passing |aInfo->mRegisters|.
"addq $" ExpandAndQuote(THREAD_REGISTERS_OFFSET) ", %rdi;"
"callq *%rsi;" // rsi is the second arg reg
// If setjmp returned zero, we just saved the state and are done.
"testl %eax, %eax;"
"je SaveThreadStateOrReturnFromRestore_done;"
// Otherwise we just returned from longjmp, and need to restore the stack
// contents before anything else can be performed. Use caller save registers
// exclusively for this, don't touch the stack at all.
// Load |mStackPointer|, |mStackContents|, and |mStackBytes| from |aInfo|.
"movq " ExpandAndQuote(THREAD_STACK_POINTER_OFFSET) "(%rbx), %rcx;"
"movq " ExpandAndQuote(THREAD_STACK_CONTENTS_OFFSET) "(%rbx), %r8;"
"movq " ExpandAndQuote(THREAD_STACK_BYTES_OFFSET) "(%rbx), %r9;"
// The stack pointer we loaded should be identical to the stack pointer we have.
"cmpq %rsp, %rcx;"
"jne SaveThreadStateOrReturnFromRestore_crash;"
"jmp SaveThreadStateOrReturnFromRestore_copyAfterRestart;"
// Fill in the contents of the entire stack.
"SaveThreadStateOrReturnFromRestore_copyAfterRestart:"
"testq %r9, %r9;"
"je SaveThreadStateOrReturnFromRestore_done;"
"movl 0(%r8), %edx;"
"movl %edx, 0(%rcx);"
"addq $4, %rcx;"
"addq $4, %r8;"
"subq $4, %r9;"
"jmp SaveThreadStateOrReturnFromRestore_copyAfterRestart;"
"SaveThreadStateOrReturnFromRestore_crash:"
"movq $0, %rbx;"
"movq 0(%rbx), %rbx;"
"SaveThreadStateOrReturnFromRestore_done:"
"pop %rbx;"
"ret;"
);
} // extern "C"
bool
SaveThreadState(size_t aId, int* aStackSeparator)
{
static_assert(offsetof(ThreadState, mRegisters) == THREAD_REGISTERS_OFFSET &&
offsetof(ThreadState, mStackPointer) == THREAD_STACK_POINTER_OFFSET &&
offsetof(ThreadState, mStackTop) == THREAD_STACK_TOP_OFFSET &&
offsetof(ThreadState, mStackTopBytes) == THREAD_STACK_TOP_BYTES_OFFSET &&
offsetof(ThreadState, mStackContents) == THREAD_STACK_CONTENTS_OFFSET &&
offsetof(ThreadState, mStackBytes) == THREAD_STACK_BYTES_OFFSET,
"Incorrect ThreadState offsets");
ThreadState* info = &gThreadState[aId];
MOZ_RELEASE_ASSERT(!info->mShouldRestore);
bool res = SaveThreadStateOrReturnFromRestore(info, setjmp, aStackSeparator) == 0;
if (!res) {
ClearThreadState(info);
}
return res;
}
void
RestoreThreadStack(size_t aId)
{
ThreadState* info = &gThreadState[aId];
longjmp(info->mRegisters, 1);
MOZ_CRASH(); // longjmp does not return.
}
static void
SaveThreadStack(SavedThreadStack& aStack, size_t aId)
{
Thread* thread = Thread::GetById(aId);
ThreadState& info = gThreadState[aId];
aStack.mStackPointer = info.mStackPointer;
MemoryMove(aStack.mRegisters, info.mRegisters, sizeof(jmp_buf));
uint8_t* stackPointer = (uint8_t*) info.mStackPointer;
uint8_t* stackTop = thread->StackBase() + thread->StackSize();
MOZ_RELEASE_ASSERT(stackTop >= stackPointer);
size_t stackBytes = stackTop - stackPointer;
MOZ_RELEASE_ASSERT(stackBytes >= info.mStackTopBytes);
aStack.mStack = (uint8_t*) AllocateMemory(stackBytes, MemoryKind::ThreadSnapshot);
aStack.mStackBytes = stackBytes;
MemoryMove(aStack.mStack, info.mStackTop, info.mStackTopBytes);
MemoryMove(aStack.mStack + info.mStackTopBytes,
stackPointer + info.mStackTopBytes, stackBytes - info.mStackTopBytes);
}
static void
RestoreStackForLoadingByThread(const SavedThreadStack& aStack, size_t aId)
{
ThreadState& info = gThreadState[aId];
MOZ_RELEASE_ASSERT(!info.mShouldRestore);
info.mStackPointer = aStack.mStackPointer;
MemoryMove(info.mRegisters, aStack.mRegisters, sizeof(jmp_buf));
info.mStackBytes = aStack.mStackBytes;
uint8_t* stackContents =
(uint8_t*) AllocateMemory(info.mStackBytes, MemoryKind::ThreadSnapshot);
MemoryMove(stackContents, aStack.mStack, aStack.mStackBytes);
info.mStackContents = stackContents;
info.mShouldRestore = true;
}
bool
ShouldRestoreThreadStack(size_t aId)
{
return gThreadState[aId].mShouldRestore;
}
bool
SaveAllThreads(SavedCheckpoint& aSaved)
{
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
AutoPassThroughThreadEvents pt; // setjmp may perform system calls.
AutoDisallowMemoryChanges disallow;
int stackSeparator = 0;
if (!SaveThreadState(MainThreadId, &stackSeparator)) {
// We just restored this state from a later point of execution.
return false;
}
for (size_t i = MainThreadId; i <= MaxRecordedThreadId; i++) {
SaveThreadStack(aSaved.mStacks[i - 1], i);
}
return true;
}
void
RestoreAllThreads(const SavedCheckpoint& aSaved)
{
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
// These will be matched by the Auto* classes in SaveAllThreads().
BeginPassThroughThreadEvents();
SetMemoryChangesAllowed(false);
for (size_t i = MainThreadId; i <= MaxRecordedThreadId; i++) {
RestoreStackForLoadingByThread(aSaved.mStacks[i - 1], i);
}
// Restore this stack to its state when we saved it in SaveAllThreads(), and
// continue executing from there.
RestoreThreadStack(MainThreadId);
Unreachable();
}
void
WaitForIdleThreadsToRestoreTheirStacks()
{
// Wait for all other threads to restore their stack before resuming execution.
while (true) {
bool done = true;
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
if (ShouldRestoreThreadStack(i)) {
Thread::Notify(i);
done = false;
}
}
if (done) {
break;
}
Thread::WaitNoIdle();
}
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,119 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ThreadSnapshot_h
#define mozilla_recordreplay_ThreadSnapshot_h
#include "File.h"
#include "ProcessRewind.h"
#include "Thread.h"
namespace mozilla {
namespace recordreplay {
// Thread Snapshots Overview.
//
// The functions below are used when a thread saves or restores its stack and
// register state at a checkpoint. The steps taken in saving and restoring a
// thread snapshot are as follows:
//
// 1. Before idling (non-main threads) or before reaching a checkpoint (main
// thread), the thread calls SaveThreadState. This saves the register state
// for the thread as well as a portion of the top of the stack, and after
// saving the state it returns true.
//
// 2. Once all other threads are idle, the main thread saves the remainder of
// all thread stacks. (The portion saved earlier gives threads leeway to
// perform operations after saving their stack, mainly for entering an idle
// state.)
//
// 3. The thread stacks are now stored on the heap. Later on, the main thread
// may ensure that all threads are idle and then call, for all threads,
// RestoreStackForLoadingByThread. This prepares the stacks for restoring by
// the associated threads.
//
// 4. While still in their idle state, threads call ShouldRestoreThreadStack to
// see if there is stack information for them to restore.
//
// 5. If ShouldRestoreThreadStack returns true, RestoreThreadStack is then
// called to restore the stack and register state to the point where
// SaveThreadState was originally called.
//
// 6. RestoreThreadStack does not return. Instead, control transfers to the
// call to SaveThreadState, which returns false after being restored to.
// aStackSeparator is a pointer into the stack. Values shallower than this in
// the stack will be preserved as they are at the time of the SaveThreadState
// call, whereas deeper values will be preserved as they are at the point where
// the main thread saves the remainder of the stack.
bool SaveThreadState(size_t aId, int* aStackSeparator);
// Information saved about the state of a thread.
struct SavedThreadStack
{
// Saved stack pointer.
void* mStackPointer;
// Saved stack contents, starting at |mStackPointer|.
uint8_t* mStack;
size_t mStackBytes;
// Saved register state.
jmp_buf mRegisters;
SavedThreadStack()
{
PodZero(this);
}
void ReleaseContents() {
if (mStackBytes) {
DeallocateMemory(mStack, mStackBytes, MemoryKind::ThreadSnapshot);
}
}
};
struct SavedCheckpoint
{
CheckpointId mCheckpoint;
SavedThreadStack mStacks[MaxRecordedThreadId];
explicit SavedCheckpoint(CheckpointId aCheckpoint)
: mCheckpoint(aCheckpoint)
{}
void ReleaseContents() {
for (SavedThreadStack& stack : mStacks) {
stack.ReleaseContents();
}
}
};
// When all other threads are idle, the main thread may call this to save its
// own stack and the stacks of all other threads. The return value is true if
// the stacks were just saved, or false if they were just restored due to a
// rewind from a later point of execution.
bool SaveAllThreads(SavedCheckpoint& aSavedCheckpoint);
// Restore the saved stacks for a checkpoint and rewind state to that point.
// This function does not return.
void RestoreAllThreads(const SavedCheckpoint& aSavedCheckpoint);
// After rewinding to an earlier checkpoint, the main thread will call this to
// ensure that each thread has woken up and restored its own stack contents.
// The main thread does not itself write to the stacks of other threads.
void WaitForIdleThreadsToRestoreTheirStacks();
bool ShouldRestoreThreadStack(size_t aId);
void RestoreThreadStack(size_t aId);
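// Illustrative sketch (not part of the original patch) of the thread-stack
// part of the flow described above, as the main thread might drive it.
// Thread::WaitForIdleThreads() and Thread::ResumeIdleThreads() come from
// Thread.h; saving and restoring the memory snapshot is omitted here, and the
// real driver lives elsewhere in this patch.
static bool
ExampleSaveOrRestoreStacks(SavedCheckpoint& aSaved)
{
  // Steps 1-2: bring all other threads to an idle point, then capture every
  // thread's registers and stack. SaveAllThreads() returns true on this path.
  Thread::WaitForIdleThreads();
  if (SaveAllThreads(aSaved)) {
    Thread::ResumeIdleThreads();
    return true;
  }
  // Steps 3-6: we only get here after RestoreAllThreads(aSaved) was called
  // later on and rewound the main thread back into SaveAllThreads(). Let the
  // other threads restore their own stacks before they run again.
  WaitForIdleThreadsToRestoreTheirStacks();
  Thread::ResumeIdleThreads();
  return false;
}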
// Initialize state for taking thread snapshots.
void InitializeThreadSnapshots(size_t aNumThreads);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ThreadSnapshot_h

Просмотреть файл

@ -0,0 +1,207 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Trigger.h"
#include "ipc/ChildIPC.h"
#include "mozilla/Maybe.h"
#include "mozilla/StaticMutex.h"
#include "mozilla/RecordReplay.h"
#include "InfallibleVector.h"
#include "ProcessRewind.h"
#include "Thread.h"
#include "ValueIndex.h"
namespace mozilla {
namespace recordreplay {
// Information about each trigger.
struct TriggerInfo
{
// ID of the thread which registered this trigger.
size_t mThreadId;
// Callback to execute when the trigger is activated.
std::function<void()> mCallback;
// Number of times this trigger has been registered without being unregistered.
size_t mRegisterCount;
TriggerInfo(size_t aThreadId, const std::function<void()>& aCallback)
: mThreadId(aThreadId), mCallback(aCallback), mRegisterCount(1)
{}
};
// All registered triggers.
static ValueIndex* gTriggers;
typedef std::unordered_map<void*, TriggerInfo> TriggerInfoMap;
static TriggerInfoMap* gTriggerInfoMap;
// Triggers which have been activated. This is protected by the global lock.
static StaticInfallibleVector<size_t> gActivatedTriggers;
static StaticMutexNotRecorded gTriggersMutex;
void
InitializeTriggers()
{
gTriggers = new ValueIndex();
gTriggerInfoMap = new TriggerInfoMap();
}
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_RegisterTrigger(void* aObj, const std::function<void()>& aCallback)
{
MOZ_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(aObj);
if (HasDivergedFromRecording()) {
return;
}
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
size_t threadId = Thread::Current()->Id();
size_t id;
{
AutoOrderedAtomicAccess order;
StaticMutexAutoLock lock(gTriggersMutex);
TriggerInfoMap::iterator iter = gTriggerInfoMap->find(aObj);
if (iter != gTriggerInfoMap->end()) {
id = gTriggers->GetIndex(aObj);
MOZ_RELEASE_ASSERT(iter->second.mThreadId == threadId);
iter->second.mCallback = aCallback;
iter->second.mRegisterCount++;
} else {
id = gTriggers->Insert(aObj);
TriggerInfo info(threadId, aCallback);
gTriggerInfoMap->insert(TriggerInfoMap::value_type(aObj, info));
}
}
RecordReplayAssert("RegisterTrigger %zu", id);
}
MOZ_EXPORT void
RecordReplayInterface_UnregisterTrigger(void* aObj)
{
MOZ_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
StaticMutexAutoLock lock(gTriggersMutex);
TriggerInfoMap::iterator iter = gTriggerInfoMap->find(aObj);
MOZ_RELEASE_ASSERT(iter != gTriggerInfoMap->end());
if (--iter->second.mRegisterCount == 0) {
gTriggerInfoMap->erase(iter);
gTriggers->Remove(aObj);
}
}
MOZ_EXPORT void
RecordReplayInterface_ActivateTrigger(void* aObj)
{
if (!IsRecording()) {
return;
}
StaticMutexAutoLock lock(gTriggersMutex);
size_t id = gTriggers->GetIndex(aObj);
gActivatedTriggers.emplaceBack(id);
}
static void
InvokeTriggerCallback(size_t aId)
{
void* obj;
std::function<void()> callback;
{
StaticMutexAutoLock lock(gTriggersMutex);
obj = const_cast<void*>(gTriggers->GetValue(aId));
TriggerInfoMap::iterator iter = gTriggerInfoMap->find(obj);
MOZ_RELEASE_ASSERT(iter != gTriggerInfoMap->end());
MOZ_RELEASE_ASSERT(iter->second.mThreadId == Thread::Current()->Id());
MOZ_RELEASE_ASSERT(iter->second.mRegisterCount);
MOZ_RELEASE_ASSERT(iter->second.mCallback);
callback = iter->second.mCallback;
}
callback();
}
static Maybe<size_t>
RemoveTriggerCallbackForThreadId(size_t aThreadId)
{
StaticMutexAutoLock lock(gTriggersMutex);
for (size_t i = 0; i < gActivatedTriggers.length(); i++) {
size_t id = gActivatedTriggers[i];
void* obj = const_cast<void*>(gTriggers->GetValue(id));
TriggerInfoMap::iterator iter = gTriggerInfoMap->find(obj);
MOZ_RELEASE_ASSERT(iter != gTriggerInfoMap->end());
if (iter->second.mThreadId == aThreadId) {
gActivatedTriggers.erase(&gActivatedTriggers[i]);
return Some(id);
}
}
return Nothing();
}
MOZ_EXPORT void
RecordReplayInterface_ExecuteTriggers()
{
MOZ_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
Thread* thread = Thread::Current();
RecordReplayAssert("ExecuteTriggers");
if (IsRecording()) {
// Invoke the callbacks for any triggers waiting for execution, including
// any whose callbacks are triggered by earlier callback invocations.
while (true) {
Maybe<size_t> id = RemoveTriggerCallbackForThreadId(thread->Id());
if (id.isNothing()) {
break;
}
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::ExecuteTrigger);
thread->Events().WriteScalar(id.ref());
InvokeTriggerCallback(id.ref());
}
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::ExecuteTriggersFinished);
} else {
// Execute the same callbacks which were executed at this point while
// recording.
while (true) {
ThreadEvent ev = (ThreadEvent) thread->Events().ReadScalar();
if (ev != ThreadEvent::ExecuteTrigger) {
if (ev != ThreadEvent::ExecuteTriggersFinished) {
child::ReportFatalError("ExecuteTrigger Mismatch");
Unreachable();
}
break;
}
size_t id = thread->Events().ReadScalar();
InvokeTriggerCallback(id);
}
}
RecordReplayAssert("ExecuteTriggers DONE");
}
} // extern "C"
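// Illustrative sketch (not part of the original patch), assuming the
// RegisterTrigger/ActivateTrigger/ExecuteTriggers wrappers declared in
// mozilla/RecordReplay.h: a recorded thread registers a trigger for an
// object, something (possibly unrecorded) activates it, and the callback is
// later run at a deterministic point, in the same order while recording and
// replaying. |ExampleObject| is hypothetical.
struct ExampleObject
{
  bool mNeedsUpdate;
};

static void
ExampleUseOfTriggers(ExampleObject* aObj)
{
  // Must run on the thread which will later call ExecuteTriggers().
  RegisterTrigger(aObj, [=]() { aObj->mNeedsUpdate = true; });

  // Potentially performed from code whose events are not recorded.
  ActivateTrigger(aObj);

  // Run (while recording) or replay (while replaying) every callback that has
  // been activated for this thread.
  ExecuteTriggers();
}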
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,21 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Trigger_h
#define mozilla_recordreplay_Trigger_h
namespace mozilla {
namespace recordreplay {
// See RecordReplay.h for a description of the record/replay trigger API.
// Initialize trigger state at the beginning of recording or replaying.
void InitializeTriggers();
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_Trigger_h

Просмотреть файл

@ -0,0 +1,87 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ValueIndex.h"
#include "mozilla/Assertions.h"
namespace mozilla {
namespace recordreplay {
size_t
ValueIndex::Insert(const void* aValue)
{
MOZ_RELEASE_ASSERT(!Contains(aValue));
size_t index = mIndexCount++;
mValueToIndex.insert(ValueToIndexMap::value_type(aValue, index));
mIndexToValue.insert(IndexToValueMap::value_type(index, aValue));
return index;
}
void
ValueIndex::Remove(const void* aValue)
{
size_t index;
if (!MaybeGetIndex(aValue, &index)) {
return;
}
mValueToIndex.erase(aValue);
mIndexToValue.erase(index);
}
size_t
ValueIndex::GetIndex(const void* aValue)
{
size_t index;
if (!MaybeGetIndex(aValue, &index)) {
MOZ_CRASH();
}
return index;
}
bool
ValueIndex::MaybeGetIndex(const void* aValue, size_t* aIndex)
{
ValueToIndexMap::const_iterator iter = mValueToIndex.find(aValue);
if (iter != mValueToIndex.end()) {
*aIndex = iter->second;
return true;
}
return false;
}
bool
ValueIndex::Contains(const void* aValue)
{
size_t index;
return MaybeGetIndex(aValue, &index);
}
const void*
ValueIndex::GetValue(size_t aIndex)
{
IndexToValueMap::const_iterator iter = mIndexToValue.find(aIndex);
MOZ_RELEASE_ASSERT(iter != mIndexToValue.end());
return iter->second;
}
bool
ValueIndex::IsEmpty()
{
MOZ_ASSERT(mValueToIndex.empty() == mIndexToValue.empty());
return mValueToIndex.empty();
}
const ValueIndex::ValueToIndexMap&
ValueIndex::GetValueToIndexMap()
{
return mValueToIndex;
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,83 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ValueIndex_h
#define mozilla_recordreplay_ValueIndex_h
#include "mozilla/Types.h"
#include <unordered_map>
namespace mozilla {
namespace recordreplay {
// ValueIndexes are a bidirectional map between arbitrary pointers and indexes.
// These are used while recording and replaying to handle the general issue
// that pointer values are not preserved during replay: recording a pointer and
// replaying its bits later will not yield a pointer to the same heap value,
// but rather a pointer to garbage that must not be dereferenced.
//
// When entries are added to a ValueIndex at consistent points between
// recording and replaying, then the resulting indexes will be consistent, and
// that index can be recorded and later replayed and used to find the
// replay-specific pointer value corresponding to the pointer used at that
// point in the recording. Entries can be removed from the ValueIndex at
// different points in the recording and replay without affecting the indexes
// that will be generated later.
//
// This is a helper class that is used in various places to help record/replay
// pointers to heap data.
class ValueIndex
{
public:
ValueIndex()
: mIndexCount(0)
{}
typedef std::unordered_map<const void*, size_t> ValueToIndexMap;
// Add a new entry to the map.
size_t Insert(const void* aValue);
// Remove an entry from the map, unless there is no entry for aValue.
void Remove(const void* aValue);
// Get the index for an entry in the map. The entry must exist in the map.
size_t GetIndex(const void* aValue);
// Get the index for an entry in the map if there is one, otherwise return
// false.
bool MaybeGetIndex(const void* aValue, size_t* aIndex);
// Return whether there is an entry for aValue.
bool Contains(const void* aValue);
// Get the value associated with an index. The index must exist in the map.
const void* GetValue(size_t aIndex);
// Whether the map is empty.
bool IsEmpty();
// Raw read-only access to the map contents.
const ValueToIndexMap& GetValueToIndexMap();
private:
typedef std::unordered_map<size_t, const void*> IndexToValueMap;
// Map from pointer values to indexes.
ValueToIndexMap mValueToIndex;
// Map from indexes to pointer values.
IndexToValueMap mIndexToValue;
// The total number of entries that have ever been added to this map.
size_t mIndexCount;
};
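// Illustrative sketch (not part of the original patch), assuming the
// IsRecording() and RecordReplayValue() helpers from mozilla/RecordReplay.h
// and a ValueIndex whose entries are inserted at consistent points while
// recording and replaying: record a pointer by its index so the replaying
// process can recover its own pointer for the same logical value.
static inline void*
ExampleRecordReplayPointer(ValueIndex* aIndex, void* aRecordingPtr)
{
  // While recording this writes the pointer's index to the recording; while
  // replaying the argument is ignored and the recorded index is read back.
  size_t id = RecordReplayValue(IsRecording() ? aIndex->GetIndex(aRecordingPtr) : 0);
  // Map the index to this process's pointer for the same logical value.
  return const_cast<void*>(aIndex->GetValue(id));
}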
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ValueIndex_h

Просмотреть файл

@ -0,0 +1,63 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "WeakPointer.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/StaticMutex.h"
#include "jsapi.h"
#include <unordered_map>
namespace mozilla {
namespace recordreplay {
typedef std::unordered_map<const void*, UniquePtr<JS::PersistentRootedObject>> WeakPointerRootMap;
static WeakPointerRootMap* gWeakPointerRootMap;
static StaticMutexNotRecorded gWeakPointerMutex;
static UniquePtr<JS::PersistentRootedObject>
NewRoot(JSObject* aJSObj)
{
MOZ_RELEASE_ASSERT(aJSObj);
JSContext* cx = dom::danger::GetJSContext();
UniquePtr<JS::PersistentRootedObject> root = MakeUnique<JS::PersistentRootedObject>(cx);
*root = aJSObj;
return root;
}
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_SetWeakPointerJSRoot(const void* aPtr, JSObject* aJSObj)
{
MOZ_RELEASE_ASSERT(IsReplaying());
StaticMutexAutoLock lock(gWeakPointerMutex);
auto iter = gWeakPointerRootMap->find(aPtr);
if (iter != gWeakPointerRootMap->end()) {
if (aJSObj) {
*iter->second = aJSObj;
} else {
gWeakPointerRootMap->erase(aPtr);
}
} else if (aJSObj) {
gWeakPointerRootMap->insert(WeakPointerRootMap::value_type(aPtr, NewRoot(aJSObj)));
}
}
} // extern "C"
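// Illustrative sketch (not part of the original patch), assuming the
// IsReplaying() and SetWeakPointerJSRoot() wrappers declared in
// mozilla/RecordReplay.h: code holding a weak JSObject* mirrors updates of
// that pointer into the root map above, so that during replay the object
// stays alive exactly as long as it did while recording. |aHolder| is the
// weak pointer's (hypothetical) owner.
static inline void
ExampleUpdateWeakJSPointer(void* aHolder, JSObject* aJSObj)
{
  if (IsReplaying()) {
    // Passing null releases any root previously installed for aHolder.
    SetWeakPointerJSRoot(aHolder, aJSObj);
  }
}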
void
InitializeWeakPointers()
{
gWeakPointerRootMap = new WeakPointerRootMap();
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,21 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_WeakPointer_h
#define mozilla_recordreplay_WeakPointer_h
namespace mozilla {
namespace recordreplay {
// See RecordReplay.h for a description of the record/replay weak pointer API.
// Initialize weak pointer state.
void InitializeWeakPointers();
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_WeakPointer_h

Просмотреть файл

@ -0,0 +1,294 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Channel.h"
#include "ChildIPC.h"
#include "ProcessRewind.h"
#include "Thread.h"
#include "MainThreadUtils.h"
#include "nsXULAppAPI.h"
#include "base/process_util.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/ipc/FileDescriptor.h"
#include <sys/socket.h>
#include <sys/un.h>
namespace mozilla {
namespace recordreplay {
static void
GetSocketAddress(struct sockaddr_un* addr, base::ProcessId aMiddlemanPid, size_t aId)
{
addr->sun_family = AF_UNIX;
int n = snprintf(addr->sun_path, sizeof(addr->sun_path), "/tmp/WebReplay_%d_%d", aMiddlemanPid, (int) aId);
MOZ_RELEASE_ASSERT(n >= 0 && n < (int) sizeof(addr->sun_path));
addr->sun_len = SUN_LEN(addr);
}
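// The path built above is, for example, /tmp/WebReplay_1234_0 for a
// (hypothetical) middleman pid of 1234 and channel id 0.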
namespace parent {
void
OpenChannel(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
ipc::FileDescriptor* aConnection)
{
MOZ_RELEASE_ASSERT(IsMiddleman() || XRE_IsParentProcess());
int connectionFd = socket(AF_UNIX, SOCK_STREAM, 0);
MOZ_RELEASE_ASSERT(connectionFd > 0);
struct sockaddr_un addr;
GetSocketAddress(&addr, aMiddlemanPid, aChannelId);
int rv = bind(connectionFd, (sockaddr*) &addr, SUN_LEN(&addr));
MOZ_RELEASE_ASSERT(rv >= 0);
*aConnection = ipc::FileDescriptor(connectionFd);
close(connectionFd);
}
} // namespace parent
struct HelloMessage
{
int32_t mMagic;
};
Channel::Channel(size_t aId, bool aMiddlemanRecording, const MessageHandler& aHandler)
: mId(aId)
, mHandler(aHandler)
, mInitialized(false)
, mConnectionFd(0)
, mFd(0)
, mMessageBytes(0)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
if (IsRecordingOrReplaying()) {
MOZ_RELEASE_ASSERT(AreThreadEventsPassedThrough());
mFd = socket(AF_UNIX, SOCK_STREAM, 0);
MOZ_RELEASE_ASSERT(mFd > 0);
struct sockaddr_un addr;
GetSocketAddress(&addr, child::MiddlemanProcessId(), mId);
int rv = HANDLE_EINTR(connect(mFd, (sockaddr*) &addr, SUN_LEN(&addr)));
MOZ_RELEASE_ASSERT(rv >= 0);
DirectDeleteFile(addr.sun_path);
} else {
MOZ_RELEASE_ASSERT(IsMiddleman());
ipc::FileDescriptor connection;
if (aMiddlemanRecording) {
// When starting the recording child process we have not done enough
// initialization to ask for a channel from the parent, but have also not
// started the sandbox so we can do it ourselves.
parent::OpenChannel(base::GetCurrentProcId(), mId, &connection);
} else {
dom::ContentChild::GetSingleton()->SendOpenRecordReplayChannel(mId, &connection);
MOZ_RELEASE_ASSERT(connection.IsValid());
}
mConnectionFd = connection.ClonePlatformHandle().release();
int rv = listen(mConnectionFd, 1);
MOZ_RELEASE_ASSERT(rv >= 0);
}
Thread::SpawnNonRecordedThread(ThreadMain, this);
}
/* static */ void
Channel::ThreadMain(void* aChannelArg)
{
Channel* channel = (Channel*) aChannelArg;
static const int32_t MagicValue = 0x914522b9;
if (IsRecordingOrReplaying()) {
HelloMessage msg;
int rv = HANDLE_EINTR(recv(channel->mFd, &msg, sizeof(msg), MSG_WAITALL));
MOZ_RELEASE_ASSERT(rv == sizeof(msg));
MOZ_RELEASE_ASSERT(msg.mMagic == MagicValue);
} else {
MOZ_RELEASE_ASSERT(IsMiddleman());
channel->mFd = HANDLE_EINTR(accept(channel->mConnectionFd, nullptr, 0));
MOZ_RELEASE_ASSERT(channel->mFd > 0);
HelloMessage msg;
msg.mMagic = MagicValue;
int rv = HANDLE_EINTR(send(channel->mFd, &msg, sizeof(msg), 0));
MOZ_RELEASE_ASSERT(rv == sizeof(msg));
}
{
MonitorAutoLock lock(channel->mMonitor);
channel->mInitialized = true;
channel->mMonitor.Notify();
}
while (true) {
Message* msg = channel->WaitForMessage();
if (!msg) {
break;
}
channel->mHandler(msg);
}
}
void
Channel::SendMessage(const Message& aMsg)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread() || aMsg.mType == MessageType::FatalError);
// Block until the channel is initialized.
if (!mInitialized) {
MonitorAutoLock lock(mMonitor);
while (!mInitialized) {
mMonitor.Wait();
}
}
PrintMessage("SendMsg", aMsg);
const char* ptr = (const char*) &aMsg;
size_t nbytes = aMsg.mSize;
while (nbytes) {
int rv = HANDLE_EINTR(send(mFd, ptr, nbytes, 0));
MOZ_RELEASE_ASSERT((size_t) rv <= nbytes);
ptr += rv;
nbytes -= rv;
}
}
Message*
Channel::WaitForMessage()
{
if (!mMessageBuffer.length()) {
mMessageBuffer.appendN(0, PageSize);
}
size_t messageSize = 0;
while (true) {
if (mMessageBytes >= sizeof(Message)) {
Message* msg = (Message*) mMessageBuffer.begin();
messageSize = msg->mSize;
if (mMessageBytes >= messageSize) {
break;
}
}
// Make sure the buffer is large enough for the entire incoming message.
if (messageSize > mMessageBuffer.length()) {
mMessageBuffer.appendN(0, messageSize - mMessageBuffer.length());
}
ssize_t nbytes = HANDLE_EINTR(recv(mFd, &mMessageBuffer[mMessageBytes],
mMessageBuffer.length() - mMessageBytes, 0));
if (nbytes < 0) {
MOZ_RELEASE_ASSERT(errno == EAGAIN);
continue;
} else if (nbytes == 0) {
// The other side of the channel has shut down.
if (IsMiddleman()) {
return nullptr;
}
PrintSpew("Channel disconnected, exiting...\n");
_exit(0);
}
mMessageBytes += nbytes;
}
Message* res = ((Message*)mMessageBuffer.begin())->Clone();
// Remove the message we just received from the incoming buffer.
size_t remaining = mMessageBytes - messageSize;
if (remaining) {
memmove(mMessageBuffer.begin(), &mMessageBuffer[messageSize], remaining);
}
mMessageBytes = remaining;
PrintMessage("RecvMsg", *res);
return res;
}
void
Channel::PrintMessage(const char* aPrefix, const Message& aMsg)
{
if (!SpewEnabled()) {
return;
}
AutoEnsurePassThroughThreadEvents pt;
nsCString data;
switch (aMsg.mType) {
case MessageType::HitCheckpoint: {
const HitCheckpointMessage& nmsg = (const HitCheckpointMessage&) aMsg;
data.AppendPrintf("Id %d Endpoint %d Duration %.2f ms",
(int) nmsg.mCheckpointId, nmsg.mRecordingEndpoint,
nmsg.mDurationMicroseconds / 1000.0);
break;
}
case MessageType::HitBreakpoint: {
const HitBreakpointMessage& nmsg = (const HitBreakpointMessage&) aMsg;
data.AppendPrintf("Endpoint %d", nmsg.mRecordingEndpoint);
for (size_t i = 0; i < nmsg.NumBreakpoints(); i++) {
data.AppendPrintf(" Id %d", nmsg.Breakpoints()[i]);
}
break;
}
case MessageType::Resume: {
const ResumeMessage& nmsg = (const ResumeMessage&) aMsg;
data.AppendPrintf("Forward %d", nmsg.mForward);
break;
}
case MessageType::RestoreCheckpoint: {
const RestoreCheckpointMessage& nmsg = (const RestoreCheckpointMessage&) aMsg;
data.AppendPrintf("Id %d", (int) nmsg.mCheckpoint);
break;
}
case MessageType::SetBreakpoint: {
const SetBreakpointMessage& nmsg = (const SetBreakpointMessage&) aMsg;
data.AppendPrintf("Id %d, Kind %s, Script %d, Offset %d, Frame %d",
(int) nmsg.mId, nmsg.mPosition.KindString(), (int) nmsg.mPosition.mScript,
(int) nmsg.mPosition.mOffset, (int) nmsg.mPosition.mFrameIndex);
break;
}
case MessageType::DebuggerRequest: {
const DebuggerRequestMessage& nmsg = (const DebuggerRequestMessage&) aMsg;
data = NS_ConvertUTF16toUTF8(nsDependentString(nmsg.Buffer(), nmsg.BufferSize()));
break;
}
case MessageType::DebuggerResponse: {
const DebuggerResponseMessage& nmsg = (const DebuggerResponseMessage&) aMsg;
data = NS_ConvertUTF16toUTF8(nsDependentString(nmsg.Buffer(), nmsg.BufferSize()));
break;
}
case MessageType::SetIsActive: {
const SetIsActiveMessage& nmsg = (const SetIsActiveMessage&) aMsg;
data.AppendPrintf("%d", nmsg.mActive);
break;
}
case MessageType::SetSaveCheckpoint: {
const SetSaveCheckpointMessage& nmsg = (const SetSaveCheckpointMessage&) aMsg;
data.AppendPrintf("Id %d, Save %d", (int) nmsg.mCheckpoint, nmsg.mSave);
break;
}
default:
break;
}
const char* kind = IsMiddleman() ? "Middleman" : (IsRecording() ? "Recording" : "Replaying");
PrintSpew("%s%s:%d %s %s\n", kind, aPrefix, (int) mId, aMsg.TypeString(), data.get());
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,463 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Channel_h
#define mozilla_recordreplay_Channel_h
#include "base/process.h"
#include "mozilla/gfx/Types.h"
#include "mozilla/Maybe.h"
#include "File.h"
#include "JSControl.h"
#include "Monitor.h"
namespace mozilla {
namespace recordreplay {
// This file has definitions for creating and communicating on a special
// bidirectional channel between a middleman process and a recording or
// replaying process. This communication is not included in the recording, and
// when replaying this is the only mechanism the child can use to communicate
// with the middleman process.
//
// Replaying processes can rewind themselves, restoring execution state and the
// contents of all heap memory to that at an earlier point. To keep the
// replaying process and middleman from getting out of sync with each other,
// there are tight constraints on when messages may be sent across the channel
// by one process or the other. At any given time the child process may be
// either paused or unpaused. If it is paused, it is not doing any execution
// and cannot rewind itself. If it is unpaused, it may execute content and may
// rewind itself.
//
// Messages can be sent from the child process to the middleman only when the
// child process is unpaused, and messages can only be sent from the middleman
// to the child process when the child process is paused. This prevents
// messages from being lost when they are sent from the middleman as the
// replaying process rewinds itself. A few exceptions to this rule are noted
// below.
//
// Some additional synchronization is needed between different child processes:
// replaying processes can read from the same file which a recording process is
// writing to. While it is ok for a replaying process to read from the file
// while the recording process is appending new chunks to it (see File.cpp),
// all replaying processes must be paused when the recording process is
// flushing a new index to the file.
#define ForEachMessageType(_Macro) \
/* Messages sent from the middleman to the child process. */ \
\
/* Sent at startup. */ \
_Macro(Introduction) \
\
/* Sent to recording processes when exiting. */ \
_Macro(Terminate) \
\
/* Flush the current recording to disk. */ \
_Macro(FlushRecording) \
\
/* Poke a child that is recording to create an artificial checkpoint, rather than */ \
/* (potentially) idling indefinitely. This has no effect on a replaying process. */ \
_Macro(CreateCheckpoint) \
\
/* Debugger JSON messages are initially sent from the parent. The child unpauses */ \
/* after receiving the message and will pause after it sends a DebuggerResponse. */ \
_Macro(DebuggerRequest) \
\
/* Set or clear a JavaScript breakpoint. */ \
_Macro(SetBreakpoint) \
\
/* Unpause the child and play execution either to the next point when a */ \
/* breakpoint is hit, or to the next checkpoint. Resumption may be either */ \
/* forward or backward. */ \
_Macro(Resume) \
\
/* Rewind to a particular saved checkpoint in the past. */ \
_Macro(RestoreCheckpoint) \
\
/* Notify the child whether it is the active child and should send paint and similar */ \
/* messages to the middleman. */ \
_Macro(SetIsActive) \
\
/* Set whether to perform intentional crashes, for testing. */ \
_Macro(SetAllowIntentionalCrashes) \
\
/* Set whether to save a particular checkpoint. */ \
_Macro(SetSaveCheckpoint) \
\
/* Messages sent from the child process to the middleman. */ \
\
/* Sent in response to a FlushRecording, telling the middleman that the flush */ \
/* has finished. */ \
_Macro(RecordingFlushed) \
\
/* A critical error occurred and execution cannot continue. The child will */ \
/* stop executing after sending this message and will wait to be terminated. */ \
_Macro(FatalError) \
\
/* The child's graphics were repainted. */ \
_Macro(Paint) \
\
/* Notify the middleman that a checkpoint or breakpoint was hit. */ \
/* The child will pause after sending these messages. */ \
_Macro(HitCheckpoint) \
_Macro(HitBreakpoint) \
\
/* Send a response to a DebuggerRequest message. */ \
_Macro(DebuggerResponse) \
\
/* Notify that the 'AlwaysMarkMajorCheckpoints' directive was invoked. */ \
_Macro(AlwaysMarkMajorCheckpoints)
enum class MessageType
{
#define DefineEnum(Kind) Kind,
ForEachMessageType(DefineEnum)
#undef DefineEnum
};
struct Message
{
MessageType mType;
// Total message size, including the header.
uint32_t mSize;
protected:
Message(MessageType aType, uint32_t aSize)
: mType(aType), mSize(aSize)
{
MOZ_RELEASE_ASSERT(mSize >= sizeof(*this));
}
public:
Message* Clone() const {
char* res = (char*) malloc(mSize);
memcpy(res, this, mSize);
return (Message*) res;
}
const char* TypeString() const {
switch (mType) {
#define EnumToString(Kind) case MessageType::Kind: return #Kind;
ForEachMessageType(EnumToString)
#undef EnumToString
default: return "Unknown";
}
}
protected:
template <typename T, typename Elem>
Elem* Data() { return (Elem*) (sizeof(T) + (char*) this); }
template <typename T, typename Elem>
const Elem* Data() const { return (const Elem*) (sizeof(T) + (const char*) this); }
template <typename T, typename Elem>
size_t DataSize() const { return (mSize - sizeof(T)) / sizeof(Elem); }
template <typename T, typename Elem, typename... Args>
static T* NewWithData(size_t aBufferSize, Args&&... aArgs) {
size_t size = sizeof(T) + aBufferSize * sizeof(Elem);
void* ptr = malloc(size);
return new(ptr) T(size, std::forward<Args>(aArgs)...);
}
};
struct IntroductionMessage : public Message
{
base::ProcessId mParentPid;
uint32_t mPrefsLen;
uint32_t mArgc;
IntroductionMessage(uint32_t aSize, base::ProcessId aParentPid, uint32_t aPrefsLen, uint32_t aArgc)
: Message(MessageType::Introduction, aSize)
, mParentPid(aParentPid)
, mPrefsLen(aPrefsLen)
, mArgc(aArgc)
{}
char* PrefsData() { return Data<IntroductionMessage, char>(); }
char* ArgvString() { return Data<IntroductionMessage, char>() + mPrefsLen; }
const char* PrefsData() const { return Data<IntroductionMessage, char>(); }
const char* ArgvString() const { return Data<IntroductionMessage, char>() + mPrefsLen; }
static IntroductionMessage* New(base::ProcessId aParentPid, char* aPrefs, size_t aPrefsLen,
int aArgc, char* aArgv[]) {
size_t argsLen = 0;
for (int i = 0; i < aArgc; i++) {
argsLen += strlen(aArgv[i]) + 1;
}
IntroductionMessage* res =
NewWithData<IntroductionMessage, char>(aPrefsLen + argsLen, aParentPid, aPrefsLen, aArgc);
memcpy(res->PrefsData(), aPrefs, aPrefsLen);
size_t offset = 0;
for (int i = 0; i < aArgc; i++) {
memcpy(&res->ArgvString()[offset], aArgv[i], strlen(aArgv[i]) + 1);
offset += strlen(aArgv[i]) + 1;
}
MOZ_RELEASE_ASSERT(offset == argsLen);
return res;
}
static IntroductionMessage* RecordReplay(const IntroductionMessage& aMsg) {
size_t introductionSize = RecordReplayValue(aMsg.mSize);
IntroductionMessage* msg = (IntroductionMessage*) malloc(introductionSize);
if (IsRecording()) {
memcpy(msg, &aMsg, introductionSize);
}
RecordReplayBytes(msg, introductionSize);
return msg;
}
};
template <MessageType Type>
struct EmptyMessage : public Message
{
EmptyMessage()
: Message(Type, sizeof(*this))
{}
};
typedef EmptyMessage<MessageType::Terminate> TerminateMessage;
typedef EmptyMessage<MessageType::CreateCheckpoint> CreateCheckpointMessage;
typedef EmptyMessage<MessageType::FlushRecording> FlushRecordingMessage;
template <MessageType Type>
struct JSONMessage : public Message
{
explicit JSONMessage(uint32_t aSize)
: Message(Type, aSize)
{}
const char16_t* Buffer() const { return Data<JSONMessage<Type>, char16_t>(); }
size_t BufferSize() const { return DataSize<JSONMessage<Type>, char16_t>(); }
static JSONMessage<Type>* New(const char16_t* aBuffer, size_t aBufferSize) {
JSONMessage<Type>* res = NewWithData<JSONMessage<Type>, char16_t>(aBufferSize);
MOZ_RELEASE_ASSERT(res->BufferSize() == aBufferSize);
PodCopy(res->Data<JSONMessage<Type>, char16_t>(), aBuffer, aBufferSize);
return res;
}
};
typedef JSONMessage<MessageType::DebuggerRequest> DebuggerRequestMessage;
typedef JSONMessage<MessageType::DebuggerResponse> DebuggerResponseMessage;
struct SetBreakpointMessage : public Message
{
// ID of the breakpoint to change.
size_t mId;
// New position of the breakpoint. If this is invalid then the breakpoint is
// being cleared.
js::BreakpointPosition mPosition;
SetBreakpointMessage(size_t aId, const js::BreakpointPosition& aPosition)
: Message(MessageType::SetBreakpoint, sizeof(*this))
, mId(aId)
, mPosition(aPosition)
{}
};
struct ResumeMessage : public Message
{
// Whether to travel forwards or backwards.
bool mForward;
explicit ResumeMessage(bool aForward)
: Message(MessageType::Resume, sizeof(*this))
, mForward(aForward)
{}
};
struct RestoreCheckpointMessage : public Message
{
// The checkpoint to restore.
size_t mCheckpoint;
explicit RestoreCheckpointMessage(size_t aCheckpoint)
: Message(MessageType::RestoreCheckpoint, sizeof(*this))
, mCheckpoint(aCheckpoint)
{}
};
struct SetIsActiveMessage : public Message
{
// Whether this is the active child process (see ParentIPC.cpp).
bool mActive;
explicit SetIsActiveMessage(bool aActive)
: Message(MessageType::SetIsActive, sizeof(*this))
, mActive(aActive)
{}
};
struct SetAllowIntentionalCrashesMessage : public Message
{
// Whether to allow intentional crashes in the future or not.
bool mAllowed;
explicit SetAllowIntentionalCrashesMessage(bool aAllowed)
: Message(MessageType::SetAllowIntentionalCrashes, sizeof(*this))
, mAllowed(aAllowed)
{}
};
struct SetSaveCheckpointMessage : public Message
{
// The checkpoint in question.
size_t mCheckpoint;
// Whether to save this checkpoint whenever it is encountered.
bool mSave;
SetSaveCheckpointMessage(size_t aCheckpoint, bool aSave)
: Message(MessageType::SetSaveCheckpoint, sizeof(*this))
, mCheckpoint(aCheckpoint)
, mSave(aSave)
{}
};
typedef EmptyMessage<MessageType::RecordingFlushed> RecordingFlushedMessage;
struct FatalErrorMessage : public Message
{
explicit FatalErrorMessage(uint32_t aSize)
: Message(MessageType::FatalError, aSize)
{}
const char* Error() const { return Data<FatalErrorMessage, const char>(); }
};
// The format for graphics data which will be sent to the middleman process.
// This needs to match the format expected for canvas image data, to avoid
// transforming the data before rendering it in the middleman process.
static const gfx::SurfaceFormat gSurfaceFormat = gfx::SurfaceFormat::R8G8B8X8;
struct PaintMessage : public Message
{
uint32_t mWidth;
uint32_t mHeight;
PaintMessage(uint32_t aWidth, uint32_t aHeight)
: Message(MessageType::Paint, sizeof(*this))
, mWidth(aWidth)
, mHeight(aHeight)
{}
};
struct HitCheckpointMessage : public Message
{
uint32_t mCheckpointId;
bool mRecordingEndpoint;
// When recording, the amount of non-idle time taken to get to this
// checkpoint from the previous one.
double mDurationMicroseconds;
HitCheckpointMessage(uint32_t aCheckpointId, bool aRecordingEndpoint, double aDurationMicroseconds)
: Message(MessageType::HitCheckpoint, sizeof(*this))
, mCheckpointId(aCheckpointId)
, mRecordingEndpoint(aRecordingEndpoint)
, mDurationMicroseconds(aDurationMicroseconds)
{}
};
struct HitBreakpointMessage : public Message
{
bool mRecordingEndpoint;
HitBreakpointMessage(uint32_t aSize, bool aRecordingEndpoint)
: Message(MessageType::HitBreakpoint, aSize)
, mRecordingEndpoint(aRecordingEndpoint)
{}
const uint32_t* Breakpoints() const { return Data<HitBreakpointMessage, uint32_t>(); }
uint32_t NumBreakpoints() const { return DataSize<HitBreakpointMessage, uint32_t>(); }
static HitBreakpointMessage* New(bool aRecordingEndpoint,
const uint32_t* aBreakpoints, size_t aNumBreakpoints) {
HitBreakpointMessage* res =
NewWithData<HitBreakpointMessage, uint32_t>(aNumBreakpoints, aRecordingEndpoint);
MOZ_RELEASE_ASSERT(res->NumBreakpoints() == aNumBreakpoints);
PodCopy(res->Data<HitBreakpointMessage, uint32_t>(), aBreakpoints, aNumBreakpoints);
return res;
}
};
typedef EmptyMessage<MessageType::AlwaysMarkMajorCheckpoints> AlwaysMarkMajorCheckpointsMessage;
class Channel
{
public:
// Note: the handler is responsible for freeing its input message. It will be
// called on the channel's message thread.
typedef std::function<void(Message*)> MessageHandler;
private:
// ID for this channel, unique for the middleman.
size_t mId;
// Callback to invoke off thread on incoming messages.
MessageHandler mHandler;
// Whether the channel is initialized and ready for outgoing messages.
Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mInitialized;
// Descriptor used to accept connections on the parent side.
int mConnectionFd;
// Descriptor used to communicate with the other side.
int mFd;
// For synchronizing initialization of the channel.
Monitor mMonitor;
// Buffer for message data received from the other side of the channel.
InfallibleVector<char, 0, AllocPolicy<MemoryKind::Generic>> mMessageBuffer;
// The number of bytes of data already in the message buffer.
size_t mMessageBytes;
// If spew is enabled, print a message and associated info to stderr.
void PrintMessage(const char* aPrefix, const Message& aMsg);
// Block until a complete message is received from the other side of the
// channel.
Message* WaitForMessage();
// Main routine for the channel's thread.
static void ThreadMain(void* aChannel);
public:
// Initialize this channel, connect to the other side, and spin up a thread
// to process incoming messages by calling aHandler.
Channel(size_t aId, bool aMiddlemanRecording, const MessageHandler& aHandler);
size_t GetId() { return mId; }
// Send a message to the other side of the channel. This must be called on
// the main thread, except for fatal error messages.
void SendMessage(const Message& aMsg);
};
// Command line option used to specify the middleman pid for a child process.
static const char* gMiddlemanPidOption = "-middlemanPid";
// Command line option used to specify the channel ID for a child process.
static const char* gChannelIDOption = "-recordReplayChannelID";
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_Channel_h

View file

@ -0,0 +1,521 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file has the logic which the replayed process uses to communicate with
// the middleman process.
#include "ChildInternal.h"
#include "base/message_loop.h"
#include "base/task.h"
#include "chrome/common/child_thread.h"
#include "chrome/common/mach_ipc_mac.h"
#include "ipc/Channel.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/layers/ImageDataSerializer.h"
#include "mozilla/Sprintf.h"
#include "mozilla/VsyncDispatcher.h"
#include "InfallibleVector.h"
#include "MemorySnapshot.h"
#include "ParentInternal.h"
#include "ProcessRecordReplay.h"
#include "ProcessRedirect.h"
#include "ProcessRewind.h"
#include "Thread.h"
#include "Units.h"
#include <algorithm>
#include <mach/mach_vm.h>
#include <unistd.h>
namespace mozilla {
namespace recordreplay {
namespace child {
///////////////////////////////////////////////////////////////////////////////
// Record/Replay IPC
///////////////////////////////////////////////////////////////////////////////
// Monitor used for various synchronization tasks.
Monitor* gMonitor;
// The singleton channel for communicating with the middleman.
Channel* gChannel;
static base::ProcessId gMiddlemanPid;
static base::ProcessId gParentPid;
static StaticInfallibleVector<char*> gParentArgv;
static char* gShmemPrefs;
static size_t gShmemPrefsLen;
// File descriptors used by a pipe to create checkpoints when instructed by the
// parent process.
static FileHandle gCheckpointWriteFd;
static FileHandle gCheckpointReadFd;
// Copy of the introduction message we got from the middleman. This is saved on
// receipt and then processed during InitRecordingOrReplayingProcess.
static IntroductionMessage* gIntroductionMessage;
// Processing routine for incoming channel messages.
static void
ChannelMessageHandler(Message* aMsg)
{
MOZ_RELEASE_ASSERT(MainThreadShouldPause() ||
aMsg->mType == MessageType::CreateCheckpoint ||
aMsg->mType == MessageType::Terminate);
switch (aMsg->mType) {
case MessageType::Introduction: {
MOZ_RELEASE_ASSERT(!gIntroductionMessage);
gIntroductionMessage = (IntroductionMessage*) aMsg->Clone();
break;
}
case MessageType::CreateCheckpoint: {
MOZ_RELEASE_ASSERT(IsRecording());
// Ignore requests to create checkpoints before we have reached the first
// paint and finished initializing.
if (navigation::IsInitialized()) {
uint8_t data = 0;
DirectWrite(gCheckpointWriteFd, &data, 1);
}
break;
}
case MessageType::Terminate: {
PrintSpew("Terminate message received, exiting...\n");
MOZ_RELEASE_ASSERT(IsRecording());
_exit(0);
}
case MessageType::SetIsActive: {
const SetIsActiveMessage& nmsg = (const SetIsActiveMessage&) *aMsg;
PauseMainThreadAndInvokeCallback([=]() { SetIsActiveChild(nmsg.mActive); });
break;
}
case MessageType::SetAllowIntentionalCrashes: {
const SetAllowIntentionalCrashesMessage& nmsg = (const SetAllowIntentionalCrashesMessage&) *aMsg;
PauseMainThreadAndInvokeCallback([=]() { SetAllowIntentionalCrashes(nmsg.mAllowed); });
break;
}
case MessageType::SetSaveCheckpoint: {
const SetSaveCheckpointMessage& nmsg = (const SetSaveCheckpointMessage&) *aMsg;
PauseMainThreadAndInvokeCallback([=]() { SetSaveCheckpoint(nmsg.mCheckpoint, nmsg.mSave); });
break;
}
case MessageType::FlushRecording: {
PauseMainThreadAndInvokeCallback(FlushRecording);
break;
}
case MessageType::DebuggerRequest: {
const DebuggerRequestMessage& nmsg = (const DebuggerRequestMessage&) *aMsg;
js::CharBuffer* buf = new js::CharBuffer();
buf->append(nmsg.Buffer(), nmsg.BufferSize());
PauseMainThreadAndInvokeCallback([=]() { navigation::DebuggerRequest(buf); });
break;
}
case MessageType::SetBreakpoint: {
const SetBreakpointMessage& nmsg = (const SetBreakpointMessage&) *aMsg;
PauseMainThreadAndInvokeCallback([=]() {
navigation::SetBreakpoint(nmsg.mId, nmsg.mPosition);
});
break;
}
case MessageType::Resume: {
const ResumeMessage& nmsg = (const ResumeMessage&) *aMsg;
PauseMainThreadAndInvokeCallback([=]() {
navigation::Resume(nmsg.mForward);
});
break;
}
case MessageType::RestoreCheckpoint: {
const RestoreCheckpointMessage& nmsg = (const RestoreCheckpointMessage&) *aMsg;
PauseMainThreadAndInvokeCallback([=]() {
navigation::RestoreCheckpoint(nmsg.mCheckpoint);
});
break;
}
default:
MOZ_CRASH();
}
free(aMsg);
}
char*
PrefsShmemContents(size_t aPrefsLen)
{
MOZ_RELEASE_ASSERT(aPrefsLen == gShmemPrefsLen);
return gShmemPrefs;
}
// Main routine for a thread whose sole purpose is to listen to requests from
// the middleman process to create a new checkpoint. This is separate from the
// channel thread because this thread is recorded and the latter is not
// recorded. By communicating between the two threads with a pipe, this
// thread's behavior will be replicated exactly when replaying and new
// checkpoints will be created at the same points as during recording.
static void
ListenForCheckpointThreadMain(void*)
{
while (true) {
uint8_t data = 0;
ssize_t rv = read(gCheckpointReadFd, &data, 1);
if (rv > 0) {
NS_DispatchToMainThread(NewRunnableFunction("NewCheckpoint", NewCheckpoint,
/* aTemporary = */ false));
} else {
MOZ_RELEASE_ASSERT(errno == EINTR);
}
}
}
void* gGraphicsShmem;
void
InitRecordingOrReplayingProcess(int* aArgc, char*** aArgv)
{
if (!IsRecordingOrReplaying()) {
return;
}
Maybe<int> middlemanPid;
Maybe<int> channelID;
for (int i = 0; i < *aArgc; i++) {
if (!strcmp((*aArgv)[i], gMiddlemanPidOption)) {
MOZ_RELEASE_ASSERT(middlemanPid.isNothing() && i + 1 < *aArgc);
middlemanPid.emplace(atoi((*aArgv)[i + 1]));
}
if (!strcmp((*aArgv)[i], gChannelIDOption)) {
MOZ_RELEASE_ASSERT(channelID.isNothing() && i + 1 < *aArgc);
channelID.emplace(atoi((*aArgv)[i + 1]));
}
}
MOZ_RELEASE_ASSERT(middlemanPid.isSome());
MOZ_RELEASE_ASSERT(channelID.isSome());
gMiddlemanPid = middlemanPid.ref();
Maybe<AutoPassThroughThreadEvents> pt;
pt.emplace();
gMonitor = new Monitor();
gChannel = new Channel(channelID.ref(), /* aMiddlemanRecording = */ false, ChannelMessageHandler);
pt.reset();
DirectCreatePipe(&gCheckpointWriteFd, &gCheckpointReadFd);
Thread::StartThread(ListenForCheckpointThreadMain, nullptr, false);
pt.emplace();
// Setup a mach port to receive the graphics shmem handle over.
ReceivePort receivePort(nsPrintfCString("WebReplay.%d.%d", gMiddlemanPid, (int) channelID.ref()).get());
MachSendMessage handshakeMessage(parent::GraphicsHandshakeMessageId);
handshakeMessage.AddDescriptor(MachMsgPortDescriptor(receivePort.GetPort(), MACH_MSG_TYPE_COPY_SEND));
MachPortSender sender(nsPrintfCString("WebReplay.%d", gMiddlemanPid).get());
kern_return_t kr = sender.SendMessage(handshakeMessage, 1000);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
// The parent should send us a handle to the graphics shmem.
MachReceiveMessage message;
kr = receivePort.WaitForMessage(&message, 0);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
MOZ_RELEASE_ASSERT(message.GetMessageID() == parent::GraphicsMemoryMessageId);
mach_port_t graphicsPort = message.GetTranslatedPort(0);
MOZ_RELEASE_ASSERT(graphicsPort != MACH_PORT_NULL);
mach_vm_address_t address = 0;
kr = mach_vm_map(mach_task_self(), &address, parent::GraphicsMemorySize, 0, VM_FLAGS_ANYWHERE,
graphicsPort, 0, false,
VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
VM_INHERIT_NONE);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
gGraphicsShmem = (void*) address;
pt.reset();
// We are ready to receive initialization messages from the middleman; pause
// so they can be sent.
HitCheckpoint(CheckpointId::Invalid, /* aRecordingEndpoint = */ false);
// Process the introduction message to fill in arguments.
MOZ_RELEASE_ASSERT(!gShmemPrefs);
MOZ_RELEASE_ASSERT(gParentArgv.empty());
gParentPid = gIntroductionMessage->mParentPid;
// Record/replay the introduction message itself so we get consistent args
// and prefs between recording and replaying.
{
IntroductionMessage* msg = IntroductionMessage::RecordReplay(*gIntroductionMessage);
gShmemPrefs = new char[msg->mPrefsLen];
memcpy(gShmemPrefs, msg->PrefsData(), msg->mPrefsLen);
gShmemPrefsLen = msg->mPrefsLen;
const char* pos = msg->ArgvString();
for (size_t i = 0; i < msg->mArgc; i++) {
gParentArgv.append(strdup(pos));
pos += strlen(pos) + 1;
}
free(msg);
}
free(gIntroductionMessage);
gIntroductionMessage = nullptr;
// Some argument manipulation code expects a null pointer at the end.
gParentArgv.append(nullptr);
MOZ_RELEASE_ASSERT(*aArgc >= 1);
MOZ_RELEASE_ASSERT(!strcmp((*aArgv)[0], gParentArgv[0]));
MOZ_RELEASE_ASSERT(gParentArgv.back() == nullptr);
*aArgc = gParentArgv.length() - 1; // For the trailing null.
*aArgv = gParentArgv.begin();
// If we failed to initialize then report it to the user.
if (gInitializationFailureMessage) {
ReportFatalError("%s", gInitializationFailureMessage);
Unreachable();
}
}
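// Illustrative example (an assumption based on GetArgumentsForChildProcess
// later in this patch): a replaying child launched by a middleman with pid
// 1234 on channel 0 sees, among its normal content process arguments,
//
//   -middlemanPid 1234 -recordReplayChannelID 0
//
// which the option loop above parses into gMiddlemanPid and the channel ID.
// The process kind and recording file options are also passed, but their
// option strings are defined elsewhere.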
base::ProcessId
MiddlemanProcessId()
{
return gMiddlemanPid;
}
base::ProcessId
ParentProcessId()
{
return gParentPid;
}
void
ReportFatalError(const char* aFormat, ...)
{
va_list ap;
va_start(ap, aFormat);
char buf[2048];
VsprintfLiteral(buf, aFormat, ap);
va_end(ap);
// Construct a FatalErrorMessage on the stack, to avoid touching the heap.
char msgBuf[4096];
size_t header = sizeof(FatalErrorMessage);
size_t len = std::min(strlen(buf) + 1, sizeof(msgBuf) - header);
FatalErrorMessage* msg = new(msgBuf) FatalErrorMessage(header + len);
memcpy(&msgBuf[header], buf, len);
msgBuf[sizeof(msgBuf) - 1] = 0;
// Don't take the message lock when sending this, to avoid touching the heap.
gChannel->SendMessage(*msg);
DirectPrint("***** Fatal Record/Replay Error *****\n");
DirectPrint(buf);
DirectPrint("\n");
UnrecoverableSnapshotFailure();
// Block until we get a terminate message and die.
Thread::WaitForeverNoIdle();
}
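// Layout of msgBuf above (illustrative): the first sizeof(FatalErrorMessage)
// bytes hold the placement-new'd message header, and the formatted error text
// follows immediately, truncated if necessary so the whole message fits in the
// 4096 byte stack buffer; Error() returns a pointer to that trailing text.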
void
NotifyFlushedRecording()
{
gChannel->SendMessage(RecordingFlushedMessage());
}
void
NotifyAlwaysMarkMajorCheckpoints()
{
if (IsActiveChild()) {
gChannel->SendMessage(AlwaysMarkMajorCheckpointsMessage());
}
}
///////////////////////////////////////////////////////////////////////////////
// Vsyncs
///////////////////////////////////////////////////////////////////////////////
static VsyncObserver* gVsyncObserver;
void
SetVsyncObserver(VsyncObserver* aObserver)
{
MOZ_RELEASE_ASSERT(!gVsyncObserver || !aObserver);
gVsyncObserver = aObserver;
}
void
NotifyVsyncObserver()
{
if (gVsyncObserver) {
gVsyncObserver->NotifyVsync(TimeStamp::Now());
}
}
///////////////////////////////////////////////////////////////////////////////
// Painting
///////////////////////////////////////////////////////////////////////////////
// Graphics memory is only written on the compositor thread and read on the
// main thread and by the middleman. The gPendingPaint flag is used to
// synchronize access, so that data is not read until the paint has completed.
static Maybe<PaintMessage> gPaintMessage;
static bool gPendingPaint;
// Target buffer for the draw target created by the child process widget.
static void* gDrawTargetBuffer;
static size_t gDrawTargetBufferSize;
already_AddRefed<gfx::DrawTarget>
DrawTargetForRemoteDrawing(LayoutDeviceIntSize aSize)
{
MOZ_RELEASE_ASSERT(!NS_IsMainThread());
gPaintMessage = Some(PaintMessage(aSize.width, aSize.height));
gfx::IntSize size(aSize.width, aSize.height);
size_t bufferSize = layers::ImageDataSerializer::ComputeRGBBufferSize(size, gSurfaceFormat);
MOZ_RELEASE_ASSERT(bufferSize <= parent::GraphicsMemorySize);
if (bufferSize != gDrawTargetBufferSize) {
free(gDrawTargetBuffer);
gDrawTargetBuffer = malloc(bufferSize);
gDrawTargetBufferSize = bufferSize;
}
size_t stride = layers::ImageDataSerializer::ComputeRGBStride(gSurfaceFormat, aSize.width);
RefPtr<gfx::DrawTarget> drawTarget =
gfx::Factory::CreateDrawTargetForData(gfx::BackendType::SKIA, (uint8_t*) gDrawTargetBuffer,
size, stride, gSurfaceFormat,
/* aUninitialized = */ true);
if (!drawTarget) {
MOZ_CRASH();
}
return drawTarget.forget();
}
void
NotifyPaintStart()
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
NewCheckpoint(/* aTemporary = */ false);
gPendingPaint = true;
}
void
WaitForPaintToComplete()
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MonitorAutoLock lock(*gMonitor);
while (gPendingPaint) {
gMonitor->Wait();
}
if (IsActiveChild() && gPaintMessage.isSome()) {
memcpy(gGraphicsShmem, gDrawTargetBuffer, gDrawTargetBufferSize);
gChannel->SendMessage(gPaintMessage.ref());
}
}
void
NotifyPaintComplete()
{
MOZ_RELEASE_ASSERT(!NS_IsMainThread());
MonitorAutoLock lock(*gMonitor);
MOZ_RELEASE_ASSERT(gPendingPaint);
gPendingPaint = false;
gMonitor->Notify();
}
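// Illustrative sequence for a single paint (a sketch, not part of the
// original patch):
//
//   Main thread                         Compositor thread
//   -----------                         -----------------
//   NotifyPaintStart()                  DrawTargetForRemoteDrawing(size)
//     -> NewCheckpoint()                  -> paints into gDrawTargetBuffer
//     -> gPendingPaint = true           NotifyPaintComplete()
//   WaitForPaintToComplete()              -> gPendingPaint = false, Notify()
//     -> copies buffer to gGraphicsShmem
//     -> sends PaintMessage to the middleman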
///////////////////////////////////////////////////////////////////////////////
// Checkpoint Messages
///////////////////////////////////////////////////////////////////////////////
// When recording, the time when the last HitCheckpoint message was sent.
static double gLastCheckpointTime;
// When recording and we are idle, the time when we became idle.
static double gIdleTimeStart;
void
BeginIdleTime()
{
MOZ_RELEASE_ASSERT(IsRecording() && NS_IsMainThread() && !gIdleTimeStart);
gIdleTimeStart = CurrentTime();
}
void
EndIdleTime()
{
MOZ_RELEASE_ASSERT(IsRecording() && NS_IsMainThread() && gIdleTimeStart);
// Erase the idle time from our measurements by advancing the last checkpoint
// time.
gLastCheckpointTime += CurrentTime() - gIdleTimeStart;
gIdleTimeStart = 0;
}
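// Worked example (illustrative, arbitrary time units): if the previous
// checkpoint was hit at t=10 and the main thread was idle from t=12 to t=15,
// EndIdleTime advances gLastCheckpointTime from 10 to 13, so a checkpoint hit
// at t=20 reports a duration of 7 rather than 10, excluding the idle span.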
void
HitCheckpoint(size_t aId, bool aRecordingEndpoint)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
double time = CurrentTime();
PauseMainThreadAndInvokeCallback([=]() {
double duration = 0;
if (aId > CheckpointId::First) {
duration = time - gLastCheckpointTime;
MOZ_RELEASE_ASSERT(duration > 0);
}
gChannel->SendMessage(HitCheckpointMessage(aId, aRecordingEndpoint, duration));
});
gLastCheckpointTime = time;
}
///////////////////////////////////////////////////////////////////////////////
// Debugger Messages
///////////////////////////////////////////////////////////////////////////////
void
RespondToRequest(const js::CharBuffer& aBuffer)
{
DebuggerResponseMessage* msg =
DebuggerResponseMessage::New(aBuffer.begin(), aBuffer.length());
gChannel->SendMessage(*msg);
free(msg);
}
void
HitBreakpoint(bool aRecordingEndpoint, const uint32_t* aBreakpoints, size_t aNumBreakpoints)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
HitBreakpointMessage* msg =
HitBreakpointMessage::New(aRecordingEndpoint, aBreakpoints, aNumBreakpoints);
PauseMainThreadAndInvokeCallback([=]() {
gChannel->SendMessage(*msg);
free(msg);
});
}
} // namespace child
} // namespace recordreplay
} // namespace mozilla

View file

@ -0,0 +1,77 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ChildIPC_h
#define mozilla_recordreplay_ChildIPC_h
#include "base/process.h"
#include "mozilla/gfx/2D.h"
#include "Units.h"
namespace mozilla {
class VsyncObserver;
namespace recordreplay {
namespace child {
// Naively replaying a child process execution will not perform any IPC. When
// the replaying process attempts to make system calls that communicate with
// the parent, function redirections are invoked that simply replay the values
// which those calls produced in the original recording.
//
// The replayed process needs to be able to communicate with the parent in some
// ways, however. IPDL messages need to be sent to the compositor in the parent
// to render graphics, and the parent needs to send messages to the client to
// control and debug the replay.
//
// This file manages the real IPC which occurs in a replaying process. New
// threads --- which did not exist while recording --- are spawned to manage
// IPC with the middleman process, and IPDL actors are created up front for use
// in communicating with the middleman using the PReplay protocol.
///////////////////////////////////////////////////////////////////////////////
// Public API
///////////////////////////////////////////////////////////////////////////////
// Initialize replaying IPC state. This is called once during process startup,
// and is a no-op if the process is not recording/replaying.
void InitRecordingOrReplayingProcess(int* aArgc, char*** aArgv);
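// Illustrative call site (an assumption, not mandated by this header): this
// is intended to run early in content process startup, before other argument
// parsing, since the recording/replaying implementation may rewrite argc/argv
// from the middleman's introduction message.
//
//   int main(int argc, char** argv) {
//     mozilla::recordreplay::child::InitRecordingOrReplayingProcess(&argc, &argv);
//     // ... continue normal startup with the possibly updated argc/argv ...
//   }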
// Get the contents of the prefs shmem as conveyed to the middleman process.
char* PrefsShmemContents(size_t aPrefsLen);
base::ProcessId MiddlemanProcessId();
base::ProcessId ParentProcessId();
void SetVsyncObserver(VsyncObserver* aObserver);
void NotifyVsyncObserver();
void NotifyPaint();
void NotifyPaintStart();
void NotifyPaintComplete();
void WaitForPaintToComplete();
already_AddRefed<gfx::DrawTarget> DrawTargetForRemoteDrawing(LayoutDeviceIntSize aSize);
// Notify the middleman that the recording was flushed.
void NotifyFlushedRecording();
// Notify the middleman about an AlwaysMarkMajorCheckpoints directive.
void NotifyAlwaysMarkMajorCheckpoints();
// Report a fatal error to the middleman process.
void ReportFatalError(const char* aFormat, ...);
// Mark a time span when the main thread is idle.
void BeginIdleTime();
void EndIdleTime();
} // namespace child
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ChildIPC_h

View file

@ -0,0 +1,81 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ChildInternal_h
#define mozilla_recordreplay_ChildInternal_h
#include "ChildIPC.h"
#include "JSControl.h"
#include "Monitor.h"
namespace mozilla {
namespace recordreplay {
// The navigation namespace has definitions for managing breakpoints and all
// other state that persists across rewinds, and for keeping track of the
// precise execution position of the child process. The middleman will send the
// child process Resume messages to travel forward and backward, but it is up
// to the child process to keep track of the rewinding and resuming necessary
// to find the next or previous point where a breakpoint or checkpoint is hit.
namespace navigation {
// Navigation state is initialized when the first checkpoint is reached.
bool IsInitialized();
// In a recording process, get the current execution point, aka the endpoint
// of the recording.
js::ExecutionPoint GetRecordingEndpoint();
// In a replaying process, set the recording endpoint. |aIndex| distinguishes
// the endpoints that have been sequentially written to the recording file as
// it has been flushed.
void SetRecordingEndpoint(size_t aIndex, const js::ExecutionPoint& aEndpoint);
// Save temporary checkpoints at all opportunities during navigation.
void AlwaysSaveTemporaryCheckpoints();
// Process incoming requests from the middleman.
void DebuggerRequest(js::CharBuffer* aBuffer);
void SetBreakpoint(size_t aId, const js::BreakpointPosition& aPosition);
void Resume(bool aForward);
void RestoreCheckpoint(size_t aId);
// Attempt to diverge from the recording so that new recorded events cause
// the process to rewind. Returns false if the divergence failed: either we
// can't rewind, or we already diverged here and then had an unhandled divergence.
bool MaybeDivergeFromRecording();
// Notify navigation that a position was hit.
void PositionHit(const js::BreakpointPosition& aPosition);
// Called when running forward, immediately before hitting a normal or
// temporary checkpoint.
void BeforeCheckpoint();
// Called immediately after hitting a normal or temporary checkpoint, either
// when running forward or immediately after rewinding.
void AfterCheckpoint(const CheckpointId& aCheckpoint);
} // namespace navigation
// IPC activity that can be triggered by navigation.
namespace child {
void RespondToRequest(const js::CharBuffer& aBuffer);
void HitCheckpoint(size_t aId, bool aRecordingEndpoint);
void HitBreakpoint(bool aRecordingEndpoint, const uint32_t* aBreakpoints, size_t aNumBreakpoints);
// Monitor used for various synchronization tasks.
extern Monitor* gMonitor;
} // namespace child
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ChildInternal_h

Diff not shown because of its large size. Load diff

View file

@ -0,0 +1,663 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ParentInternal.h"
#include "base/task.h"
#include "mozilla/dom/ContentChild.h"
#include "Thread.h"
namespace mozilla {
namespace recordreplay {
namespace parent {
// A saved introduction message for sending to all children.
static IntroductionMessage* gIntroductionMessage;
// How many channels have been constructed so far.
static size_t gNumChannels;
// Whether children might be debugged and should not be treated as hung.
static bool gChildrenAreDebugging;
// Whether we are allowed to restart crashed/hung child processes.
static bool gRestartEnabled;
/* static */ void
ChildProcessInfo::SetIntroductionMessage(IntroductionMessage* aMessage)
{
gIntroductionMessage = aMessage;
}
ChildProcessInfo::ChildProcessInfo(UniquePtr<ChildRole> aRole, bool aRecording)
: mChannel(nullptr)
, mRecording(aRecording)
, mRecoveryStage(RecoveryStage::None)
, mPaused(false)
, mPausedMessage(nullptr)
, mLastCheckpoint(CheckpointId::Invalid)
, mNumRecoveredMessages(0)
, mNumRestarts(0)
, mRole(std::move(aRole))
, mPauseNeeded(false)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
static bool gFirst = false;
if (!gFirst) {
gFirst = true;
gChildrenAreDebugging = !!getenv("WAIT_AT_START");
gRestartEnabled = !getenv("NO_RESTARTS");
}
mRole->SetProcess(this);
LaunchSubprocess();
// Replaying processes always save the first checkpoint, if saving
// checkpoints is allowed. This is currently assumed by the rewinding
// mechanism in the replaying process, and it would be nice to investigate
// removing this assumption.
if (!aRecording && CanRewind()) {
SendMessage(SetSaveCheckpointMessage(CheckpointId::First, true));
}
mRole->Initialize();
}
ChildProcessInfo::~ChildProcessInfo()
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
if (IsRecording()) {
SendMessage(TerminateMessage());
}
}
ChildProcessInfo::Disposition
ChildProcessInfo::GetDisposition()
{
// We can determine the disposition of the child by looking at the first
// resume message sent since the last time it reached a checkpoint.
for (Message* msg : mMessages) {
if (msg->mType == MessageType::Resume) {
const ResumeMessage& nmsg = static_cast<const ResumeMessage&>(*msg);
return nmsg.mForward ? AfterLastCheckpoint : BeforeLastCheckpoint;
}
}
return AtLastCheckpoint;
}
bool
ChildProcessInfo::IsPausedAtCheckpoint()
{
return IsPaused() && mPausedMessage->mType == MessageType::HitCheckpoint;
}
bool
ChildProcessInfo::IsPausedAtRecordingEndpoint()
{
if (!IsPaused()) {
return false;
}
if (mPausedMessage->mType == MessageType::HitCheckpoint) {
return static_cast<HitCheckpointMessage*>(mPausedMessage)->mRecordingEndpoint;
}
if (mPausedMessage->mType == MessageType::HitBreakpoint) {
return static_cast<HitBreakpointMessage*>(mPausedMessage)->mRecordingEndpoint;
}
return false;
}
bool
ChildProcessInfo::IsPausedAtMatchingBreakpoint(const BreakpointFilter& aFilter)
{
if (!IsPaused() || mPausedMessage->mType != MessageType::HitBreakpoint) {
return false;
}
HitBreakpointMessage* npaused = static_cast<HitBreakpointMessage*>(mPausedMessage);
for (size_t i = 0; i < npaused->NumBreakpoints(); i++) {
uint32_t breakpointId = npaused->Breakpoints()[i];
// Find the last time we sent a SetBreakpoint message to this process for
// this breakpoint ID.
SetBreakpointMessage* lastSet = nullptr;
for (Message* msg : mMessages) {
if (msg->mType == MessageType::SetBreakpoint) {
SetBreakpointMessage* nmsg = static_cast<SetBreakpointMessage*>(msg);
if (nmsg->mId == breakpointId) {
lastSet = nmsg;
}
}
}
MOZ_RELEASE_ASSERT(lastSet && lastSet->mPosition.IsValid());
if (aFilter(lastSet->mPosition.mKind)) {
return true;
}
}
return false;
}
void
ChildProcessInfo::AddMajorCheckpoint(size_t aId)
{
// Major checkpoints should be listed in order.
MOZ_RELEASE_ASSERT(mMajorCheckpoints.empty() || aId > mMajorCheckpoints.back());
mMajorCheckpoints.append(aId);
}
void
ChildProcessInfo::SetRole(UniquePtr<ChildRole> aRole)
{
MOZ_RELEASE_ASSERT(!IsRecovering());
PrintSpew("SetRole:%d %s\n", (int) GetId(), ChildRole::TypeString(aRole->GetType()));
mRole = std::move(aRole);
mRole->SetProcess(this);
mRole->Initialize();
}
void
ChildProcessInfo::OnIncomingMessage(size_t aChannelId, const Message& aMsg)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
// Ignore messages from channels for subprocesses we terminated already.
if (aChannelId != mChannel->GetId()) {
return;
}
// Always handle fatal errors in the same way.
if (aMsg.mType == MessageType::FatalError) {
const FatalErrorMessage& nmsg = static_cast<const FatalErrorMessage&>(aMsg);
AttemptRestart(nmsg.Error());
return;
}
mLastMessageTime = TimeStamp::Now();
if (IsRecovering()) {
OnIncomingRecoveryMessage(aMsg);
return;
}
// Update paused state.
MOZ_RELEASE_ASSERT(!IsPaused());
switch (aMsg.mType) {
case MessageType::HitCheckpoint:
case MessageType::HitBreakpoint:
MOZ_RELEASE_ASSERT(!mPausedMessage);
mPausedMessage = aMsg.Clone();
MOZ_FALLTHROUGH;
case MessageType::DebuggerResponse:
case MessageType::RecordingFlushed:
MOZ_RELEASE_ASSERT(mPausedMessage);
mPaused = true;
break;
default:
break;
}
if (aMsg.mType == MessageType::HitCheckpoint) {
const HitCheckpointMessage& nmsg = static_cast<const HitCheckpointMessage&>(aMsg);
mLastCheckpoint = nmsg.mCheckpointId;
// All messages sent since the last checkpoint are now obsolete, except
// SetBreakpoint messages.
InfallibleVector<Message*> newMessages;
for (Message* msg : mMessages) {
if (msg->mType == MessageType::SetBreakpoint) {
// Look for an older SetBreakpoint on the same ID to overwrite.
bool found = false;
for (Message*& older : newMessages) {
if (static_cast<SetBreakpointMessage*>(msg)->mId ==
static_cast<SetBreakpointMessage*>(older)->mId) {
free(older);
older = msg;
found = true;
}
}
if (!found) {
newMessages.emplaceBack(msg);
}
} else {
free(msg);
}
}
mMessages = std::move(newMessages);
}
// The primordial HitCheckpoint message is not forwarded to the role, as the
// role has not been initialized yet.
if (aMsg.mType != MessageType::HitCheckpoint || mLastCheckpoint) {
mRole->OnIncomingMessage(aMsg);
}
}
void
ChildProcessInfo::SendMessage(const Message& aMsg)
{
MOZ_RELEASE_ASSERT(!IsRecovering());
MOZ_RELEASE_ASSERT(NS_IsMainThread());
// Update paused state.
MOZ_RELEASE_ASSERT(IsPaused() ||
aMsg.mType == MessageType::CreateCheckpoint ||
aMsg.mType == MessageType::Terminate);
switch (aMsg.mType) {
case MessageType::Resume:
case MessageType::RestoreCheckpoint:
free(mPausedMessage);
mPausedMessage = nullptr;
MOZ_FALLTHROUGH;
case MessageType::DebuggerRequest:
case MessageType::FlushRecording:
mPaused = false;
break;
default:
break;
}
// Keep track of messages which affect the child's behavior.
switch (aMsg.mType) {
case MessageType::Resume:
case MessageType::RestoreCheckpoint:
case MessageType::DebuggerRequest:
case MessageType::SetBreakpoint:
mMessages.emplaceBack(aMsg.Clone());
break;
default:
break;
}
// Keep track of the checkpoints the process will save.
if (aMsg.mType == MessageType::SetSaveCheckpoint) {
const SetSaveCheckpointMessage& nmsg = static_cast<const SetSaveCheckpointMessage&>(aMsg);
MOZ_RELEASE_ASSERT(nmsg.mCheckpoint > MostRecentCheckpoint());
VectorAddOrRemoveEntry(mShouldSaveCheckpoints, nmsg.mCheckpoint, nmsg.mSave);
}
SendMessageRaw(aMsg);
}
void
ChildProcessInfo::SendMessageRaw(const Message& aMsg)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
mLastMessageTime = TimeStamp::Now();
mChannel->SendMessage(aMsg);
}
void
ChildProcessInfo::Recover(bool aPaused, Message* aPausedMessage, size_t aLastCheckpoint,
Message** aMessages, size_t aNumMessages)
{
MOZ_RELEASE_ASSERT(IsPaused());
SendMessageRaw(SetIsActiveMessage(false));
size_t mostRecentCheckpoint = MostRecentCheckpoint();
bool pausedAtCheckpoint = IsPausedAtCheckpoint();
// Clear out all messages that have been sent to this process.
for (Message* msg : mMessages) {
if (msg->mType == MessageType::SetBreakpoint) {
SetBreakpointMessage* nmsg = static_cast<SetBreakpointMessage*>(msg);
SendMessageRaw(SetBreakpointMessage(nmsg->mId, js::BreakpointPosition()));
}
free(msg);
}
mMessages.clear();
mPaused = aPaused;
mPausedMessage = aPausedMessage;
mLastCheckpoint = aLastCheckpoint;
for (size_t i = 0; i < aNumMessages; i++) {
mMessages.append(aMessages[i]->Clone());
}
mNumRecoveredMessages = 0;
if (mostRecentCheckpoint < mLastCheckpoint) {
mRecoveryStage = RecoveryStage::ReachingCheckpoint;
SendMessageRaw(ResumeMessage(/* aForward = */ true));
} else if (mostRecentCheckpoint > mLastCheckpoint || !pausedAtCheckpoint) {
mRecoveryStage = RecoveryStage::ReachingCheckpoint;
// Rewind to the last saved checkpoint at or prior to the target.
size_t targetCheckpoint = CheckpointId::Invalid;
for (size_t saved : mShouldSaveCheckpoints) {
if (saved <= mLastCheckpoint && saved > targetCheckpoint) {
targetCheckpoint = saved;
}
}
MOZ_RELEASE_ASSERT(targetCheckpoint != CheckpointId::Invalid);
SendMessageRaw(RestoreCheckpointMessage(targetCheckpoint));
} else {
mRecoveryStage = RecoveryStage::PlayingMessages;
SendNextRecoveryMessage();
}
WaitUntil([=]() { return !IsRecovering(); });
}
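// Worked example (illustrative): suppose this child most recently hit
// checkpoint 7 but the target state is paused at checkpoint 5, and this child
// saves checkpoints {1, 4}. Since 7 > 5, the code above picks the largest
// saved checkpoint at or before 5, sends RestoreCheckpoint(4), resumes forward
// until HitCheckpoint(5) arrives, and then replays the recorded messages one
// at a time via SendNextRecoveryMessage().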
void
ChildProcessInfo::Recover(ChildProcessInfo* aTargetProcess)
{
MOZ_RELEASE_ASSERT(aTargetProcess->IsPaused());
Recover(true, aTargetProcess->mPausedMessage->Clone(),
aTargetProcess->mLastCheckpoint,
aTargetProcess->mMessages.begin(), aTargetProcess->mMessages.length());
}
void
ChildProcessInfo::RecoverToCheckpoint(size_t aCheckpoint)
{
HitCheckpointMessage pausedMessage(aCheckpoint,
/* aRecordingEndpoint = */ false,
/* aDuration = */ 0);
Recover(true, pausedMessage.Clone(), aCheckpoint, nullptr, 0);
}
void
ChildProcessInfo::OnIncomingRecoveryMessage(const Message& aMsg)
{
switch (aMsg.mType) {
case MessageType::HitCheckpoint: {
MOZ_RELEASE_ASSERT(mRecoveryStage == RecoveryStage::ReachingCheckpoint);
const HitCheckpointMessage& nmsg = static_cast<const HitCheckpointMessage&>(aMsg);
if (nmsg.mCheckpointId < mLastCheckpoint) {
SendMessageRaw(ResumeMessage(/* aForward = */ true));
} else {
MOZ_RELEASE_ASSERT(nmsg.mCheckpointId == mLastCheckpoint);
mRecoveryStage = RecoveryStage::PlayingMessages;
SendNextRecoveryMessage();
}
break;
}
case MessageType::HitBreakpoint:
case MessageType::DebuggerResponse:
SendNextRecoveryMessage();
break;
default:
MOZ_CRASH("Unexpected message during recovery");
}
}
void
ChildProcessInfo::SendNextRecoveryMessage()
{
MOZ_RELEASE_ASSERT(mRecoveryStage == RecoveryStage::PlayingMessages);
// Keep sending messages to the child as long as it stays paused.
Message* msg;
do {
// Check if we have recovered to the desired paused state.
if (mNumRecoveredMessages == mMessages.length()) {
MOZ_RELEASE_ASSERT(IsPaused());
mRecoveryStage = RecoveryStage::None;
return;
}
msg = mMessages[mNumRecoveredMessages++];
SendMessageRaw(*msg);
// If we just sent a SetBreakpoint message then the child process is still
// paused, so keep sending more messages.
} while (msg->mType == MessageType::SetBreakpoint);
// If we have sent all messages and are in an unpaused state, we are done
// recovering.
if (mNumRecoveredMessages == mMessages.length() && !IsPaused()) {
mRecoveryStage = RecoveryStage::None;
}
}
///////////////////////////////////////////////////////////////////////////////
// Subprocess Management
///////////////////////////////////////////////////////////////////////////////
ipc::GeckoChildProcessHost* gRecordingProcess;
void
GetArgumentsForChildProcess(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
const char* aRecordingFile, bool aRecording,
std::vector<std::string>& aExtraArgs)
{
MOZ_RELEASE_ASSERT(IsMiddleman() || XRE_IsParentProcess());
aExtraArgs.push_back(gMiddlemanPidOption);
aExtraArgs.push_back(nsPrintfCString("%d", aMiddlemanPid).get());
aExtraArgs.push_back(gChannelIDOption);
aExtraArgs.push_back(nsPrintfCString("%d", (int) aChannelId).get());
aExtraArgs.push_back(gProcessKindOption);
aExtraArgs.push_back(nsPrintfCString("%d", aRecording
? (int) ProcessKind::Recording
: (int) ProcessKind::Replaying).get());
aExtraArgs.push_back(gRecordingFileOption);
aExtraArgs.push_back(aRecordingFile);
}
void
ChildProcessInfo::LaunchSubprocess()
{
size_t channelId = gNumChannels++;
// Create a new channel every time we launch a new subprocess, without
// deleting or tearing down the old one's state. This is pretty lame and it
// would be nice if we could do something better here, especially because
// with restarts we could create any number of channels over time.
mChannel = new Channel(channelId, IsRecording(), [=](Message* aMsg) {
ReceiveChildMessageOnMainThread(channelId, aMsg);
});
if (IsRecording()) {
std::vector<std::string> extraArgs;
GetArgumentsForChildProcess(base::GetCurrentProcId(), channelId,
gRecordingFilename, /* aRecording = */ true, extraArgs);
MOZ_RELEASE_ASSERT(!gRecordingProcess);
gRecordingProcess = new ipc::GeckoChildProcessHost(GeckoProcessType_Content);
if (!gRecordingProcess->LaunchAndWaitForProcessHandle(extraArgs)) {
MOZ_CRASH("ChildProcessInfo::LaunchSubprocess");
}
} else {
dom::ContentChild::GetSingleton()->SendCreateReplayingProcess(channelId);
}
mLastMessageTime = TimeStamp::Now();
SendGraphicsMemoryToChild();
// The child should send us a HitCheckpoint with an invalid ID to pause.
WaitUntilPaused();
MOZ_RELEASE_ASSERT(gIntroductionMessage);
SendMessage(*gIntroductionMessage);
}
///////////////////////////////////////////////////////////////////////////////
// Recovering Crashed / Hung Children
///////////////////////////////////////////////////////////////////////////////
// The number of times we will restart a process before giving up.
static const size_t MaxRestarts = 5;
bool
ChildProcessInfo::CanRestart()
{
return gRestartEnabled
&& !IsRecording()
&& !IsPaused()
&& !IsRecovering()
&& mNumRestarts < MaxRestarts;
}
void
ChildProcessInfo::AttemptRestart(const char* aWhy)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
PrintSpew("Warning: Child process died [%d]: %s\n", (int) GetId(), aWhy);
if (!CanRestart()) {
nsAutoCString why(aWhy);
dom::ContentChild::GetSingleton()->SendRecordReplayFatalError(why);
Thread::WaitForeverNoIdle();
}
mNumRestarts++;
dom::ContentChild::GetSingleton()->SendTerminateReplayingProcess(mChannel->GetId());
bool newPaused = mPaused;
Message* newPausedMessage = mPausedMessage;
mPaused = false;
mPausedMessage = nullptr;
size_t newLastCheckpoint = mLastCheckpoint;
mLastCheckpoint = CheckpointId::Invalid;
InfallibleVector<Message*> newMessages;
newMessages.append(mMessages.begin(), mMessages.length());
mMessages.clear();
InfallibleVector<size_t> newShouldSaveCheckpoints;
newShouldSaveCheckpoints.append(mShouldSaveCheckpoints.begin(), mShouldSaveCheckpoints.length());
mShouldSaveCheckpoints.clear();
LaunchSubprocess();
// Disallow child processes from intentionally crashing after restarting.
SendMessage(SetAllowIntentionalCrashesMessage(false));
for (size_t checkpoint : newShouldSaveCheckpoints) {
SendMessage(SetSaveCheckpointMessage(checkpoint, true));
}
Recover(newPaused, newPausedMessage, newLastCheckpoint,
newMessages.begin(), newMessages.length());
}
///////////////////////////////////////////////////////////////////////////////
// Handling Channel Messages
///////////////////////////////////////////////////////////////////////////////
// When messages are received from child processes, we want their handler to
// execute on the main thread. The main thread might be blocked in WaitUntil,
// so runnables associated with child processes have special handling.
// All messages received on a channel thread which the main thread has not
// processed yet. This is protected by gMonitor.
struct PendingMessage
{
ChildProcessInfo* mProcess;
size_t mChannelId;
Message* mMsg;
};
static StaticInfallibleVector<PendingMessage> gPendingMessages;
// Whether there is a pending task on the main thread's message loop to handle
// all pending messages.
static bool gHasPendingMessageRunnable;
// Process a pending message from aProcess (or any process if aProcess is null)
// and return whether such a message was found. This must be called on the main
// thread with gMonitor held.
/* static */ bool
ChildProcessInfo::MaybeProcessPendingMessage(ChildProcessInfo* aProcess)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
for (size_t i = 0; i < gPendingMessages.length(); i++) {
if (!aProcess || gPendingMessages[i].mProcess == aProcess) {
PendingMessage copy = gPendingMessages[i];
gPendingMessages.erase(&gPendingMessages[i]);
MonitorAutoUnlock unlock(*gMonitor);
copy.mProcess->OnIncomingMessage(copy.mChannelId, *copy.mMsg);
free(copy.mMsg);
return true;
}
}
return false;
}
// How many seconds to wait without hearing from an unpaused child before
// considering that child to be hung.
static const size_t HangSeconds = 5;
void
ChildProcessInfo::WaitUntil(const std::function<bool()>& aCallback)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
while (!aCallback()) {
MonitorAutoLock lock(*gMonitor);
if (!MaybeProcessPendingMessage(this)) {
if (gChildrenAreDebugging) {
// Don't watch for hangs when children are being debugged.
gMonitor->Wait();
} else {
TimeStamp deadline = mLastMessageTime + TimeDuration::FromSeconds(HangSeconds);
if (TimeStamp::Now() >= deadline) {
MonitorAutoUnlock unlock(*gMonitor);
AttemptRestart("Child process non-responsive");
}
gMonitor->WaitUntil(deadline);
}
}
}
}
// Runnable created on the main thread to process any pending messages
// received on channel threads which were not handled while the main thread
// was blocked.
/* static */ void
ChildProcessInfo::MaybeProcessPendingMessageRunnable()
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MonitorAutoLock lock(*gMonitor);
MOZ_RELEASE_ASSERT(gHasPendingMessageRunnable);
gHasPendingMessageRunnable = false;
while (MaybeProcessPendingMessage(nullptr)) {}
}
// Queue a message received from the child for processing. This is called on
// a channel thread; the message is handled asynchronously on the main thread.
void
ChildProcessInfo::ReceiveChildMessageOnMainThread(size_t aChannelId, Message* aMsg)
{
MOZ_RELEASE_ASSERT(!NS_IsMainThread());
MonitorAutoLock lock(*gMonitor);
PendingMessage pending;
pending.mProcess = this;
pending.mChannelId = aChannelId;
pending.mMsg = aMsg;
gPendingMessages.append(pending);
// Notify the main thread, if it is waiting in WaitUntil.
gMonitor->NotifyAll();
// Make sure there is a task on the main thread's message loop that can
// process this task if necessary.
if (!gHasPendingMessageRunnable) {
gHasPendingMessageRunnable = true;
MainThreadMessageLoop()->PostTask(NewRunnableFunction("MaybeProcessPendingMessageRunnable",
MaybeProcessPendingMessageRunnable));
}
}
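// Illustrative flow (a sketch, not part of the original patch): a channel
// thread appends an incoming message to gPendingMessages and notifies
// gMonitor; the main thread then processes it either from WaitUntil(), if it
// is blocked waiting on this child, or from the
// MaybeProcessPendingMessageRunnable task posted to its message loop.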
} // namespace parent
} // namespace recordreplay
} // namespace mozilla

View file

@ -0,0 +1,169 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file provides implementations for the public IPC API on platforms where
// recording/replaying is not enabled.
#include "ChildIPC.h"
#include "ParentIPC.h"
namespace mozilla {
namespace recordreplay {
namespace child {
void
InitRecordingOrReplayingProcess(int* aArgc, char*** aArgv)
{
}
char*
PrefsShmemContents(size_t aPrefsLen)
{
MOZ_CRASH();
}
base::ProcessId
MiddlemanProcessId()
{
MOZ_CRASH();
}
base::ProcessId
ParentProcessId()
{
MOZ_CRASH();
}
void
SetVsyncObserver(VsyncObserver* aObserver)
{
MOZ_CRASH();
}
void
NotifyVsyncObserver()
{
MOZ_CRASH();
}
void
NotifyPaint()
{
MOZ_CRASH();
}
void
NotifyPaintStart()
{
MOZ_CRASH();
}
void
NotifyPaintComplete()
{
MOZ_CRASH();
}
void
WaitForPaintToComplete()
{
MOZ_CRASH();
}
already_AddRefed<gfx::DrawTarget>
DrawTargetForRemoteDrawing(LayoutDeviceIntSize aSize)
{
MOZ_CRASH();
}
void
NotifyFlushedRecording()
{
MOZ_CRASH();
}
void
NotifyAlwaysMarkMajorCheckpoints()
{
MOZ_CRASH();
}
void
ReportFatalError(const char* aFormat, ...)
{
MOZ_CRASH();
}
void
BeginIdleTime()
{
MOZ_CRASH();
}
void
EndIdleTime()
{
MOZ_CRASH();
}
} // namespace child
namespace parent {
void
InitializeUIProcess(int aArgc, char** aArgv)
{
}
const char*
SaveAllRecordingsDirectory()
{
return nullptr;
}
void
SaveRecording(const ipc::FileDescriptor& aFile)
{
MOZ_CRASH();
}
ipc::MessageChannel*
ChannelToUIProcess()
{
MOZ_CRASH();
}
void
InitializeMiddleman(int aArgc, char* aArgv[], base::ProcessId aParentPid)
{
MOZ_CRASH();
}
void
NotePrefsShmemContents(char* aPrefs, size_t aPrefsLen)
{
MOZ_CRASH();
}
void
OpenChannel(base::ProcessId aMiddlemanPid, uint32_t aChannelId, ipc::FileDescriptor* aConnection)
{
MOZ_CRASH();
}
void
GetArgumentsForChildProcess(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
const char* aRecordingFile, bool aRecording,
std::vector<std::string>& aExtraArgs)
{
MOZ_CRASH();
}
} // namespace parent
} // namespace recordreplay
} // namespace mozilla

View file

@ -0,0 +1,726 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "JSControl.h"
#include "js/Conversions.h"
#include "ChildInternal.h"
#include "ParentInternal.h"
#include "xpcprivate.h"
using namespace JS;
namespace mozilla {
namespace recordreplay {
namespace js {
// Callback for filling CharBuffers when converting objects to JSON.
static bool
FillCharBufferCallback(const char16_t* buf, uint32_t len, void* data)
{
CharBuffer* buffer = (CharBuffer*) data;
MOZ_RELEASE_ASSERT(buffer->length() == 0);
buffer->append(buf, len);
return true;
}
static JSObject*
NonNullObject(JSContext* aCx, HandleValue aValue)
{
if (!aValue.isObject()) {
JS_ReportErrorASCII(aCx, "Expected object");
return nullptr;
}
return &aValue.toObject();
}
///////////////////////////////////////////////////////////////////////////////
// BreakpointPosition Conversion
///////////////////////////////////////////////////////////////////////////////
// Names of properties which JS code uses to specify the contents of a BreakpointPosition.
static const char gKindProperty[] = "kind";
static const char gScriptProperty[] = "script";
static const char gOffsetProperty[] = "offset";
static const char gFrameIndexProperty[] = "frameIndex";
JSObject*
BreakpointPosition::Encode(JSContext* aCx) const
{
RootedString kindString(aCx, JS_NewStringCopyZ(aCx, KindString()));
RootedObject obj(aCx, JS_NewObject(aCx, nullptr));
if (!kindString || !obj ||
!JS_DefineProperty(aCx, obj, gKindProperty, kindString, JSPROP_ENUMERATE) ||
(mScript != BreakpointPosition::EMPTY_SCRIPT &&
!JS_DefineProperty(aCx, obj, gScriptProperty, mScript, JSPROP_ENUMERATE)) ||
(mOffset != BreakpointPosition::EMPTY_OFFSET &&
!JS_DefineProperty(aCx, obj, gOffsetProperty, mOffset, JSPROP_ENUMERATE)) ||
(mFrameIndex != BreakpointPosition::EMPTY_FRAME_INDEX &&
!JS_DefineProperty(aCx, obj, gFrameIndexProperty, mFrameIndex, JSPROP_ENUMERATE)))
{
return nullptr;
}
return obj;
}
static bool
MaybeGetNumberProperty(JSContext* aCx, HandleObject aObject, const char* aProperty, uint32_t* aResult)
{
RootedValue v(aCx);
if (!JS_GetProperty(aCx, aObject, aProperty, &v)) {
return false;
}
if (v.isNumber()) {
*aResult = (size_t) v.toNumber();
}
return true;
}
bool
BreakpointPosition::Decode(JSContext* aCx, HandleObject aObject)
{
RootedValue v(aCx);
if (!JS_GetProperty(aCx, aObject, gKindProperty, &v)) {
return false;
}
RootedString str(aCx, ToString(aCx, v));
for (size_t i = BreakpointPosition::Invalid + 1; i < BreakpointPosition::sKindCount; i++) {
BreakpointPosition::Kind kind = (BreakpointPosition::Kind) i;
bool match;
if (!JS_StringEqualsAscii(aCx, str, BreakpointPosition::StaticKindString(kind), &match))
return false;
if (match) {
mKind = kind;
break;
}
}
if (mKind == BreakpointPosition::Invalid) {
JS_ReportErrorASCII(aCx, "Could not decode breakpoint position kind");
return false;
}
if (!MaybeGetNumberProperty(aCx, aObject, gScriptProperty, &mScript) ||
!MaybeGetNumberProperty(aCx, aObject, gOffsetProperty, &mOffset) ||
!MaybeGetNumberProperty(aCx, aObject, gFrameIndexProperty, &mFrameIndex))
{
return false;
}
return true;
}
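// Illustrative position object (a sketch; the exact kind strings come from
// KindString()/StaticKindString(), which are defined elsewhere):
//
//   { kind: "<kind string>", script: 3, offset: 17, frameIndex: 0 }
//
// Encode() omits "script", "offset" and "frameIndex" when the corresponding
// field still holds its EMPTY_* sentinel, and Decode() leaves fields untouched
// when the matching property is absent.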
///////////////////////////////////////////////////////////////////////////////
// Middleman Methods
///////////////////////////////////////////////////////////////////////////////
// Keep track of all replay debuggers in existence, so that they can all be
// invalidated when the process is unpaused.
static StaticInfallibleVector<PersistentRootedObject*> gReplayDebuggers;
static bool
Middleman_RegisterReplayDebugger(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
RootedObject obj(aCx, NonNullObject(aCx, args.get(0)));
if (!obj) {
return false;
}
PersistentRootedObject* root = new PersistentRootedObject(aCx);
*root = obj;
gReplayDebuggers.append(root);
args.rval().setUndefined();
return true;
}
static bool
InvalidateReplayDebuggersAfterUnpause(JSContext* aCx)
{
RootedValue rval(aCx);
for (auto root : gReplayDebuggers) {
JSAutoRealm ac(aCx, *root);
if (!JS_CallFunctionName(aCx, *root, "invalidateAfterUnpause",
HandleValueArray::empty(), &rval))
{
return false;
}
}
return true;
}
static bool
Middleman_CanRewind(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
args.rval().setBoolean(parent::CanRewind());
return true;
}
static bool
Middleman_Resume(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
bool forward = ToBoolean(args.get(0));
if (!InvalidateReplayDebuggersAfterUnpause(aCx)) {
return false;
}
parent::Resume(forward);
args.rval().setUndefined();
return true;
}
static bool
Middleman_Pause(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
parent::Pause();
args.rval().setUndefined();
return true;
}
static bool
Middleman_SendRequest(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
RootedObject requestObject(aCx, NonNullObject(aCx, args.get(0)));
if (!requestObject) {
return false;
}
CharBuffer requestBuffer;
if (!ToJSONMaybeSafely(aCx, requestObject, FillCharBufferCallback, &requestBuffer)) {
return false;
}
CharBuffer responseBuffer;
parent::SendRequest(requestBuffer, &responseBuffer);
return JS_ParseJSON(aCx, responseBuffer.begin(), responseBuffer.length(), args.rval());
}
struct InstalledBreakpoint
{
PersistentRootedObject mHandler;
BreakpointPosition mPosition;
InstalledBreakpoint(JSContext* aCx, JSObject* aHandler, const BreakpointPosition& aPosition)
: mHandler(aCx, aHandler), mPosition(aPosition)
{}
};
static StaticInfallibleVector<InstalledBreakpoint*> gBreakpoints;
static bool
Middleman_SetBreakpoint(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
RootedObject handler(aCx, NonNullObject(aCx, args.get(0)));
RootedObject positionObject(aCx, NonNullObject(aCx, args.get(1)));
if (!handler || !positionObject) {
return false;
}
BreakpointPosition position;
if (!position.Decode(aCx, positionObject)) {
return false;
}
size_t breakpointId;
for (breakpointId = 0; breakpointId < gBreakpoints.length(); breakpointId++) {
if (!gBreakpoints[breakpointId]) {
break;
}
}
if (breakpointId == gBreakpoints.length()) {
gBreakpoints.append(nullptr);
}
gBreakpoints[breakpointId] = new InstalledBreakpoint(aCx, handler, position);
parent::SetBreakpoint(breakpointId, position);
args.rval().setInt32(breakpointId);
return true;
}
bool
HitBreakpoint(JSContext* aCx, size_t aId)
{
InstalledBreakpoint* breakpoint = gBreakpoints[aId];
MOZ_RELEASE_ASSERT(breakpoint);
JSAutoRealm ac(aCx, breakpoint->mHandler);
RootedValue handlerValue(aCx, ObjectValue(*breakpoint->mHandler));
RootedValue rval(aCx);
return JS_CallFunctionValue(aCx, nullptr, handlerValue,
HandleValueArray::empty(), &rval)
// The replaying process will resume after this hook returns, if it
// hasn't already been explicitly resumed.
&& InvalidateReplayDebuggersAfterUnpause(aCx);
}
/* static */ bool
Middleman_ClearBreakpoint(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
if (!args.get(0).isNumber()) {
JS_ReportErrorASCII(aCx, "Bad breakpoint ID");
return false;
}
size_t breakpointId = (size_t) args.get(0).toNumber();
if (breakpointId >= gBreakpoints.length() || !gBreakpoints[breakpointId]) {
JS_ReportErrorASCII(aCx, "Bad breakpoint ID");
return false;
}
delete gBreakpoints[breakpointId];
gBreakpoints[breakpointId] = nullptr;
parent::SetBreakpoint(breakpointId, BreakpointPosition());
args.rval().setUndefined();
return true;
}
///////////////////////////////////////////////////////////////////////////////
// Devtools Sandbox
///////////////////////////////////////////////////////////////////////////////
static PersistentRootedObject* gDevtoolsSandbox;
// URL of the root script that runs when recording/replaying.
#define ReplayScriptURL "resource://devtools/server/actors/replay/replay.js"
void
SetupDevtoolsSandbox()
{
MOZ_RELEASE_ASSERT(!gDevtoolsSandbox);
dom::AutoJSAPI jsapi;
if (!jsapi.Init(xpc::PrivilegedJunkScope())) {
MOZ_CRASH("SetupDevtoolsSandbox");
}
JSContext* cx = jsapi.cx();
xpc::SandboxOptions options;
options.sandboxName.AssignLiteral("Record/Replay Devtools Sandbox");
options.invisibleToDebugger = true;
RootedValue v(cx);
nsresult rv = CreateSandboxObject(cx, &v, nsXPConnect::SystemPrincipal(), options);
MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
gDevtoolsSandbox = new PersistentRootedObject(cx);
*gDevtoolsSandbox = ::js::UncheckedUnwrap(&v.toObject());
JSAutoRealm ac(cx, *gDevtoolsSandbox);
ErrorResult er;
dom::GlobalObject global(cx, *gDevtoolsSandbox);
RootedObject obj(cx);
dom::ChromeUtils::Import(global, NS_LITERAL_STRING(ReplayScriptURL),
dom::Optional<HandleObject>(), &obj, er);
MOZ_RELEASE_ASSERT(!er.Failed());
}
extern "C" {
MOZ_EXPORT bool
RecordReplayInterface_IsInternalScript(const char* aURL)
{
return !strcmp(aURL, ReplayScriptURL);
}
} // extern "C"
#undef ReplayScriptURL
void
ProcessRequest(const char16_t* aRequest, size_t aRequestLength, CharBuffer* aResponse)
{
AutoDisallowThreadEvents disallow;
AutoSafeJSContext cx;
JSAutoRealm ac(cx, *gDevtoolsSandbox);
RootedValue requestValue(cx);
if (!JS_ParseJSON(cx, aRequest, aRequestLength, &requestValue)) {
MOZ_CRASH("ProcessRequest: ParseJSON failed");
}
RootedValue responseValue(cx);
if (!JS_CallFunctionName(cx, *gDevtoolsSandbox, "ProcessRequest",
HandleValueArray(requestValue), &responseValue)) {
MOZ_CRASH("ProcessRequest: Handler failed");
}
// Processing the request may have called into MaybeDivergeFromRecording.
// Now that we've finished processing it, don't tolerate future events that
// would otherwise cause us to rewind to the last checkpoint.
DisallowUnhandledDivergeFromRecording();
if (!responseValue.isObject()) {
MOZ_CRASH("ProcessRequest: Response must be an object");
}
RootedObject responseObject(cx, &responseValue.toObject());
if (!ToJSONMaybeSafely(cx, responseObject, FillCharBufferCallback, aResponse)) {
MOZ_CRASH("ProcessRequest: ToJSONMaybeSafely failed");
}
}
void
EnsurePositionHandler(const BreakpointPosition& aPosition)
{
AutoDisallowThreadEvents disallow;
AutoSafeJSContext cx;
JSAutoRealm ac(cx, *gDevtoolsSandbox);
RootedObject obj(cx, aPosition.Encode(cx));
if (!obj) {
MOZ_CRASH("EnsurePositionHandler");
}
RootedValue objValue(cx, ObjectValue(*obj));
RootedValue rval(cx);
if (!JS_CallFunctionName(cx, *gDevtoolsSandbox, "EnsurePositionHandler",
HandleValueArray(objValue), &rval)) {
MOZ_CRASH("EnsurePositionHandler");
}
}
void
ClearPositionHandlers()
{
AutoDisallowThreadEvents disallow;
AutoSafeJSContext cx;
JSAutoRealm ac(cx, *gDevtoolsSandbox);
RootedValue rval(cx);
if (!JS_CallFunctionName(cx, *gDevtoolsSandbox, "ClearPositionHandlers",
HandleValueArray::empty(), &rval)) {
MOZ_CRASH("ClearPositionHandlers");
}
}
void
ClearPausedState()
{
AutoDisallowThreadEvents disallow;
AutoSafeJSContext cx;
JSAutoRealm ac(cx, *gDevtoolsSandbox);
RootedValue rval(cx);
if (!JS_CallFunctionName(cx, *gDevtoolsSandbox, "ClearPausedState",
HandleValueArray::empty(), &rval)) {
MOZ_CRASH("ClearPausedState");
}
}
Maybe<BreakpointPosition>
GetEntryPosition(const BreakpointPosition& aPosition)
{
AutoDisallowThreadEvents disallow;
AutoSafeJSContext cx;
JSAutoRealm ac(cx, *gDevtoolsSandbox);
RootedObject positionObject(cx, aPosition.Encode(cx));
if (!positionObject) {
MOZ_CRASH("GetEntryPosition");
}
RootedValue rval(cx);
RootedValue positionValue(cx, ObjectValue(*positionObject));
if (!JS_CallFunctionName(cx, *gDevtoolsSandbox, "GetEntryPosition",
HandleValueArray(positionValue), &rval)) {
MOZ_CRASH("GetEntryPosition");
}
if (!rval.isObject()) {
return Nothing();
}
RootedObject rvalObject(cx, &rval.toObject());
BreakpointPosition entryPosition;
if (!entryPosition.Decode(cx, rvalObject)) {
MOZ_CRASH("GetEntryPosition");
}
return Some(entryPosition);
}
///////////////////////////////////////////////////////////////////////////////
// Replaying process content
///////////////////////////////////////////////////////////////////////////////
struct ContentInfo
{
const void* mToken;
char* mURL;
char* mContentType;
InfallibleVector<char16_t> mContent;
ContentInfo(const void* aToken, const char* aURL, const char* aContentType)
: mToken(aToken),
mURL(strdup(aURL)),
mContentType(strdup(aContentType))
{}
ContentInfo(ContentInfo&& aOther)
: mToken(aOther.mToken),
mURL(aOther.mURL),
mContentType(aOther.mContentType),
mContent(std::move(aOther.mContent))
{
aOther.mURL = nullptr;
aOther.mContentType = nullptr;
}
~ContentInfo()
{
free(mURL);
free(mContentType);
}
};
// All content that has been parsed so far. Protected by child::gMonitor.
static StaticInfallibleVector<ContentInfo> gContent;
extern "C" {
MOZ_EXPORT void
RecordReplayInterface_BeginContentParse(const void* aToken,
const char* aURL, const char* aContentType)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(aToken);
RecordReplayAssert("BeginContentParse %s", aURL);
MonitorAutoLock lock(*child::gMonitor);
for (ContentInfo& info : gContent) {
MOZ_RELEASE_ASSERT(info.mToken != aToken);
}
gContent.emplaceBack(aToken, aURL, aContentType);
}
MOZ_EXPORT void
RecordReplayInterface_AddContentParseData(const void* aToken,
const char16_t* aBuffer, size_t aLength)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(aToken);
RecordReplayAssert("AddContentParseDataForRecordReplay %d", (int) aLength);
MonitorAutoLock lock(*child::gMonitor);
for (ContentInfo& info : gContent) {
if (info.mToken == aToken) {
info.mContent.append(aBuffer, aLength);
return;
}
}
MOZ_CRASH("Unknown content parse token");
}
MOZ_EXPORT void
RecordReplayInterface_EndContentParse(const void* aToken)
{
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
MOZ_RELEASE_ASSERT(aToken);
MonitorAutoLock lock(*child::gMonitor);
for (ContentInfo& info : gContent) {
if (info.mToken == aToken) {
info.mToken = nullptr;
return;
}
}
MOZ_CRASH("Unknown content parse token");
}
} // extern "C"
static bool
FetchContent(JSContext* aCx, HandleString aURL,
MutableHandleString aContentType, MutableHandleString aContent)
{
MonitorAutoLock lock(*child::gMonitor);
for (ContentInfo& info : gContent) {
if (JS_FlatStringEqualsAscii(JS_ASSERT_STRING_IS_FLAT(aURL), info.mURL)) {
aContentType.set(JS_NewStringCopyZ(aCx, info.mContentType));
aContent.set(JS_NewUCStringCopyN(aCx, (const char16_t*) info.mContent.begin(),
info.mContent.length()));
return aContentType && aContent;
}
}
aContentType.set(JS_NewStringCopyZ(aCx, "text/plain"));
aContent.set(JS_NewStringCopyZ(aCx, "Could not find record/replay content"));
return aContentType && aContent;
}
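// Sketch of the expected flow (illustrative): a parse begins with
// BeginContentParse(token, url, contentType), each chunk of source text is
// appended with AddContentParseData(token, buffer, length), and the token is
// released with EndContentParse(token). FetchContent() then serves
// RecordReplayControl.getContent() requests by matching the requested URL
// against the accumulated gContent entries, falling back to a text/plain
// placeholder when no entry matches.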
///////////////////////////////////////////////////////////////////////////////
// Recording/Replaying Methods
///////////////////////////////////////////////////////////////////////////////
static bool
RecordReplay_AreThreadEventsDisallowed(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
args.rval().setBoolean(AreThreadEventsDisallowed());
return true;
}
static bool
RecordReplay_MaybeDivergeFromRecording(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
args.rval().setBoolean(navigation::MaybeDivergeFromRecording());
return true;
}
static bool
RecordReplay_AdvanceProgressCounter(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
AdvanceExecutionProgressCounter();
args.rval().setUndefined();
return true;
}
static bool
RecordReplay_PositionHit(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
RootedObject obj(aCx, NonNullObject(aCx, args.get(0)));
if (!obj) {
return false;
}
BreakpointPosition position;
if (!position.Decode(aCx, obj)) {
return false;
}
navigation::PositionHit(position);
args.rval().setUndefined();
return true;
}
static bool
RecordReplay_GetContent(JSContext* aCx, unsigned aArgc, Value* aVp)
{
CallArgs args = CallArgsFromVp(aArgc, aVp);
RootedString url(aCx, ToString(aCx, args.get(0)));
RootedString contentType(aCx), content(aCx);
if (!FetchContent(aCx, url, &contentType, &content)) {
return false;
}
RootedObject obj(aCx, JS_NewObject(aCx, nullptr));
if (!obj ||
!JS_DefineProperty(aCx, obj, "contentType", contentType, JSPROP_ENUMERATE) ||
!JS_DefineProperty(aCx, obj, "content", content, JSPROP_ENUMERATE))
{
return false;
}
args.rval().setObject(*obj);
return true;
}
static bool
RecordReplay_Dump(JSContext* aCx, unsigned aArgc, Value* aVp)
{
// This method is an alternative to dump() that can be used in places where
// thread events are disallowed.
CallArgs args = CallArgsFromVp(aArgc, aVp);
for (size_t i = 0; i < args.length(); i++) {
RootedString str(aCx, ToString(aCx, args[i]));
if (!str) {
return false;
}
char* cstr = JS_EncodeString(aCx, str);
if (!cstr) {
return false;
}
Print("%s", cstr);
JS_free(aCx, cstr);
}
args.rval().setUndefined();
return true;
}
///////////////////////////////////////////////////////////////////////////////
// Plumbing
///////////////////////////////////////////////////////////////////////////////
static const JSFunctionSpec gMiddlemanMethods[] = {
JS_FN("registerReplayDebugger", Middleman_RegisterReplayDebugger, 1, 0),
JS_FN("canRewind", Middleman_CanRewind, 0, 0),
JS_FN("resume", Middleman_Resume, 1, 0),
JS_FN("pause", Middleman_Pause, 0, 0),
JS_FN("sendRequest", Middleman_SendRequest, 1, 0),
JS_FN("setBreakpoint", Middleman_SetBreakpoint, 2, 0),
JS_FN("clearBreakpoint", Middleman_ClearBreakpoint, 1, 0),
JS_FS_END
};
static const JSFunctionSpec gRecordReplayMethods[] = {
JS_FN("areThreadEventsDisallowed", RecordReplay_AreThreadEventsDisallowed, 0, 0),
JS_FN("maybeDivergeFromRecording", RecordReplay_MaybeDivergeFromRecording, 0, 0),
JS_FN("advanceProgressCounter", RecordReplay_AdvanceProgressCounter, 0, 0),
JS_FN("positionHit", RecordReplay_PositionHit, 1, 0),
JS_FN("getContent", RecordReplay_GetContent, 1, 0),
JS_FN("dump", RecordReplay_Dump, 1, 0),
JS_FS_END
};
extern "C" {
MOZ_EXPORT bool
RecordReplayInterface_DefineRecordReplayControlObject(JSContext* aCx, JSObject* aObjectArg)
{
RootedObject object(aCx, aObjectArg);
RootedObject staticObject(aCx, JS_NewObject(aCx, nullptr));
if (!staticObject || !JS_DefineProperty(aCx, object, "RecordReplayControl", staticObject, 0)) {
return false;
}
// FIXME Bug 1475901 Define this interface via WebIDL instead of raw JSAPI.
if (IsMiddleman()) {
if (!JS_DefineFunctions(aCx, staticObject, gMiddlemanMethods)) {
return false;
}
} else if (IsRecordingOrReplaying()) {
if (!JS_DefineFunctions(aCx, staticObject, gRecordReplayMethods)) {
return false;
}
} else {
// Leave RecordReplayControl as an empty object. We still define the object
// to avoid reference errors in scripts that run in normal processes.
}
return true;
}
} // extern "C"
} // namespace js
} // namespace recordreplay
} // namespace mozilla

View file

@@ -0,0 +1,208 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_JSControl_h
#define mozilla_recordreplay_JSControl_h
#include "jsapi.h"
#include "InfallibleVector.h"
#include "ProcessRewind.h"
#include "mozilla/DefineEnum.h"
namespace mozilla {
namespace recordreplay {
namespace js {
// This file manages interactions between the record/replay infrastructure and
// JS code. This interaction can occur in two ways:
//
// - In the middleman process, devtools server code can use the
// RecordReplayControl object to send requests to the recording/replaying
// child process and control its behavior.
//
// - In the recording/replaying process, a JS sandbox is created before the
// first checkpoint is reached, which responds to the middleman's requests.
// The RecordReplayControl object is also provided here, but has a different
// interface which allows the JS to query the current process.
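//
// For example (illustrative), devtools server code in the middleman calls
// methods such as RecordReplayControl.sendRequest() and
// RecordReplayControl.setBreakpoint(), while the sandbox in the
// recording/replaying process calls methods such as
// RecordReplayControl.maybeDivergeFromRecording() and
// RecordReplayControl.getContent() (see the gMiddlemanMethods and
// gRecordReplayMethods tables).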
// Identification for a position where a breakpoint can be installed in a child
// process. Breakpoint positions describe all places between checkpoints where
// the child process can pause and be inspected by the middleman. A particular
// BreakpointPosition can be reached any number of times during execution of
// the process.
struct BreakpointPosition
{
MOZ_DEFINE_ENUM_AT_CLASS_SCOPE(Kind, (
Invalid,
// Break at a script offset. Requires script/offset.
Break,
// Break for an on-step handler within a frame.
// Requires script/offset/frameIndex.
OnStep,
// Break either when any frame is popped, or when a specific frame is
// popped. Requires script/frameIndex in the latter case.
OnPop,
// Break when entering any frame.
EnterFrame,
// Break when a new top-level script is created.
NewScript
));
Kind mKind;
// Optional information associated with the breakpoint.
uint32_t mScript;
uint32_t mOffset;
uint32_t mFrameIndex;
static const uint32_t EMPTY_SCRIPT = (uint32_t) -1;
static const uint32_t EMPTY_OFFSET = (uint32_t) -1;
static const uint32_t EMPTY_FRAME_INDEX = (uint32_t) -1;
BreakpointPosition()
: mKind(Invalid), mScript(EMPTY_SCRIPT), mOffset(EMPTY_OFFSET), mFrameIndex(EMPTY_FRAME_INDEX)
{}
explicit BreakpointPosition(Kind aKind,
uint32_t aScript = EMPTY_SCRIPT,
uint32_t aOffset = EMPTY_OFFSET,
uint32_t aFrameIndex = EMPTY_FRAME_INDEX)
: mKind(aKind), mScript(aScript), mOffset(aOffset), mFrameIndex(aFrameIndex)
{}
bool IsValid() const { return mKind != Invalid; }
inline bool operator==(const BreakpointPosition& o) const {
return mKind == o.mKind
&& mScript == o.mScript
&& mOffset == o.mOffset
&& mFrameIndex == o.mFrameIndex;
}
inline bool operator!=(const BreakpointPosition& o) const { return !(*this == o); }
// Return whether an execution point matching |o| also matches this.
inline bool Subsumes(const BreakpointPosition& o) const {
return (*this == o)
|| (mKind == OnPop && o.mKind == OnPop && mScript == EMPTY_SCRIPT)
|| (mKind == Break && o.mKind == OnStep && mScript == o.mScript && mOffset == o.mOffset);
}
static const char* StaticKindString(Kind aKind) {
switch (aKind) {
case Invalid: return "Invalid";
case Break: return "Break";
case OnStep: return "OnStep";
case OnPop: return "OnPop";
case EnterFrame: return "EnterFrame";
case NewScript: return "NewScript";
}
MOZ_CRASH("Bad BreakpointPosition kind");
}
const char* KindString() const {
return StaticKindString(mKind);
}
JSObject* Encode(JSContext* aCx) const;
bool Decode(JSContext* aCx, JS::HandleObject aObject);
};
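// Example (illustrative; the script/offset values are arbitrary): a Break
// position subsumes an OnStep position at the same script and offset, so a
// breakpoint installed for the former also triggers for the latter.
//
//   BreakpointPosition breakAt(BreakpointPosition::Break, /* script */ 3, /* offset */ 17);
//   BreakpointPosition stepAt(BreakpointPosition::OnStep, 3, 17, /* frameIndex */ 0);
//   MOZ_ASSERT(breakAt.Subsumes(stepAt) && !stepAt.Subsumes(breakAt));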
// Identification for a point in the execution of a child process where it may
// pause and be inspected by the middleman. A particular execution point will
// be reached exactly once during the execution of the process.
struct ExecutionPoint
{
// ID of the last normal checkpoint prior to this point.
size_t mCheckpoint;
// How much progress execution has made prior to reaching the point,
// or zero if the execution point refers to the checkpoint itself.
//
// A given BreakpointPosition may not be reached twice without an intervening
// increment of the global progress counter.
ProgressCounter mProgress;
// The position reached after making the specified amount of progress,
// invalid if the execution point refers to the checkpoint itself.
BreakpointPosition mPosition;
ExecutionPoint()
: mCheckpoint(CheckpointId::Invalid)
, mProgress(0)
{}
explicit ExecutionPoint(size_t aCheckpoint)
: mCheckpoint(aCheckpoint)
, mProgress(0)
{}
ExecutionPoint(size_t aCheckpoint, ProgressCounter aProgress,
const BreakpointPosition& aPosition)
: mCheckpoint(aCheckpoint), mProgress(aProgress), mPosition(aPosition)
{
// ExecutionPoint positions must be as precise as possible, and cannot
// subsume other positions.
MOZ_RELEASE_ASSERT(aPosition.IsValid());
MOZ_RELEASE_ASSERT(aPosition.mKind != BreakpointPosition::OnPop ||
aPosition.mScript != BreakpointPosition::EMPTY_SCRIPT);
MOZ_RELEASE_ASSERT(aPosition.mKind != BreakpointPosition::Break);
}
bool HasPosition() const { return mPosition.IsValid(); }
inline bool operator==(const ExecutionPoint& o) const {
return mCheckpoint == o.mCheckpoint
&& mProgress == o.mProgress
&& mPosition == o.mPosition;
}
inline bool operator!=(const ExecutionPoint& o) const { return !(*this == o); }
};
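// Example (illustrative; values arbitrary): two hits of the same breakpoint
// position are distinguished by the progress counter, which is advanced
// between them, so each ExecutionPoint is reached exactly once.
//
//   BreakpointPosition step(BreakpointPosition::OnStep, /* script */ 3, /* offset */ 17, /* frameIndex */ 0);
//   ExecutionPoint first(/* checkpoint */ 2, /* progress */ 100, step);
//   ExecutionPoint second(2, 101, step);
//   MOZ_ASSERT(first != second);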
// Buffer type used for encoding object data.
typedef InfallibleVector<char16_t> CharBuffer;
// Called in the middleman when a breakpoint with the specified id has been hit.
bool HitBreakpoint(JSContext* aCx, size_t id);
// Set up the JS sandbox in the current recording/replaying process and load
// its target script.
void SetupDevtoolsSandbox();
// The following hooks are used in the recording/replaying process to
// call methods defined by the JS sandbox.
// Handle an incoming request from the middleman.
void ProcessRequest(const char16_t* aRequest, size_t aRequestLength,
CharBuffer* aResponse);
// Ensure there is a handler in place that will call RecordReplayControl.positionHit
// whenever the specified execution position is reached.
void EnsurePositionHandler(const BreakpointPosition& aPosition);
// Clear all installed position handlers.
void ClearPositionHandlers();
// Clear all state that is kept while execution is paused.
void ClearPausedState();
// Given an execution position inside a script, get an execution position for
// the entry point of that script, otherwise return nothing.
Maybe<BreakpointPosition> GetEntryPosition(const BreakpointPosition& aPosition);
} // namespace js
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_JSControl_h

View file

@@ -0,0 +1,395 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file has the logic which the middleman process uses to forward IPDL
// messages from the recording process to the UI process, and from the UI
// process to either itself, the recording process, or both.
#include "ParentInternal.h"
#include "mozilla/dom/PBrowserChild.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/layers/CompositorBridgeChild.h"
namespace mozilla {
namespace recordreplay {
namespace parent {
// Known associations between managee and manager routing IDs.
static StaticInfallibleVector<std::pair<int32_t, int32_t>> gProtocolManagers;
// The routing IDs of actors in the parent process that have been destroyed.
static StaticInfallibleVector<int32_t> gDeadRoutingIds;
static void
NoteProtocolManager(int32_t aManagee, int32_t aManager)
{
gProtocolManagers.emplaceBack(aManagee, aManager);
for (auto id : gDeadRoutingIds) {
if (id == aManager) {
gDeadRoutingIds.emplaceBack(aManagee);
}
}
}
static void
DestroyRoutingId(int32_t aId)
{
gDeadRoutingIds.emplaceBack(aId);
for (auto manager : gProtocolManagers) {
if (manager.second == aId) {
DestroyRoutingId(manager.first);
}
}
}
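// Example (illustrative, with arbitrary routing IDs): if a PDocAccessible
// actor with routing ID 9 is managed by a browser actor with routing ID 7,
// then DestroyRoutingId(7) marks both 7 and 9 as dead, and later messages
// from the recording child addressed to either ID are suppressed by
// MessageTargetIsDead() below.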
// Return whether a message from the child process to the UI process is being
// sent to a target that is being destroyed, and should be suppressed.
static bool
MessageTargetIsDead(const IPC::Message& aMessage)
{
// After the parent process destroys a browser, we handle the destroy in
// both the middleman and child processes. Both processes will respond to
// the destroy by sending additional messages to the UI process indicating
// the browser has been destroyed, but we need to ignore such messages from
// the child process (if it is still recording) to avoid confusing the UI
// process.
for (int32_t id : gDeadRoutingIds) {
if (id == aMessage.routing_id()) {
return true;
}
}
return false;
}
static bool
HandleMessageInMiddleman(ipc::Side aSide, const IPC::Message& aMessage)
{
IPC::Message::msgid_t type = aMessage.type();
// Ignore messages sent from the child to dead UI process targets.
if (aSide == ipc::ParentSide) {
// When the browser is destroyed in the UI process all its children will
// also be destroyed. Figure out the routing IDs of children which we need
// to recognize as dead once the browser is destroyed. This is not a
// complete list of all the browser's children, but only includes ones
// where crashes have been seen as a result.
if (type == dom::PBrowser::Msg_PDocAccessibleConstructor__ID) {
PickleIterator iter(aMessage);
ipc::ActorHandle handle;
if (!IPC::ReadParam(&aMessage, &iter, &handle))
MOZ_CRASH("IPC::ReadParam failed");
NoteProtocolManager(handle.mId, aMessage.routing_id());
}
if (MessageTargetIsDead(aMessage)) {
PrintSpew("Suppressing %s message to dead target\n", IPC::StringFromIPCMessageType(type));
return true;
}
return false;
}
// Handle messages that should be sent to both the middleman and the
// child process.
if (// Initialization that must be performed in both processes.
type == dom::PContent::Msg_PBrowserConstructor__ID ||
type == dom::PContent::Msg_RegisterChrome__ID ||
type == dom::PContent::Msg_SetXPCOMProcessAttributes__ID ||
type == dom::PContent::Msg_SetProcessSandbox__ID ||
// Graphics messages that affect both processes.
type == dom::PBrowser::Msg_InitRendering__ID ||
type == dom::PBrowser::Msg_SetDocShellIsActive__ID ||
type == dom::PBrowser::Msg_PRenderFrameConstructor__ID ||
type == dom::PBrowser::Msg_RenderLayers__ID ||
type == dom::PBrowser::Msg_UpdateDimensions__ID ||
// This message performs some graphics related initialization.
type == dom::PBrowser::Msg_LoadURL__ID ||
// May be loading devtools code that runs in the middleman process.
type == dom::PBrowser::Msg_LoadRemoteScript__ID ||
// May be sending a message for receipt by devtools code.
type == dom::PBrowser::Msg_AsyncMessage__ID ||
// Teardown that must be performed in both processes.
type == dom::PBrowser::Msg_Destroy__ID) {
ipc::IProtocol::Result r =
dom::ContentChild::GetSingleton()->PContentChild::OnMessageReceived(aMessage);
MOZ_RELEASE_ASSERT(r == ipc::IProtocol::MsgProcessed);
if (type == dom::PContent::Msg_SetXPCOMProcessAttributes__ID) {
// Preferences are initialized via the SetXPCOMProcessAttributes message.
PreferencesLoaded();
}
if (type == dom::PBrowser::Msg_Destroy__ID) {
DestroyRoutingId(aMessage.routing_id());
}
if (type == dom::PBrowser::Msg_RenderLayers__ID) {
// Graphics are being loaded or unloaded for a tab, so update what we are
// showing to the UI process according to the last paint performed.
UpdateGraphicsInUIProcess(nullptr);
}
return false;
}
// Handle messages that should only be sent to the middleman.
if (// Initialization that should only happen in the middleman.
type == dom::PContent::Msg_InitRendering__ID ||
// Record/replay specific messages.
type == dom::PContent::Msg_SaveRecording__ID ||
// Teardown that should only happen in the middleman.
type == dom::PContent::Msg_Shutdown__ID) {
ipc::IProtocol::Result r = dom::ContentChild::GetSingleton()->PContentChild::OnMessageReceived(aMessage);
MOZ_RELEASE_ASSERT(r == ipc::IProtocol::MsgProcessed);
return true;
}
// The content process has its own compositor, so compositor messages from
// the UI process should only be handled in the middleman.
if (type >= layers::PCompositorBridge::PCompositorBridgeStart &&
type <= layers::PCompositorBridge::PCompositorBridgeEnd) {
layers::CompositorBridgeChild* compositorChild = layers::CompositorBridgeChild::Get();
ipc::IProtocol::Result r = compositorChild->OnMessageReceived(aMessage);
MOZ_RELEASE_ASSERT(r == ipc::IProtocol::MsgProcessed);
return true;
}
return false;
}
static bool gMainThreadIsWaitingForIPDLReply = false;
bool
MainThreadIsWaitingForIPDLReply()
{
return gMainThreadIsWaitingForIPDLReply;
}
// Helper for places where the main thread will block while waiting on a
// synchronous IPDL reply from a child process. Incoming messages from the
// child must be handled immediately.
struct MOZ_RAII AutoMarkMainThreadWaitingForIPDLReply
{
AutoMarkMainThreadWaitingForIPDLReply() {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MOZ_RELEASE_ASSERT(!gMainThreadIsWaitingForIPDLReply);
gMainThreadIsWaitingForIPDLReply = true;
}
~AutoMarkMainThreadWaitingForIPDLReply() {
gMainThreadIsWaitingForIPDLReply = false;
}
};
class MiddlemanProtocol : public ipc::IToplevelProtocol
{
public:
ipc::Side mSide;
MiddlemanProtocol* mOpposite;
MessageLoop* mOppositeMessageLoop;
explicit MiddlemanProtocol(ipc::Side aSide)
: ipc::IToplevelProtocol("MiddlemanProtocol", PContentMsgStart, aSide)
, mSide(aSide)
, mOpposite(nullptr)
, mOppositeMessageLoop(nullptr)
{}
virtual void RemoveManagee(int32_t, IProtocol*) override {
MOZ_CRASH("MiddlemanProtocol::RemoveManagee");
}
static void ForwardMessageAsync(MiddlemanProtocol* aProtocol, Message* aMessage) {
if (ActiveChildIsRecording()) {
PrintSpew("ForwardAsyncMsg %s %s %d\n",
(aProtocol->mSide == ipc::ChildSide) ? "Child" : "Parent",
IPC::StringFromIPCMessageType(aMessage->type()),
(int) aMessage->routing_id());
if (!aProtocol->GetIPCChannel()->Send(aMessage)) {
MOZ_CRASH("MiddlemanProtocol::ForwardMessageAsync");
}
} else {
delete aMessage;
}
}
virtual Result OnMessageReceived(const Message& aMessage) override {
// If we do not have a recording process then just see if the message can
// be handled in the middleman.
if (!mOppositeMessageLoop) {
MOZ_RELEASE_ASSERT(mSide == ipc::ChildSide);
HandleMessageInMiddleman(mSide, aMessage);
return MsgProcessed;
}
// Copy the message first, since HandleMessageInMiddleman may destructively
// modify it through OnMessageReceived calls.
Message* nMessage = new Message();
nMessage->CopyFrom(aMessage);
if (HandleMessageInMiddleman(mSide, aMessage)) {
delete nMessage;
return MsgProcessed;
}
mOppositeMessageLoop->PostTask(NewRunnableFunction("ForwardMessageAsync", ForwardMessageAsync,
mOpposite, nMessage));
return MsgProcessed;
}
static void ForwardMessageSync(MiddlemanProtocol* aProtocol, Message* aMessage, Message** aReply) {
PrintSpew("ForwardSyncMsg %s\n", IPC::StringFromIPCMessageType(aMessage->type()));
MOZ_RELEASE_ASSERT(!*aReply);
Message* nReply = new Message();
if (!aProtocol->GetIPCChannel()->Send(aMessage, nReply)) {
MOZ_CRASH("MiddlemanProtocol::ForwardMessageSync");
}
MonitorAutoLock lock(*gMonitor);
*aReply = nReply;
gMonitor->Notify();
}
virtual Result OnMessageReceived(const Message& aMessage, Message*& aReply) override {
MOZ_RELEASE_ASSERT(mOppositeMessageLoop);
MOZ_RELEASE_ASSERT(mSide == ipc::ChildSide || !MessageTargetIsDead(aMessage));
Message* nMessage = new Message();
nMessage->CopyFrom(aMessage);
mOppositeMessageLoop->PostTask(NewRunnableFunction("ForwardMessageSync", ForwardMessageSync,
mOpposite, nMessage, &aReply));
if (mSide == ipc::ChildSide) {
AutoMarkMainThreadWaitingForIPDLReply blocked;
ActiveRecordingChild()->WaitUntil([&]() { return !!aReply; });
} else {
MonitorAutoLock lock(*gMonitor);
while (!aReply) {
gMonitor->Wait();
}
}
PrintSpew("SyncMsgDone\n");
return MsgProcessed;
}
static void ForwardCallMessage(MiddlemanProtocol* aProtocol, Message* aMessage, Message** aReply) {
PrintSpew("ForwardSyncCall %s\n", IPC::StringFromIPCMessageType(aMessage->type()));
MOZ_RELEASE_ASSERT(!*aReply);
Message* nReply = new Message();
if (!aProtocol->GetIPCChannel()->Call(aMessage, nReply)) {
MOZ_CRASH("MiddlemanProtocol::ForwardCallMessage");
}
MonitorAutoLock lock(*gMonitor);
*aReply = nReply;
gMonitor->Notify();
}
virtual Result OnCallReceived(const Message& aMessage, Message*& aReply) override {
MOZ_RELEASE_ASSERT(mOppositeMessageLoop);
MOZ_RELEASE_ASSERT(mSide == ipc::ChildSide || !MessageTargetIsDead(aMessage));
Message* nMessage = new Message();
nMessage->CopyFrom(aMessage);
mOppositeMessageLoop->PostTask(NewRunnableFunction("ForwardCallMessage", ForwardCallMessage,
mOpposite, nMessage, &aReply));
if (mSide == ipc::ChildSide) {
AutoMarkMainThreadWaitingForIPDLReply blocked;
ActiveRecordingChild()->WaitUntil([&]() { return !!aReply; });
} else {
MonitorAutoLock lock(*gMonitor);
while (!aReply) {
gMonitor->Wait();
}
}
PrintSpew("SyncCallDone\n");
return MsgProcessed;
}
virtual int32_t GetProtocolTypeId() override {
MOZ_CRASH("MiddlemanProtocol::GetProtocolTypeId");
}
virtual void OnChannelClose() override {
MOZ_RELEASE_ASSERT(mSide == ipc::ChildSide);
MainThreadMessageLoop()->PostTask(NewRunnableFunction("Shutdown", Shutdown));
}
virtual void OnChannelError() override {
MOZ_CRASH("MiddlemanProtocol::OnChannelError");
}
};
static MiddlemanProtocol* gChildProtocol;
static MiddlemanProtocol* gParentProtocol;
ipc::MessageChannel*
ChannelToUIProcess()
{
return gChildProtocol->GetIPCChannel();
}
// Message loop for forwarding messages between the parent process and a
// recording process.
static MessageLoop* gForwardingMessageLoop;
static bool gParentProtocolOpened = false;
// Main routine for the forwarding message loop thread.
static void
ForwardingMessageLoopMain(void*)
{
MOZ_RELEASE_ASSERT(ActiveChildIsRecording());
MessageLoop messageLoop;
gForwardingMessageLoop = &messageLoop;
gChildProtocol->mOppositeMessageLoop = gForwardingMessageLoop;
gParentProtocol->Open(gRecordingProcess->GetChannel(),
base::GetProcId(gRecordingProcess->GetChildProcessHandle()));
// Notify the main thread that we have finished initialization.
{
MonitorAutoLock lock(*gMonitor);
gParentProtocolOpened = true;
gMonitor->Notify();
}
messageLoop.Run();
}
void
InitializeForwarding()
{
gChildProtocol = new MiddlemanProtocol(ipc::ChildSide);
if (gProcessKind == ProcessKind::MiddlemanRecording) {
gParentProtocol = new MiddlemanProtocol(ipc::ParentSide);
gParentProtocol->mOpposite = gChildProtocol;
gChildProtocol->mOpposite = gParentProtocol;
gParentProtocol->mOppositeMessageLoop = MainThreadMessageLoop();
if (!PR_CreateThread(PR_USER_THREAD, ForwardingMessageLoopMain, nullptr,
PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0)) {
MOZ_CRASH("parent::Initialize");
}
// Wait for the forwarding message loop thread to finish initialization.
MonitorAutoLock lock(*gMonitor);
while (!gParentProtocolOpened) {
gMonitor->Wait();
}
}
}
} // namespace parent
} // namespace recordreplay
} // namespace mozilla

View file

@@ -0,0 +1,177 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file has the logic which the middleman process uses to send messages to
// the UI process with painting data from the child process.
#include "ParentInternal.h"
#include "chrome/common/mach_ipc_mac.h"
#include "mozilla/dom/TabChild.h"
#include "mozilla/layers/CompositorBridgeChild.h"
#include "mozilla/layers/ImageDataSerializer.h"
#include "mozilla/layers/LayerTransactionChild.h"
#include "mozilla/layers/PTextureChild.h"
#include <mach/mach_vm.h>
namespace mozilla {
namespace recordreplay {
namespace parent {
// Graphics memory buffer shared with all child processes.
void* gGraphicsMemory;
static mach_port_t gGraphicsPort;
static ReceivePort* gGraphicsReceiver;
void
InitializeGraphicsMemory()
{
mach_vm_address_t address;
kern_return_t kr = mach_vm_allocate(mach_task_self(), &address,
GraphicsMemorySize, VM_FLAGS_ANYWHERE);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
memory_object_size_t memoryObjectSize = GraphicsMemorySize;
kr = mach_make_memory_entry_64(mach_task_self(),
&memoryObjectSize,
address,
VM_PROT_DEFAULT,
&gGraphicsPort,
MACH_PORT_NULL);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
MOZ_RELEASE_ASSERT(memoryObjectSize == GraphicsMemorySize);
gGraphicsMemory = (void*) address;
gGraphicsReceiver = new ReceivePort(nsPrintfCString("WebReplay.%d", getpid()).get());
}
void
SendGraphicsMemoryToChild()
{
MachReceiveMessage handshakeMessage;
kern_return_t kr = gGraphicsReceiver->WaitForMessage(&handshakeMessage, 0);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
MOZ_RELEASE_ASSERT(handshakeMessage.GetMessageID() == GraphicsHandshakeMessageId);
mach_port_t childPort = handshakeMessage.GetTranslatedPort(0);
MOZ_RELEASE_ASSERT(childPort != MACH_PORT_NULL);
MachSendMessage message(GraphicsMemoryMessageId);
message.AddDescriptor(MachMsgPortDescriptor(gGraphicsPort, MACH_MSG_TYPE_COPY_SEND));
MachPortSender sender(childPort);
kr = sender.SendMessage(message, 1000);
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
}
static Maybe<PaintMessage> gLastPaint;
// Global object for the sandbox used to paint graphics data in this process.
static JS::PersistentRootedObject* gGraphicsSandbox;
static void
InitGraphicsSandbox()
{
MOZ_RELEASE_ASSERT(!gGraphicsSandbox);
dom::AutoJSAPI jsapi;
if (!jsapi.Init(xpc::PrivilegedJunkScope())) {
MOZ_CRASH("InitGraphicsSandbox");
}
JSContext* cx = jsapi.cx();
xpc::SandboxOptions options;
options.sandboxName.AssignLiteral("Record/Replay Graphics Sandbox");
options.invisibleToDebugger = true;
RootedValue v(cx);
nsresult rv = CreateSandboxObject(cx, &v, nsXPConnect::SystemPrincipal(), options);
MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
gGraphicsSandbox = new JS::PersistentRootedObject(cx);
*gGraphicsSandbox = ::js::UncheckedUnwrap(&v.toObject());
JSAutoRealm ac(cx, *gGraphicsSandbox);
ErrorResult er;
dom::GlobalObject global(cx, *gGraphicsSandbox);
RootedObject obj(cx);
dom::ChromeUtils::Import(global, NS_LITERAL_STRING("resource://devtools/server/actors/replay/graphics.js"),
dom::Optional<HandleObject>(), &obj, er);
MOZ_RELEASE_ASSERT(!er.Failed());
}
// Buffer used to transform graphics memory, if necessary.
static void* gBufferMemory;
void
UpdateGraphicsInUIProcess(const PaintMessage* aMsg)
{
MOZ_RELEASE_ASSERT(NS_IsMainThread());
if (aMsg) {
gLastPaint = Some(*aMsg);
} else if (!gLastPaint.isSome()) {
return;
}
// Make sure there is a sandbox which is running the graphics JS module.
if (!gGraphicsSandbox) {
InitGraphicsSandbox();
}
AutoSafeJSContext cx;
JSAutoRealm ac(cx, *gGraphicsSandbox);
size_t width = gLastPaint.ref().mWidth;
size_t height = gLastPaint.ref().mHeight;
size_t stride = layers::ImageDataSerializer::ComputeRGBStride(gSurfaceFormat, width);
// Make sure the width and height are appropriately sized.
CheckedInt<size_t> scaledWidth = CheckedInt<size_t>(width) * 4;
CheckedInt<size_t> scaledHeight = CheckedInt<size_t>(height) * stride;
MOZ_RELEASE_ASSERT(scaledWidth.isValid() && scaledWidth.value() <= stride);
MOZ_RELEASE_ASSERT(scaledHeight.isValid() && scaledHeight.value() <= GraphicsMemorySize);
// Get memory which we can pass to the graphics module to store in a canvas.
// Use the shared memory buffer directly, unless we need to transform the
// data due to extra memory in each row of the data which the child process
// sent us.
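// For example (illustrative): if the child painted a surface 1000 pixels wide
// and ComputeRGBStride returned a stride larger than 4000 bytes because of
// row alignment padding, the leading 4000 bytes of each row are copied into
// gBufferMemory below so the canvas sees tightly packed pixels.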
MOZ_RELEASE_ASSERT(gGraphicsMemory);
void* memory = gGraphicsMemory;
if (stride != width * 4) {
if (!gBufferMemory) {
gBufferMemory = malloc(GraphicsMemorySize);
}
memory = gBufferMemory;
for (size_t y = 0; y < height; y++) {
char* src = (char*)gGraphicsMemory + y * stride;
char* dst = (char*)gBufferMemory + y * width * 4;
memcpy(dst, src, width * 4);
}
}
JSObject* bufferObject =
JS_NewArrayBufferWithExternalContents(cx, width * height * 4, memory);
MOZ_RELEASE_ASSERT(bufferObject);
JS::AutoValueArray<3> args(cx);
args[0].setObject(*bufferObject);
args[1].setInt32(width);
args[2].setInt32(height);
// Call into the graphics module to update the canvas it manages.
RootedValue rval(cx);
if (!JS_CallFunctionName(cx, *gGraphicsSandbox, "Update", args, &rval)) {
MOZ_CRASH("UpdateGraphicsInUIProcess");
}
}
} // namespace parent
} // namespace recordreplay
} // namespace mozilla

Diff between files not shown because of its large size.

View file

@@ -0,0 +1,66 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ParentIPC_h
#define mozilla_recordreplay_ParentIPC_h
#include "mozilla/dom/ContentChild.h"
#include "mozilla/ipc/MessageChannel.h"
#include "mozilla/ipc/ProcessChild.h"
#include "mozilla/ipc/ProtocolUtils.h"
#include "mozilla/ipc/ScopedXREEmbed.h"
#include "mozilla/ipc/Shmem.h"
namespace mozilla {
namespace recordreplay {
namespace parent {
// The middleman process is a content process that manages communication with
// one or more child recording or replaying processes. It performs IPC with the
// UI process in the normal fashion for a content process, using the normal
// IPDL protocols. Communication with a recording or replaying process is done
// via a special IPC channel (see Channel.h), and communication with a
// recording process can additionally be done via IPDL messages, usually by
// forwarding them from the UI process.
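// For example (illustrative): when a recording or replaying child is spawned,
// OpenChannel() opens the socket it uses to connect back to its middleman and
// GetArgumentsForChildProcess() supplies its command line, while IPDL traffic
// with the UI process continues to flow over ChannelToUIProcess().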
// UI process API
// Initialize state in a UI process.
void InitializeUIProcess(int aArgc, char** aArgv);
// Get any directory where content process recordings should be saved.
const char* SaveAllRecordingsDirectory();
// Middleman process API
// Save the recording up to the current point in execution.
void SaveRecording(const ipc::FileDescriptor& aFile);
// Get the message channel used to communicate with the UI process.
ipc::MessageChannel* ChannelToUIProcess();
// Initialize state in a middleman process.
void InitializeMiddleman(int aArgc, char* aArgv[], base::ProcessId aParentPid);
// Note the contents of the prefs shmem for use by the child process.
void NotePrefsShmemContents(char* aPrefs, size_t aPrefsLen);
// Open a socket which a recording/replaying child can use to connect to its
// middleman process.
void OpenChannel(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
ipc::FileDescriptor* aConnection);
// Get the command line arguments to use when spawning a recording or replaying
// child process.
void GetArgumentsForChildProcess(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
const char* aRecordingFile, bool aRecording,
std::vector<std::string>& aExtraArgs);
} // namespace parent
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ParentIPC_h

View file

@@ -0,0 +1,353 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ParentInternal_h
#define mozilla_recordreplay_ParentInternal_h
#include "ParentIPC.h"
#include "Channel.h"
#include "mozilla/ipc/GeckoChildProcessHost.h"
namespace mozilla {
namespace recordreplay {
namespace parent {
// This file has internal declarations for interaction between different
// components of middleman logic.
class ChildProcessInfo;
// Get the message loop for the main thread.
MessageLoop* MainThreadMessageLoop();
// Called after prefs are available to this process.
void PreferencesLoaded();
// Return whether replaying processes are allowed to save checkpoints and
// rewind. Can only be called after PreferencesLoaded().
bool CanRewind();
// Whether the child currently being interacted with is recording.
bool ActiveChildIsRecording();
// Get the active recording child process.
ChildProcessInfo* ActiveRecordingChild();
// Return whether the middleman's main thread is blocked waiting on a
// synchronous IPDL reply from the recording child.
bool MainThreadIsWaitingForIPDLReply();
// Initialize state which handles incoming IPDL messages from the UI and
// recording child processes.
void InitializeForwarding();
// Terminate all children and kill this process.
void Shutdown();
// Monitor used for synchronizing between the main and channel or message loop threads.
static Monitor* gMonitor;
// Allow the child process to resume execution.
void Resume(bool aForward);
// Pause the child process at the next opportunity.
void Pause();
// Send a JSON request to the child process, and synchronously wait for a
// response.
void SendRequest(const js::CharBuffer& aBuffer, js::CharBuffer* aResponse);
// Set or clear a breakpoint in the child process.
void SetBreakpoint(size_t aId, const js::BreakpointPosition& aPosition);
///////////////////////////////////////////////////////////////////////////////
// Graphics
///////////////////////////////////////////////////////////////////////////////
extern void* gGraphicsMemory;
void InitializeGraphicsMemory();
void SendGraphicsMemoryToChild();
// Update the graphics painted in the UI process, per painting data received
// from a child process, or null to repaint using the last paint received.
void UpdateGraphicsInUIProcess(const PaintMessage* aMsg);
// ID for the mach message sent from a child process to the middleman to
// request a port for the graphics shmem.
static const int32_t GraphicsHandshakeMessageId = 42;
// ID for the mach message sent from the middleman to a child process with the
// requested memory for the graphics shmem.
static const int32_t GraphicsMemoryMessageId = 43;
// Fixed size of the graphics shared memory buffer.
static const size_t GraphicsMemorySize = 4096 * 4096 * 4;
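// 4096 * 4096 pixels at 4 bytes per pixel comes to 64 MiB;
// UpdateGraphicsInUIProcess asserts that height * stride of any incoming
// paint fits within this buffer.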
///////////////////////////////////////////////////////////////////////////////
// Child Processes
///////////////////////////////////////////////////////////////////////////////
// Information about the role which a child process is fulfilling; the role
// governs how the process responds to incoming messages.
class ChildRole
{
public:
// See ParentIPC.cpp for the meaning of these role types.
#define ForEachRoleType(Macro) \
Macro(Active) \
Macro(Standby) \
Macro(Inert)
enum Type {
#define DefineType(Name) Name,
ForEachRoleType(DefineType)
#undef DefineType
};
static const char* TypeString(Type aType) {
switch (aType) {
#define GetTypeString(Name) case Name: return #Name;
ForEachRoleType(GetTypeString)
#undef GetTypeString
default: MOZ_CRASH("Bad ChildRole type");
}
}
protected:
ChildProcessInfo* mProcess;
Type mType;
explicit ChildRole(Type aType)
: mProcess(nullptr), mType(aType)
{}
public:
void SetProcess(ChildProcessInfo* aProcess) {
MOZ_RELEASE_ASSERT(!mProcess);
mProcess = aProcess;
}
Type GetType() const { return mType; }
virtual ~ChildRole() {}
// The methods below are all called on the main thread.
virtual void Initialize() {}
// When the child is paused and potentially sitting idle, notify the role
// that state affecting its behavior has changed and that it may want to
// become active again.
virtual void Poke() {}
virtual void OnIncomingMessage(const Message& aMsg) = 0;
};
// Handle to the underlying recording process, if there is one. Recording
// processes are directly spawned by the middleman at startup, since they need
// to receive all the same IPC which the middleman receives from the UI process
// in order to initialize themselves. Replaying processes are all spawned by
// the UI process itself, due to sandboxing restrictions.
extern ipc::GeckoChildProcessHost* gRecordingProcess;
// Information about a recording or replaying child process.
class ChildProcessInfo
{
// Channel for communicating with the process.
Channel* mChannel;
// The last time we sent or received a message from this process.
TimeStamp mLastMessageTime;
// Whether this process is recording.
bool mRecording;
// The current recovery stage of this process.
//
// Recovery is used when we are shepherding a child to a particular state:
// a particular execution position and sets of installed breakpoints and
// saved checkpoints. Recovery is used when changing a child's role, and when
// spawning a new process to replace a crashed child process.
//
// When recovering, the child process won't yet be in the exact place
// reflected by the state below, but the main thread will wait until it has
// finished reaching this state before it is able to send or receive
// messages.
enum class RecoveryStage {
// No recovery is being performed, and the process can be interacted with.
None,
// The process has not yet reached mLastCheckpoint.
ReachingCheckpoint,
// The process has reached mLastCheckpoint, and additional messages are
// being sent to change its intra-checkpoint execution position or install
// breakpoints.
PlayingMessages
};
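// For example (illustrative): a child being recovered to match another
// process first runs forward to that process' mLastCheckpoint
// (ReachingCheckpoint), then has the messages in mMessages replayed to it
// (PlayingMessages) before the main thread treats it as interactable again.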
RecoveryStage mRecoveryStage;
// Whether the process is currently paused.
bool mPaused;
// If the process is paused, or if it is running while handling a message
// that won't cause it to change its execution point, the last message which
// caused it to pause.
Message* mPausedMessage;
// The last checkpoint which the child process reached. The child is
// somewhere between this and either the next or previous checkpoint,
// depending on the messages that have been sent to it.
size_t mLastCheckpoint;
// Messages sent to the process which will affect its behavior as it runs
// forward or backward from mLastCheckpoint. This includes all messages that
// will need to be sent to another process to recover it to the same state as
// this process.
InfallibleVector<Message*> mMessages;
// In the PlayingMessages recovery stage, how much of mMessages has been sent
// to the process.
size_t mNumRecoveredMessages;
// The number of times we have restarted this process.
size_t mNumRestarts;
// Current role of this process.
UniquePtr<ChildRole> mRole;
// Unsorted list of the checkpoints the process has been instructed to save.
// Those at or before the most recent checkpoint will have been saved.
InfallibleVector<size_t> mShouldSaveCheckpoints;
// Sorted major checkpoints for this process. See ParentIPC.cpp.
InfallibleVector<size_t> mMajorCheckpoints;
// Whether we need this child to pause while the recording is updated.
bool mPauseNeeded;
void OnIncomingMessage(size_t aChannelId, const Message& aMsg);
void OnIncomingRecoveryMessage(const Message& aMsg);
void SendNextRecoveryMessage();
void SendMessageRaw(const Message& aMsg);
static void MaybeProcessPendingMessageRunnable();
void ReceiveChildMessageOnMainThread(size_t aChannelId, Message* aMsg);
// Get the position of this process relative to its last checkpoint.
enum Disposition {
AtLastCheckpoint,
BeforeLastCheckpoint,
AfterLastCheckpoint
};
Disposition GetDisposition();
void Recover(bool aPaused, Message* aPausedMessage, size_t aLastCheckpoint,
Message** aMessages, size_t aNumMessages);
bool CanRestart();
void AttemptRestart(const char* aWhy);
void LaunchSubprocess();
public:
ChildProcessInfo(UniquePtr<ChildRole> aRole, bool aRecording);
~ChildProcessInfo();
ChildRole* Role() { return mRole.get(); }
size_t GetId() { return mChannel->GetId(); }
bool IsRecording() { return mRecording; }
size_t LastCheckpoint() { return mLastCheckpoint; }
bool IsRecovering() { return mRecoveryStage != RecoveryStage::None; }
bool PauseNeeded() { return mPauseNeeded; }
const InfallibleVector<size_t>& MajorCheckpoints() { return mMajorCheckpoints; }
bool IsPaused() { return mPaused; }
bool IsPausedAtCheckpoint();
bool IsPausedAtRecordingEndpoint();
// Return whether this process is paused at a breakpoint whose kind matches
// the supplied filter.
typedef std::function<bool(js::BreakpointPosition::Kind)> BreakpointFilter;
bool IsPausedAtMatchingBreakpoint(const BreakpointFilter& aFilter);
// Get the checkpoint at or prior to the process' position. This is either
// the last reached checkpoint or the previous one.
size_t MostRecentCheckpoint() {
return (GetDisposition() == BeforeLastCheckpoint) ? mLastCheckpoint - 1 : mLastCheckpoint;
}
// Get the checkpoint which needs to be saved in order for this process
// (or another at the same place) to rewind.
size_t RewindTargetCheckpoint() {
switch (GetDisposition()) {
case BeforeLastCheckpoint:
case AtLastCheckpoint:
// This will return CheckpointId::Invalid if we are at the beginning of the
// recording.
return LastCheckpoint() - 1;
case AfterLastCheckpoint:
return LastCheckpoint();
}
}
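// Example (illustrative): a child whose last checkpoint is 5 and which has
// run forward past it must save checkpoint 5 in order to rewind; if it is
// instead sitting at checkpoint 5, or has not yet reached it, the rewind
// target is checkpoint 4.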
bool ShouldSaveCheckpoint(size_t aId) {
return VectorContains(mShouldSaveCheckpoints, aId);
}
bool IsMajorCheckpoint(size_t aId) {
return VectorContains(mMajorCheckpoints, aId);
}
bool HasSavedCheckpoint(size_t aId) {
return (aId <= MostRecentCheckpoint()) && ShouldSaveCheckpoint(aId);
}
size_t MostRecentSavedCheckpoint() {
size_t id = MostRecentCheckpoint();
while (!ShouldSaveCheckpoint(id)) {
id--;
}
return id;
}
void SetPauseNeeded() {
MOZ_RELEASE_ASSERT(!mPauseNeeded);
mPauseNeeded = true;
}
void ClearPauseNeeded() {
MOZ_RELEASE_ASSERT(IsPaused());
mPauseNeeded = false;
mRole->Poke();
}
void AddMajorCheckpoint(size_t aId);
void SetRole(UniquePtr<ChildRole> aRole);
void SendMessage(const Message& aMessage);
// Recover to the same state as another process.
void Recover(ChildProcessInfo* aTargetProcess);
// Recover to be paused at a checkpoint with no breakpoints set.
void RecoverToCheckpoint(size_t aCheckpoint);
// Handle incoming messages from this process (and no others) until the
// callback succeeds.
void WaitUntil(const std::function<bool()>& aCallback);
void WaitUntilPaused() { WaitUntil([=]() { return IsPaused(); }); }
static bool MaybeProcessPendingMessage(ChildProcessInfo* aProcess);
static void SetIntroductionMessage(IntroductionMessage* aMessage);
};
} // namespace parent
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ParentInternal_h

View file

@@ -0,0 +1,57 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
EXPORTS.mozilla.recordreplay += [
'ipc/ChildIPC.h',
'ipc/ParentIPC.h',
]
if CONFIG['OS_ARCH'] == 'Darwin' and CONFIG['NIGHTLY_BUILD']:
UNIFIED_SOURCES += [
'Assembler.cpp',
'Callback.cpp',
'DirtyMemoryHandler.cpp',
'File.cpp',
'HashTable.cpp',
'ipc/Channel.cpp',
'ipc/ChildIPC.cpp',
'ipc/ChildNavigation.cpp',
'ipc/ChildProcess.cpp',
'ipc/JSControl.cpp',
'ipc/ParentForwarding.cpp',
'ipc/ParentGraphics.cpp',
'ipc/ParentIPC.cpp',
'Lock.cpp',
'MemorySnapshot.cpp',
'ProcessRecordReplay.cpp',
'ProcessRedirectDarwin.cpp',
'ProcessRewind.cpp',
'Thread.cpp',
'ThreadSnapshot.cpp',
'Trigger.cpp',
'ValueIndex.cpp',
'WeakPointer.cpp',
]
SOURCES += [
# ProcessRedirect includes udis86 directly and will not compile if the
# udis86 headers are included elsewhere in the same compilation unit.
'ProcessRedirect.cpp',
]
else:
UNIFIED_SOURCES += [
'ipc/DisabledIPC.cpp',
]
LOCAL_INCLUDES += [
'!/ipc/ipdl/_ipdlheaders',
'/ipc/chromium/src',
'/js/xpconnect/src',
]
FINAL_LIBRARY = 'xul'
with Files('**'):
BUG_COMPONENT = ('Core', 'Web Replay')

Diff between files not shown because of its large size.

View file

@@ -0,0 +1,195 @@
/* udis86 - libudis86/decode.h
*
* Copyright (c) 2002-2009 Vivek Thampi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UD_DECODE_H
#define UD_DECODE_H
#include "types.h"
#include "itab.h"
#define MAX_INSN_LENGTH 15
/* itab prefix bits */
#define P_none ( 0 )
#define P_cast ( 1 << 0 )
#define P_CAST(n) ( ( n >> 0 ) & 1 )
#define P_rexb ( 1 << 1 )
#define P_REXB(n) ( ( n >> 1 ) & 1 )
#define P_inv64 ( 1 << 4 )
#define P_INV64(n) ( ( n >> 4 ) & 1 )
#define P_rexw ( 1 << 5 )
#define P_REXW(n) ( ( n >> 5 ) & 1 )
#define P_def64 ( 1 << 7 )
#define P_DEF64(n) ( ( n >> 7 ) & 1 )
#define P_rexr ( 1 << 8 )
#define P_REXR(n) ( ( n >> 8 ) & 1 )
#define P_oso ( 1 << 9 )
#define P_OSO(n) ( ( n >> 9 ) & 1 )
#define P_aso ( 1 << 10 )
#define P_ASO(n) ( ( n >> 10 ) & 1 )
#define P_rexx ( 1 << 11 )
#define P_REXX(n) ( ( n >> 11 ) & 1 )
#define P_ImpAddr ( 1 << 12 )
#define P_IMPADDR(n) ( ( n >> 12 ) & 1 )
#define P_seg ( 1 << 13 )
#define P_SEG(n) ( ( n >> 13 ) & 1 )
#define P_str ( 1 << 14 )
#define P_STR(n) ( ( n >> 14 ) & 1 )
#define P_strz ( 1 << 15 )
#define P_STR_ZF(n) ( ( n >> 15 ) & 1 )
/* operand type constants -- order is important! */
enum ud_operand_code {
OP_NONE,
OP_A, OP_E, OP_M, OP_G,
OP_I, OP_F,
OP_R0, OP_R1, OP_R2, OP_R3,
OP_R4, OP_R5, OP_R6, OP_R7,
OP_AL, OP_CL, OP_DL,
OP_AX, OP_CX, OP_DX,
OP_eAX, OP_eCX, OP_eDX,
OP_rAX, OP_rCX, OP_rDX,
OP_ES, OP_CS, OP_SS, OP_DS,
OP_FS, OP_GS,
OP_ST0, OP_ST1, OP_ST2, OP_ST3,
OP_ST4, OP_ST5, OP_ST6, OP_ST7,
OP_J, OP_S, OP_O,
OP_I1, OP_I3, OP_sI,
OP_V, OP_W, OP_Q, OP_P,
OP_U, OP_N, OP_MU,
OP_R, OP_C, OP_D,
OP_MR
} UD_ATTR_PACKED;
/* operand size constants */
enum ud_operand_size {
SZ_NA = 0,
SZ_Z = 1,
SZ_V = 2,
SZ_RDQ = 7,
/* the following values are used as is,
* and thus hard-coded. changing them
* will break internals
*/
SZ_B = 8,
SZ_W = 16,
SZ_D = 32,
SZ_Q = 64,
SZ_T = 80,
SZ_O = 128,
SZ_Y = 17,
/*
* complex size types, that encode sizes for operands
* of type MR (memory or register), for internal use
* only. Id space 256 and above.
*/
SZ_BD = (SZ_B << 8) | SZ_D,
SZ_BV = (SZ_B << 8) | SZ_V,
SZ_WD = (SZ_W << 8) | SZ_D,
SZ_WV = (SZ_W << 8) | SZ_V,
SZ_WY = (SZ_W << 8) | SZ_Y,
SZ_DY = (SZ_D << 8) | SZ_Y,
SZ_WO = (SZ_W << 8) | SZ_O,
SZ_DO = (SZ_D << 8) | SZ_O,
SZ_QO = (SZ_Q << 8) | SZ_O,
} UD_ATTR_PACKED;
/* resolve complex size type.
*/
static inline enum ud_operand_size
Mx_mem_size(int size)
{
return (enum ud_operand_size) ((size >> 8) & 0xff);
}
static inline enum ud_operand_size
Mx_reg_size(int size)
{
return (enum ud_operand_size) (size & 0xff);
}
/* A single operand of an entry in the instruction table.
* (internal use only)
*/
struct ud_itab_entry_operand
{
enum ud_operand_code type;
enum ud_operand_size size;
};
/* A single entry in an instruction table.
*(internal use only)
*/
struct ud_itab_entry
{
enum ud_mnemonic_code mnemonic;
struct ud_itab_entry_operand operand1;
struct ud_itab_entry_operand operand2;
struct ud_itab_entry_operand operand3;
uint32_t prefix;
};
struct ud_lookup_table_list_entry {
const uint16_t *table;
enum ud_table_type type;
const char *meta;
};
static inline int
ud_opcode_field_sext(uint8_t primary_opcode)
{
return (primary_opcode & 0x02) != 0;
}
extern struct ud_itab_entry ud_itab[];
extern struct ud_lookup_table_list_entry ud_lookup_table_list[];
#endif /* UD_DECODE_H */
/* vim:cindent
* vim:expandtab
* vim:ts=4
* vim:sw=4
*/

Diff between files not shown because of its large size.

View file

@@ -0,0 +1,676 @@
#ifndef UD_ITAB_H
#define UD_ITAB_H
/* itab.h -- generated by udis86:scripts/ud_itab.py, do not edit */
/* ud_table_type -- lookup table types (see decode.c) */
enum ud_table_type {
UD_TAB__OPC_TABLE,
UD_TAB__OPC_X87,
UD_TAB__OPC_MOD,
UD_TAB__OPC_VEX_M,
UD_TAB__OPC_VEX_P,
UD_TAB__OPC_RM,
UD_TAB__OPC_VENDOR,
UD_TAB__OPC_OSIZE,
UD_TAB__OPC_MODE,
UD_TAB__OPC_3DNOW,
UD_TAB__OPC_REG,
UD_TAB__OPC_ASIZE,
UD_TAB__OPC_SSE
};
/* ud_mnemonic -- mnemonic constants */
enum ud_mnemonic_code {
UD_Iinvalid,
UD_I3dnow,
UD_Inone,
UD_Idb,
UD_Ipause,
UD_Iaaa,
UD_Iaad,
UD_Iaam,
UD_Iaas,
UD_Iadc,
UD_Iadd,
UD_Iaddpd,
UD_Iaddps,
UD_Iaddsd,
UD_Iaddss,
UD_Iand,
UD_Iandpd,
UD_Iandps,
UD_Iandnpd,
UD_Iandnps,
UD_Iarpl,
UD_Imovsxd,
UD_Ibound,
UD_Ibsf,
UD_Ibsr,
UD_Ibswap,
UD_Ibt,
UD_Ibtc,
UD_Ibtr,
UD_Ibts,
UD_Icall,
UD_Icbw,
UD_Icwde,
UD_Icdqe,
UD_Iclc,
UD_Icld,
UD_Iclflush,
UD_Iclgi,
UD_Icli,
UD_Iclts,
UD_Icmc,
UD_Icmovo,
UD_Icmovno,
UD_Icmovb,
UD_Icmovae,
UD_Icmovz,
UD_Icmovnz,
UD_Icmovbe,
UD_Icmova,
UD_Icmovs,
UD_Icmovns,
UD_Icmovp,
UD_Icmovnp,
UD_Icmovl,
UD_Icmovge,
UD_Icmovle,
UD_Icmovg,
UD_Icmp,
UD_Icmppd,
UD_Icmpps,
UD_Icmpsb,
UD_Icmpsw,
UD_Icmpsd,
UD_Icmpsq,
UD_Icmpss,
UD_Icmpxchg,
UD_Icmpxchg8b,
UD_Icmpxchg16b,
UD_Icomisd,
UD_Icomiss,
UD_Icpuid,
UD_Icvtdq2pd,
UD_Icvtdq2ps,
UD_Icvtpd2dq,
UD_Icvtpd2pi,
UD_Icvtpd2ps,
UD_Icvtpi2ps,
UD_Icvtpi2pd,
UD_Icvtps2dq,
UD_Icvtps2pi,
UD_Icvtps2pd,
UD_Icvtsd2si,
UD_Icvtsd2ss,
UD_Icvtsi2ss,
UD_Icvtss2si,
UD_Icvtss2sd,
UD_Icvttpd2pi,
UD_Icvttpd2dq,
UD_Icvttps2dq,
UD_Icvttps2pi,
UD_Icvttsd2si,
UD_Icvtsi2sd,
UD_Icvttss2si,
UD_Icwd,
UD_Icdq,
UD_Icqo,
UD_Idaa,
UD_Idas,
UD_Idec,
UD_Idiv,
UD_Idivpd,
UD_Idivps,
UD_Idivsd,
UD_Idivss,
UD_Iemms,
UD_Ienter,
UD_If2xm1,
UD_Ifabs,
UD_Ifadd,
UD_Ifaddp,
UD_Ifbld,
UD_Ifbstp,
UD_Ifchs,
UD_Ifclex,
UD_Ifcmovb,
UD_Ifcmove,
UD_Ifcmovbe,
UD_Ifcmovu,
UD_Ifcmovnb,
UD_Ifcmovne,
UD_Ifcmovnbe,
UD_Ifcmovnu,
UD_Ifucomi,
UD_Ifcom,
UD_Ifcom2,
UD_Ifcomp3,
UD_Ifcomi,
UD_Ifucomip,
UD_Ifcomip,
UD_Ifcomp,
UD_Ifcomp5,
UD_Ifcompp,
UD_Ifcos,
UD_Ifdecstp,
UD_Ifdiv,
UD_Ifdivp,
UD_Ifdivr,
UD_Ifdivrp,
UD_Ifemms,
UD_Iffree,
UD_Iffreep,
UD_Ificom,
UD_Ificomp,
UD_Ifild,
UD_Ifincstp,
UD_Ifninit,
UD_Ifiadd,
UD_Ifidivr,
UD_Ifidiv,
UD_Ifisub,
UD_Ifisubr,
UD_Ifist,
UD_Ifistp,
UD_Ifisttp,
UD_Ifld,
UD_Ifld1,
UD_Ifldl2t,
UD_Ifldl2e,
UD_Ifldpi,
UD_Ifldlg2,
UD_Ifldln2,
UD_Ifldz,
UD_Ifldcw,
UD_Ifldenv,
UD_Ifmul,
UD_Ifmulp,
UD_Ifimul,
UD_Ifnop,
UD_Ifpatan,
UD_Ifprem,
UD_Ifprem1,
UD_Ifptan,
UD_Ifrndint,
UD_Ifrstor,
UD_Ifnsave,
UD_Ifscale,
UD_Ifsin,
UD_Ifsincos,
UD_Ifsqrt,
UD_Ifstp,
UD_Ifstp1,
UD_Ifstp8,
UD_Ifstp9,
UD_Ifst,
UD_Ifnstcw,
UD_Ifnstenv,
UD_Ifnstsw,
UD_Ifsub,
UD_Ifsubp,
UD_Ifsubr,
UD_Ifsubrp,
UD_Iftst,
UD_Ifucom,
UD_Ifucomp,
UD_Ifucompp,
UD_Ifxam,
UD_Ifxch,
UD_Ifxch4,
UD_Ifxch7,
UD_Ifxrstor,
UD_Ifxsave,
UD_Ifxtract,
UD_Ifyl2x,
UD_Ifyl2xp1,
UD_Ihlt,
UD_Iidiv,
UD_Iin,
UD_Iimul,
UD_Iinc,
UD_Iinsb,
UD_Iinsw,
UD_Iinsd,
UD_Iint1,
UD_Iint3,
UD_Iint,
UD_Iinto,
UD_Iinvd,
UD_Iinvept,
UD_Iinvlpg,
UD_Iinvlpga,
UD_Iinvvpid,
UD_Iiretw,
UD_Iiretd,
UD_Iiretq,
UD_Ijo,
UD_Ijno,
UD_Ijb,
UD_Ijae,
UD_Ijz,
UD_Ijnz,
UD_Ijbe,
UD_Ija,
UD_Ijs,
UD_Ijns,
UD_Ijp,
UD_Ijnp,
UD_Ijl,
UD_Ijge,
UD_Ijle,
UD_Ijg,
UD_Ijcxz,
UD_Ijecxz,
UD_Ijrcxz,
UD_Ijmp,
UD_Ilahf,
UD_Ilar,
UD_Ilddqu,
UD_Ildmxcsr,
UD_Ilds,
UD_Ilea,
UD_Iles,
UD_Ilfs,
UD_Ilgs,
UD_Ilidt,
UD_Ilss,
UD_Ileave,
UD_Ilfence,
UD_Ilgdt,
UD_Illdt,
UD_Ilmsw,
UD_Ilock,
UD_Ilodsb,
UD_Ilodsw,
UD_Ilodsd,
UD_Ilodsq,
UD_Iloopne,
UD_Iloope,
UD_Iloop,
UD_Ilsl,
UD_Iltr,
UD_Imaskmovq,
UD_Imaxpd,
UD_Imaxps,
UD_Imaxsd,
UD_Imaxss,
UD_Imfence,
UD_Iminpd,
UD_Iminps,
UD_Iminsd,
UD_Iminss,
UD_Imonitor,
UD_Imontmul,
UD_Imov,
UD_Imovapd,
UD_Imovaps,
UD_Imovd,
UD_Imovhpd,
UD_Imovhps,
UD_Imovlhps,
UD_Imovlpd,
UD_Imovlps,
UD_Imovhlps,
UD_Imovmskpd,
UD_Imovmskps,
UD_Imovntdq,
UD_Imovnti,
UD_Imovntpd,
UD_Imovntps,
UD_Imovntq,
UD_Imovq,
UD_Imovsb,
UD_Imovsw,
UD_Imovsd,
UD_Imovsq,
UD_Imovss,
UD_Imovsx,
UD_Imovupd,
UD_Imovups,
UD_Imovzx,
UD_Imul,
UD_Imulpd,
UD_Imulps,
UD_Imulsd,
UD_Imulss,
UD_Imwait,
UD_Ineg,
UD_Inop,
UD_Inot,
UD_Ior,
UD_Iorpd,
UD_Iorps,
UD_Iout,
UD_Ioutsb,
UD_Ioutsw,
UD_Ioutsd,
UD_Ipacksswb,
UD_Ipackssdw,
UD_Ipackuswb,
UD_Ipaddb,
UD_Ipaddw,
UD_Ipaddd,
UD_Ipaddsb,
UD_Ipaddsw,
UD_Ipaddusb,
UD_Ipaddusw,
UD_Ipand,
UD_Ipandn,
UD_Ipavgb,
UD_Ipavgw,
UD_Ipcmpeqb,
UD_Ipcmpeqw,
UD_Ipcmpeqd,
UD_Ipcmpgtb,
UD_Ipcmpgtw,
UD_Ipcmpgtd,
UD_Ipextrb,
UD_Ipextrd,
UD_Ipextrq,
UD_Ipextrw,
UD_Ipinsrb,
UD_Ipinsrw,
UD_Ipinsrd,
UD_Ipinsrq,
UD_Ipmaddwd,
UD_Ipmaxsw,
UD_Ipmaxub,
UD_Ipminsw,
UD_Ipminub,
UD_Ipmovmskb,
UD_Ipmulhuw,
UD_Ipmulhw,
UD_Ipmullw,
UD_Ipop,
UD_Ipopa,
UD_Ipopad,
UD_Ipopfw,
UD_Ipopfd,
UD_Ipopfq,
UD_Ipor,
UD_Iprefetch,
UD_Iprefetchnta,
UD_Iprefetcht0,
UD_Iprefetcht1,
UD_Iprefetcht2,
UD_Ipsadbw,
UD_Ipshufw,
UD_Ipsllw,
UD_Ipslld,
UD_Ipsllq,
UD_Ipsraw,
UD_Ipsrad,
UD_Ipsrlw,
UD_Ipsrld,
UD_Ipsrlq,
UD_Ipsubb,
UD_Ipsubw,
UD_Ipsubd,
UD_Ipsubsb,
UD_Ipsubsw,
UD_Ipsubusb,
UD_Ipsubusw,
UD_Ipunpckhbw,
UD_Ipunpckhwd,
UD_Ipunpckhdq,
UD_Ipunpcklbw,
UD_Ipunpcklwd,
UD_Ipunpckldq,
UD_Ipi2fw,
UD_Ipi2fd,
UD_Ipf2iw,
UD_Ipf2id,
UD_Ipfnacc,
UD_Ipfpnacc,
UD_Ipfcmpge,
UD_Ipfmin,
UD_Ipfrcp,
UD_Ipfrsqrt,
UD_Ipfsub,
UD_Ipfadd,
UD_Ipfcmpgt,
UD_Ipfmax,
UD_Ipfrcpit1,
UD_Ipfrsqit1,
UD_Ipfsubr,
UD_Ipfacc,
UD_Ipfcmpeq,
UD_Ipfmul,
UD_Ipfrcpit2,
UD_Ipmulhrw,
UD_Ipswapd,
UD_Ipavgusb,
UD_Ipush,
UD_Ipusha,
UD_Ipushad,
UD_Ipushfw,
UD_Ipushfd,
UD_Ipushfq,
UD_Ipxor,
UD_Ircl,
UD_Ircr,
UD_Irol,
UD_Iror,
UD_Ircpps,
UD_Ircpss,
UD_Irdmsr,
UD_Irdpmc,
UD_Irdtsc,
UD_Irdtscp,
UD_Irepne,
UD_Irep,
UD_Iret,
UD_Iretf,
UD_Irsm,
UD_Irsqrtps,
UD_Irsqrtss,
UD_Isahf,
UD_Isalc,
UD_Isar,
UD_Ishl,
UD_Ishr,
UD_Isbb,
UD_Iscasb,
UD_Iscasw,
UD_Iscasd,
UD_Iscasq,
UD_Iseto,
UD_Isetno,
UD_Isetb,
UD_Isetae,
UD_Isetz,
UD_Isetnz,
UD_Isetbe,
UD_Iseta,
UD_Isets,
UD_Isetns,
UD_Isetp,
UD_Isetnp,
UD_Isetl,
UD_Isetge,
UD_Isetle,
UD_Isetg,
UD_Isfence,
UD_Isgdt,
UD_Ishld,
UD_Ishrd,
UD_Ishufpd,
UD_Ishufps,
UD_Isidt,
UD_Isldt,
UD_Ismsw,
UD_Isqrtps,
UD_Isqrtpd,
UD_Isqrtsd,
UD_Isqrtss,
UD_Istc,
UD_Istd,
UD_Istgi,
UD_Isti,
UD_Iskinit,
UD_Istmxcsr,
UD_Istosb,
UD_Istosw,
UD_Istosd,
UD_Istosq,
UD_Istr,
UD_Isub,
UD_Isubpd,
UD_Isubps,
UD_Isubsd,
UD_Isubss,
UD_Iswapgs,
UD_Isyscall,
UD_Isysenter,
UD_Isysexit,
UD_Isysret,
UD_Itest,
UD_Iucomisd,
UD_Iucomiss,
UD_Iud2,
UD_Iunpckhpd,
UD_Iunpckhps,
UD_Iunpcklps,
UD_Iunpcklpd,
UD_Iverr,
UD_Iverw,
UD_Ivmcall,
UD_Ivmclear,
UD_Ivmxon,
UD_Ivmptrld,
UD_Ivmptrst,
UD_Ivmlaunch,
UD_Ivmresume,
UD_Ivmxoff,
UD_Ivmread,
UD_Ivmwrite,
UD_Ivmrun,
UD_Ivmmcall,
UD_Ivmload,
UD_Ivmsave,
UD_Iwait,
UD_Iwbinvd,
UD_Iwrmsr,
UD_Ixadd,
UD_Ixchg,
UD_Ixgetbv,
UD_Ixlatb,
UD_Ixor,
UD_Ixorpd,
UD_Ixorps,
UD_Ixcryptecb,
UD_Ixcryptcbc,
UD_Ixcryptctr,
UD_Ixcryptcfb,
UD_Ixcryptofb,
UD_Ixrstor,
UD_Ixsave,
UD_Ixsetbv,
UD_Ixsha1,
UD_Ixsha256,
UD_Ixstore,
UD_Iaesdec,
UD_Iaesdeclast,
UD_Iaesenc,
UD_Iaesenclast,
UD_Iaesimc,
UD_Iaeskeygenassist,
UD_Ipclmulqdq,
UD_Igetsec,
UD_Imovdqa,
UD_Imaskmovdqu,
UD_Imovdq2q,
UD_Imovdqu,
UD_Imovq2dq,
UD_Ipaddq,
UD_Ipsubq,
UD_Ipmuludq,
UD_Ipshufhw,
UD_Ipshuflw,
UD_Ipshufd,
UD_Ipslldq,
UD_Ipsrldq,
UD_Ipunpckhqdq,
UD_Ipunpcklqdq,
UD_Iaddsubpd,
UD_Iaddsubps,
UD_Ihaddpd,
UD_Ihaddps,
UD_Ihsubpd,
UD_Ihsubps,
UD_Imovddup,
UD_Imovshdup,
UD_Imovsldup,
UD_Ipabsb,
UD_Ipabsw,
UD_Ipabsd,
UD_Ipshufb,
UD_Iphaddw,
UD_Iphaddd,
UD_Iphaddsw,
UD_Ipmaddubsw,
UD_Iphsubw,
UD_Iphsubd,
UD_Iphsubsw,
UD_Ipsignb,
UD_Ipsignd,
UD_Ipsignw,
UD_Ipmulhrsw,
UD_Ipalignr,
UD_Ipblendvb,
UD_Ipmuldq,
UD_Ipminsb,
UD_Ipminsd,
UD_Ipminuw,
UD_Ipminud,
UD_Ipmaxsb,
UD_Ipmaxsd,
UD_Ipmaxud,
UD_Ipmaxuw,
UD_Ipmulld,
UD_Iphminposuw,
UD_Iroundps,
UD_Iroundpd,
UD_Iroundss,
UD_Iroundsd,
UD_Iblendpd,
UD_Ipblendw,
UD_Iblendps,
UD_Iblendvpd,
UD_Iblendvps,
UD_Idpps,
UD_Idppd,
UD_Impsadbw,
UD_Iextractps,
UD_Iinsertps,
UD_Imovntdqa,
UD_Ipackusdw,
UD_Ipmovsxbw,
UD_Ipmovsxbd,
UD_Ipmovsxbq,
UD_Ipmovsxwd,
UD_Ipmovsxwq,
UD_Ipmovsxdq,
UD_Ipmovzxbw,
UD_Ipmovzxbd,
UD_Ipmovzxbq,
UD_Ipmovzxwd,
UD_Ipmovzxwq,
UD_Ipmovzxdq,
UD_Ipcmpeqq,
UD_Ipopcnt,
UD_Iptest,
UD_Ipcmpestri,
UD_Ipcmpestrm,
UD_Ipcmpgtq,
UD_Ipcmpistri,
UD_Ipcmpistrm,
UD_Imovbe,
UD_Icrc32,
UD_MAX_MNEMONIC_CODE
} UD_ATTR_PACKED;
#endif /* UD_ITAB_H */

View file

@@ -0,0 +1,202 @@
/* udis86 - libudis86/types.h
*
* Copyright (c) 2002-2013 Vivek Thampi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UD_TYPES_H
#define UD_TYPES_H
/* gcc specific extensions */
#ifdef __GNUC__
# define UD_ATTR_PACKED __attribute__((packed))
#else
# define UD_ATTR_PACKED
#endif /* UD_ATTR_PACKED */
/* -----------------------------------------------------------------------------
* All possible "types" of objects in udis86. Order is Important!
* -----------------------------------------------------------------------------
*/
enum ud_type
{
UD_NONE,
/* 8 bit GPRs */
UD_R_AL, UD_R_CL, UD_R_DL, UD_R_BL,
UD_R_AH, UD_R_CH, UD_R_DH, UD_R_BH,
UD_R_SPL, UD_R_BPL, UD_R_SIL, UD_R_DIL,
UD_R_R8B, UD_R_R9B, UD_R_R10B, UD_R_R11B,
UD_R_R12B, UD_R_R13B, UD_R_R14B, UD_R_R15B,
/* 16 bit GPRs */
UD_R_AX, UD_R_CX, UD_R_DX, UD_R_BX,
UD_R_SP, UD_R_BP, UD_R_SI, UD_R_DI,
UD_R_R8W, UD_R_R9W, UD_R_R10W, UD_R_R11W,
UD_R_R12W, UD_R_R13W, UD_R_R14W, UD_R_R15W,
/* 32 bit GPRs */
UD_R_EAX, UD_R_ECX, UD_R_EDX, UD_R_EBX,
UD_R_ESP, UD_R_EBP, UD_R_ESI, UD_R_EDI,
UD_R_R8D, UD_R_R9D, UD_R_R10D, UD_R_R11D,
UD_R_R12D, UD_R_R13D, UD_R_R14D, UD_R_R15D,
/* 64 bit GPRs */
UD_R_RAX, UD_R_RCX, UD_R_RDX, UD_R_RBX,
UD_R_RSP, UD_R_RBP, UD_R_RSI, UD_R_RDI,
UD_R_R8, UD_R_R9, UD_R_R10, UD_R_R11,
UD_R_R12, UD_R_R13, UD_R_R14, UD_R_R15,
/* segment registers */
UD_R_ES, UD_R_CS, UD_R_SS, UD_R_DS,
UD_R_FS, UD_R_GS,
/* control registers*/
UD_R_CR0, UD_R_CR1, UD_R_CR2, UD_R_CR3,
UD_R_CR4, UD_R_CR5, UD_R_CR6, UD_R_CR7,
UD_R_CR8, UD_R_CR9, UD_R_CR10, UD_R_CR11,
UD_R_CR12, UD_R_CR13, UD_R_CR14, UD_R_CR15,
/* debug registers */
UD_R_DR0, UD_R_DR1, UD_R_DR2, UD_R_DR3,
UD_R_DR4, UD_R_DR5, UD_R_DR6, UD_R_DR7,
UD_R_DR8, UD_R_DR9, UD_R_DR10, UD_R_DR11,
UD_R_DR12, UD_R_DR13, UD_R_DR14, UD_R_DR15,
/* mmx registers */
UD_R_MM0, UD_R_MM1, UD_R_MM2, UD_R_MM3,
UD_R_MM4, UD_R_MM5, UD_R_MM6, UD_R_MM7,
/* x87 registers */
UD_R_ST0, UD_R_ST1, UD_R_ST2, UD_R_ST3,
UD_R_ST4, UD_R_ST5, UD_R_ST6, UD_R_ST7,
/* extended multimedia registers */
UD_R_XMM0, UD_R_XMM1, UD_R_XMM2, UD_R_XMM3,
UD_R_XMM4, UD_R_XMM5, UD_R_XMM6, UD_R_XMM7,
UD_R_XMM8, UD_R_XMM9, UD_R_XMM10, UD_R_XMM11,
UD_R_XMM12, UD_R_XMM13, UD_R_XMM14, UD_R_XMM15,
UD_R_RIP,
/* Operand Types */
UD_OP_REG, UD_OP_MEM, UD_OP_PTR, UD_OP_IMM,
UD_OP_JIMM, UD_OP_CONST
};
#include "itab.h"
union ud_lval {
int8_t sbyte;
uint8_t ubyte;
int16_t sword;
uint16_t uword;
int32_t sdword;
uint32_t udword;
int64_t sqword;
uint64_t uqword;
struct {
uint16_t seg;
uint32_t off;
} ptr;
};
/* -----------------------------------------------------------------------------
* struct ud_operand - Disassembled instruction Operand.
* -----------------------------------------------------------------------------
*/
struct ud_operand {
enum ud_type type;
uint8_t size;
uint8_t base;
uint8_t index;
uint8_t scale;
uint8_t offset;
union ud_lval lval;
/*
* internal use only
*/
uint64_t _legacy; /* this will be removed in 1.8 */
uint8_t _oprcode;
};
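/*
 * Illustrative sketch only (not part of the imported udis86 sources): one way
 * a consumer might read the displacement of a decoded memory operand, given an
 * `op` obtained from ud_insn_opr() in udis86.c below.  The `offset` field
 * holds the displacement width in bits; `lval` holds the displacement value.
 * The helper name is made up for this example.
 */
static inline int64_t
ud_operand_mem_disp(const struct ud_operand *op)
{
  if (op->type != UD_OP_MEM) {
    return 0;
  }
  switch (op->offset) {
    case 8:  return op->lval.sbyte;
    case 16: return op->lval.sword;
    case 32: return op->lval.sdword;
    case 64: return op->lval.sqword;
    default: return 0;   /* no displacement encoded */
  }
}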
/* -----------------------------------------------------------------------------
* struct ud - The udis86 object.
* -----------------------------------------------------------------------------
*/
struct ud
{
/*
* input buffering
*/
const uint8_t* inp_buf;
size_t inp_buf_size;
size_t inp_buf_index;
uint8_t inp_curr;
size_t inp_ctr;
uint8_t inp_sess[64];
int inp_end;
uint8_t dis_mode;
enum ud_mnemonic_code mnemonic;
struct ud_operand operand[3];
uint8_t error;
uint8_t pfx_rex;
uint8_t pfx_seg;
uint8_t pfx_opr;
uint8_t pfx_adr;
uint8_t pfx_lock;
uint8_t pfx_str;
uint8_t pfx_rep;
uint8_t pfx_repe;
uint8_t pfx_repne;
uint8_t opr_mode;
uint8_t adr_mode;
uint8_t br_far;
uint8_t br_near;
uint8_t have_modrm;
uint8_t modrm;
uint8_t primary_opcode;
void * user_opaque_data;
struct ud_itab_entry * itab_entry;
struct ud_lookup_table_list_entry *le;
};
/* -----------------------------------------------------------------------------
* Type-definitions
* -----------------------------------------------------------------------------
*/
typedef enum ud_type ud_type_t;
typedef enum ud_mnemonic_code ud_mnemonic_code_t;
typedef struct ud ud_t;
typedef struct ud_operand ud_operand_t;
#define UD_EOI (-1)
#define UD_INP_CACHE_SZ 32
#endif
/*
vim: set ts=2 sw=2 expandtab
*/

View file

@@ -0,0 +1,49 @@
/* udis86 - libudis86/udint.h -- definitions for internal use only
*
* Copyright (c) 2002-2009 Vivek Thampi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _UDINT_H_
#define _UDINT_H_
#define UD_ASSERT(_x) MOZ_ASSERT(_x)
#define UDERR(u, m) \
do { \
(u)->error = 1; \
} while (0)
#define UD_RETURN_ON_ERROR(u) \
do { \
if ((u)->error != 0) { \
return (u)->error; \
} \
} while (0)
#define UD_RETURN_WITH_ERROR(u, m) \
do { \
UDERR(u, m); \
return (u)->error; \
} while (0)
#endif /* _UDINT_H_ */
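/*
 * Illustrative sketch only (not part of the imported sources): the error
 * macros above are intended for the decoder routines in decode.c, roughly in
 * this shape.
 */
static int
decode_example(struct ud *u)
{
  if (u->inp_end) {
    /* Record the error on the ud object and return it to the caller. */
    UD_RETURN_WITH_ERROR(u, "unexpected end of input");
  }
  /* ... decoding work that may itself set u->error ... */
  UD_RETURN_ON_ERROR(u);
  return 0;
}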

View file

@@ -0,0 +1,131 @@
/* udis86 - libudis86/udis86.c
*
* Copyright (c) 2002-2013 Vivek Thampi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "udint.h"
#include "decode.h"
static void ud_inp_init(struct ud *u);
/* =============================================================================
 * ud_set_mode() - Set Disassembly Mode.
* =============================================================================
*/
extern void
ud_set_mode(struct ud* u, uint8_t m)
{
switch(m) {
case 16:
case 32:
case 64: u->dis_mode = m ; return;
default: u->dis_mode = 16; return;
}
}
/* =============================================================================
* ud_init
* Initializes ud_t object.
* =============================================================================
*/
extern void
ud_init(struct ud* u)
{
memset((void*)u, 0, sizeof(struct ud));
ud_set_mode(u, 16);
u->mnemonic = UD_Iinvalid;
}
/* =============================================================================
 * ud_insn_opr
* Return the operand struct representing the nth operand of
* the currently disassembled instruction. Returns NULL if
* there's no such operand.
* =============================================================================
*/
const struct ud_operand*
ud_insn_opr(const struct ud *u, unsigned int n)
{
if (n > 2 || u->operand[n].type == UD_NONE) {
return NULL;
} else {
return &u->operand[n];
}
}
/* =============================================================================
* ud_insn_mnemonic
* Return the current instruction mnemonic.
* =============================================================================
*/
enum ud_mnemonic_code
ud_insn_mnemonic(const struct ud *u)
{
return u->mnemonic;
}
/*
* ud_inp_init
* Initializes the input system.
*/
static void
ud_inp_init(struct ud *u)
{
u->inp_buf = NULL;
u->inp_buf_size = 0;
u->inp_buf_index = 0;
u->inp_curr = 0;
u->inp_ctr = 0;
u->inp_end = 0;
}
/* =============================================================================
 * ud_set_input_buffer
* Set buffer as input.
* =============================================================================
*/
void
ud_set_input_buffer(struct ud* u, const uint8_t* buf, size_t len)
{
ud_inp_init(u);
u->inp_buf = buf;
u->inp_buf_size = len;
u->inp_buf_index = 0;
}
/* vim:set ts=2 sw=2 expandtab */
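/*
 * Illustrative usage sketch only, not part of the imported sources.  It drives
 * the API defined above; ud_decode() is assumed to be the decoder entry point
 * declared in decode.h (not shown in this excerpt; in stock udis86 it returns
 * the decoded length and 0 when input is exhausted), and the "udis86.h"
 * include name is an assumption about this embedded copy.
 */
#include <stdio.h>
#include "udis86.h"

static void
disassemble_first_insn(const uint8_t *bytes, size_t len)
{
  ud_t u;
  ud_init(&u);                         /* zero the object; mode defaults to 16-bit */
  ud_set_mode(&u, 64);                 /* decode as 64-bit code */
  ud_set_input_buffer(&u, bytes, len);

  if (ud_decode(&u) != 0) {
    const struct ud_operand *op = ud_insn_opr(&u, 0);
    printf("mnemonic code %d, first operand type %d\n",
           (int)ud_insn_mnemonic(&u),
           op ? (int)op->type : (int)UD_NONE);
  }
}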

View file

@@ -951,7 +951,7 @@ nsXULAppInfo::EnsureContentProcess()
return NS_ERROR_NOT_AVAILABLE;
RefPtr<ContentParent> unused = ContentParent::GetNewOrUsedBrowserProcess(
NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
nullptr, NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
return NS_OK;
}

View file

@@ -64,6 +64,8 @@
#include "mozilla/ipc/GeckoChildProcessHost.h"
#include "mozilla/ipc/IOThreadChild.h"
#include "mozilla/ipc/ProcessChild.h"
#include "mozilla/recordreplay/ChildIPC.h"
#include "mozilla/recordreplay/ParentIPC.h"
#include "ScopedXREEmbed.h"
#include "mozilla/plugins/PluginProcessChild.h"
@@ -361,6 +363,8 @@ XRE_InitChildProcess(int aArgc,
NS_ENSURE_ARG_POINTER(aArgv[0]);
MOZ_ASSERT(aChildData);
recordreplay::Initialize(aArgc, aArgv);
#ifdef MOZ_ASAN_REPORTER
// In ASan reporter builds, we need to set ASan's log_path as early as
// possible, so it dumps its errors into files there instead of using
@@ -444,6 +448,9 @@ XRE_InitChildProcess(int aArgc,
return NS_ERROR_FAILURE;
const char* const mach_port_name = aArgv[--aArgc];
Maybe<recordreplay::AutoPassThroughThreadEvents> pt;
pt.emplace();
const int kTimeoutMs = 1000;
MachSendMessage child_message(0);
@@ -510,6 +517,7 @@ XRE_InitChildProcess(int aArgc,
return NS_ERROR_FAILURE;
}
pt.reset();
#endif
SetupErrorHandling(aArgv[0]);
@@ -619,6 +627,9 @@ XRE_InitChildProcess(int aArgc,
}
}
// While replaying, use the parent PID that existed while recording.
parentPID = recordreplay::RecordReplayValue(parentPID);
#ifdef XP_MACOSX
mozilla::ipc::SharedMemoryBasic::SetupMachMemory(parentPID, ports_in_receiver, ports_in_sender,
ports_out_sender, ports_out_receiver, true);
@@ -665,6 +676,12 @@ XRE_InitChildProcess(int aArgc,
break;
}
// If we are recording or replaying, initialize state and update arguments
// according to those which were captured by the MiddlemanProcessChild in the
// middleman process. No argument manipulation should happen between this
// call and the point where the process child is initialized.
recordreplay::child::InitRecordingOrReplayingProcess(&aArgc, &aArgv);
{
// This is a lexical scope for the MessageLoop below. We want it
// to go out of scope before NS_LogTerm() so that we don't get
@@ -962,7 +979,7 @@ TestShellParent* GetOrCreateTestShellParent()
// processes.
RefPtr<ContentParent> parent =
ContentParent::GetNewOrUsedBrowserProcess(
NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
nullptr, NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
parent.forget(&gContentParent);
} else if (!gContentParent->IsAlive()) {
return nullptr;

View file

@@ -3138,6 +3138,12 @@ locked_profiler_stop(PSLockRef aLock)
// At the very start, clear RacyFeatures.
RacyFeatures::SetInactive();
#if defined(GP_OS_android)
if (ActivePS::FeatureJava(aLock)) {
java::GeckoJavaSampler::Stop();
}
#endif
#ifdef MOZ_TASK_TRACER
if (ActivePS::FeatureTaskTracer(aLock)) {
tasktracer::StopLogging();

Some files were not shown because too many files changed in this diff.