Bug 1606447 - Initial landing for cloud replay, r=jlast.

Differential Revision: https://phabricator.services.mozilla.com/D58444

--HG--
rename : toolkit/recordreplay/MiddlemanCall.cpp => toolkit/recordreplay/ExternalCall.cpp
rename : toolkit/recordreplay/MiddlemanCall.h => toolkit/recordreplay/ExternalCall.h
rename : toolkit/recordreplay/File.cpp => toolkit/recordreplay/Recording.cpp
rename : toolkit/recordreplay/File.h => toolkit/recordreplay/Recording.h
extra : moz-landing-system : lando
Brian Hackett 2020-01-03 20:43:08 +00:00
Parent 3da02eb210
Commit 6a51495175
55 changed files with 5102 additions and 6267 deletions


@ -617,11 +617,14 @@ class WebReplayPlayer extends Component {
const atPausedLocation =
pausedMessage && sameLocation(pausedMessage, message);
const { source, line, column } = message.frame;
const filename = source.split("/").pop();
let frameLocation = `${filename}:${line}`;
if (column > 100) {
frameLocation += `:${column}`;
let frameLocation = "";
if (message.frame) {
const { source, line, column } = message.frame;
const filename = source.split("/").pop();
frameLocation = `${filename}:${line}`;
if (column > 100) {
frameLocation += `:${column}`;
}
}
return dom.a({


@ -11,20 +11,21 @@ PromiseTestUtils.whitelistRejectionsGlobally(/Unknown source actor/);
// Test interaction of breakpoints with debugger statements.
add_task(async function() {
const dbg = await attachRecordingDebugger("doc_debugger_statements.html", {
skipInterrupt: true,
});
const dbg = await attachRecordingDebugger("doc_debugger_statements.html");
await resume(dbg);
invokeInTab("foo");
await waitForPaused(dbg);
const pauseLine = getVisibleSelectedFrameLine(dbg);
ok(pauseLine == 6, "Paused at first debugger statement");
ok(pauseLine == 7, "Paused at first debugger statement");
await addBreakpoint(dbg, "doc_debugger_statements.html", 7);
await resumeToLine(dbg, 7);
await addBreakpoint(dbg, "doc_debugger_statements.html", 8);
await resumeToLine(dbg, 8);
await resumeToLine(dbg, 9);
await dbg.actions.removeAllBreakpoints(getContext(dbg));
await rewindToLine(dbg, 6);
await resumeToLine(dbg, 8);
await rewindToLine(dbg, 7);
await resumeToLine(dbg, 9);
await shutdownDebugger(dbg);
});


@ -3,8 +3,10 @@
<div id="maindiv" style="padding-top:50px">Hello World!</div>
</body>
<script>
debugger;
document.getElementById("maindiv").innerHTML = "Foobar!";
debugger;
function foo() {
debugger;
document.getElementById("maindiv").innerHTML = "Foobar!";
debugger;
}
</script>
</html>


@ -14,7 +14,7 @@ function f() {
window.setTimeout(recordingFinished);
return;
}
window.setTimeout(f, 1);
window.setTimeout(f, 100);
}
function updateNumber() {
number++;
@ -31,6 +31,6 @@ function testStepping2() {
c++;
c--;
}
window.setTimeout(f, 1);
window.setTimeout(f, 100);
</script>
</html>


@ -0,0 +1,123 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* eslint-disable spaced-comment, brace-style, indent-legacy, no-shadow */
"use strict";
// This worker is spawned in the parent process the first time a middleman
// connects to a cloud based replaying process, and manages the web sockets
// which are used to connect to those remote processes. This could be done on
// the main thread, but is handled here because main thread web sockets must
// be associated with particular windows, which is not the case for the
// scope which connection.js is created in.
self.addEventListener("message", function({ data }) {
const { type, id } = data;
switch (type) {
case "connect":
try {
doConnect(id, data.channelId, data.address);
} catch (e) {
dump(`doConnect error: ${e}\n`);
}
break;
case "send":
try {
doSend(id, data.buf);
} catch (e) {
dump(`doSend error: ${e}\n`);
}
break;
default:
ThrowError(`Unknown event type ${type}`);
}
});
const gConnections = [];
function doConnect(id, channelId, address) {
if (gConnections[id]) {
ThrowError(`Duplicate connection ID ${id}`);
}
const socket = new WebSocket(address);
socket.onopen = evt => onOpen(id, evt);
socket.onclose = evt => onClose(id, evt);
socket.onmessage = evt => onMessage(id, evt);
socket.onerror = evt => onError(id, evt);
const connection = { outgoing: [] };
const promise = new Promise(r => (connection.openWaiter = r));
gConnections[id] = connection;
(async function sendMessages() {
await promise;
while (gConnections[id]) {
if (connection.outgoing.length) {
const buf = connection.outgoing.shift();
try {
socket.send(buf);
} catch (e) {
ThrowError(`Send error ${e}`);
}
} else {
await new Promise(resolve => (connection.sendWaiter = resolve));
}
}
})();
}
function doSend(id, buf) {
const connection = gConnections[id];
connection.outgoing.push(buf);
if (connection.sendWaiter) {
connection.sendWaiter();
connection.sendWaiter = null;
}
}
function onOpen(id) {
// Messages can now be sent to the socket.
gConnections[id].openWaiter();
}
function onClose(id, evt) {
gConnections[id] = null;
}
// Message data must be sent to the main thread in the order it was received.
// This is a bit tricky with web socket messages as they return data blobs,
// and blob.arrayBuffer() returns a promise such that multiple promises could
// be resolved out of order. Make sure this doesn't happen by using a single
// async frame to resolve the promises and forward them in order.
const gMessages = [];
let gMessageWaiter = null;
(async function processMessages() {
while (true) {
if (gMessages.length) {
const { id, promise } = gMessages.shift();
const buf = await promise;
postMessage({ id, buf });
} else {
await new Promise(resolve => (gMessageWaiter = resolve));
}
}
})();
function onMessage(id, evt) {
gMessages.push({ id, promise: evt.data.arrayBuffer() });
if (gMessageWaiter) {
gMessageWaiter();
gMessageWaiter = null;
}
}
function onError(id, evt) {
ThrowError(`Socket error ${evt}`);
}
function ThrowError(msg) {
dump(`Connection Worker Error: ${msg}\n`);
throw new Error(msg);
}


@ -0,0 +1,39 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* eslint-disable spaced-comment, brace-style, indent-legacy, no-shadow */
"use strict";
// This file provides an interface for connecting middleman processes with
// replaying processes living remotely in the cloud.
let gWorker;
let gCallback;
let gNextConnectionId = 1;
// eslint-disable-next-line no-unused-vars
function Initialize(callback) {
gWorker = new Worker("connection-worker.js");
gWorker.addEventListener("message", onMessage);
gCallback = callback;
}
function onMessage(evt) {
gCallback(evt.data.id, evt.data.buf);
}
// eslint-disable-next-line no-unused-vars
function Connect(channelId, address, callback) {
const id = gNextConnectionId++;
gWorker.postMessage({ type: "connect", id, channelId, address });
return id;
}
// eslint-disable-next-line no-unused-vars
function SendMessage(id, buf) {
gWorker.postMessage({ type: "send", id, buf });
}
// eslint-disable-next-line no-unused-vars
var EXPORTED_SYMBOLS = ["Initialize", "Connect", "SendMessage"];

File diff not shown because it is too large


@ -9,6 +9,8 @@ DIRS += [
]
DevToolsModules(
'connection-worker.js',
'connection.js',
'control.js',
'debugger.js',
'graphics.js',
@ -19,6 +21,7 @@ DevToolsModules(
XPIDL_MODULE = 'devtools_rr'
XPIDL_SOURCES = [
'rrIConnection.idl',
'rrIControl.idl',
'rrIGraphics.idl',
'rrIReplay.idl',


@ -8,11 +8,9 @@
// requests and other instructions from the middleman via the exported symbols
// defined at the end of this file.
//
// Like all other JavaScript in the recording/replaying process, this code's
// state is included in memory snapshots and reset when checkpoints are
// restored. In the process of handling the middleman's requests, however, its
// state may vary between recording and replaying, or between different
// replays. As a result, we have to be very careful about performing operations
// In the process of handling the middleman's requests, state in this file may
// vary between recording and replaying, or between different replays.
// As a result, we have to be very careful about performing operations
// that might interact with the recording --- any time we enter the debuggee
// and evaluate code or perform other operations.
// The divergeFromRecording function should be used at any point where such
@ -45,7 +43,6 @@ const {
CSSRule,
pointPrecedes,
pointEquals,
pointArrayIncludes,
findClosestPoint,
} = sandbox;
@ -173,36 +170,16 @@ function isNonNullObject(obj) {
return obj && (typeof obj == "object" || typeof obj == "function");
}
function getMemoryUsage() {
const memoryKinds = {
Generic: [1],
Snapshots: [2, 3, 4, 5, 6, 7],
ScriptHits: [8],
};
const rv = {};
for (const [name, kinds] of Object.entries(memoryKinds)) {
let total = 0;
kinds.forEach(kind => {
total += RecordReplayControl.memoryUsage(kind);
});
rv[name] = total;
}
return rv;
}
///////////////////////////////////////////////////////////////////////////////
// Persistent Script State
///////////////////////////////////////////////////////////////////////////////
// Association between Debugger.Scripts and their IDs. The indices that this
// table assigns to scripts are stable across the entire recording, even though
// this table (like all JS state) is included in snapshots, rolled back when
// rewinding, and so forth. In debuggee time, this table only grows (there is
// no way to remove entries). Scripts created for debugger activity (e.g. eval)
// are ignored, and off thread compilation is disabled, so this table acquires
// the same scripts in the same order as we roll back and run forward in the
// recording.
// table assigns to scripts are stable across the entire recording. In debuggee
// time, this table only grows (there is no way to remove entries).
// Scripts created for debugger activity (e.g. eval) are ignored, and off thread
// compilation is disabled, so this table acquires the same scripts in the same
// order as we roll back and run forward in the recording.
const gScripts = new IdMap();
// Any scripts added since the last checkpoint.
@ -957,16 +934,11 @@ const gNewDebuggerStatements = [];
// Whether to pause on debugger statements when running forward.
let gPauseOnDebuggerStatement = false;
function ensureRunToPointPositionHandlers({ endpoint, snapshotPoints }) {
function ensureRunToPointPositionHandlers({ endpoint }) {
if (gLastCheckpoint == endpoint.checkpoint) {
assert(endpoint.position);
ensurePositionHandler(endpoint.position);
}
snapshotPoints.forEach(snapshot => {
if (gLastCheckpoint == snapshot.checkpoint && snapshot.position) {
ensurePositionHandler(snapshot.position);
}
});
}
// Handlers that run when a manifest is first received. This must be specified
@ -980,9 +952,10 @@ const gManifestStartHandlers = {
dbg.onDebuggerStatement = debuggerStatementHit;
},
restoreSnapshot({ numSnapshots }) {
RecordReplayControl.restoreSnapshot(numSnapshots);
throwError("Unreachable!");
fork({ id }) {
const point = currentScriptedExecutionPoint() || currentExecutionPoint();
RecordReplayControl.fork(id);
RecordReplayControl.manifestFinished({ point });
},
runToPoint(manifest) {
@ -1117,7 +1090,7 @@ function currentExecutionPoint(position) {
function currentScriptedExecutionPoint() {
const numFrames = countScriptFrames();
if (!numFrames) {
return null;
return undefined;
}
const index = numFrames - 1;
@ -1152,9 +1125,6 @@ const gManifestFinishedAfterCheckpointHandlers = {
// The primordial manifest runs forward to the first checkpoint, saves it,
// and then finishes.
assert(point.checkpoint == FirstCheckpointId);
if (!newSnapshot(point)) {
return;
}
RecordReplayControl.manifestFinished({ point });
},
@ -1163,29 +1133,22 @@ const gManifestFinishedAfterCheckpointHandlers = {
finishResume(point);
},
runToPoint({ endpoint, snapshotPoints }, point) {
runToPoint({ endpoint, flushExternalCalls }, point) {
assert(endpoint.checkpoint >= point.checkpoint);
if (pointArrayIncludes(snapshotPoints, point) && !newSnapshot(point)) {
return;
}
if (!endpoint.position && point.checkpoint == endpoint.checkpoint) {
if (flushExternalCalls) {
RecordReplayControl.flushExternalCalls();
}
RecordReplayControl.manifestFinished({ point });
}
},
scanRecording({ endpoint, snapshotPoints }, point) {
scanRecording({ endpoint }, point) {
stopScanningAllScripts();
if (pointArrayIncludes(snapshotPoints, point) && !newSnapshot(point)) {
return;
}
if (point.checkpoint == endpoint.checkpoint) {
const duration =
RecordReplayControl.currentExecutionTime() - gManifestStartTime;
RecordReplayControl.manifestFinished({
point,
duration,
memoryUsage: getMemoryUsage(),
});
RecordReplayControl.manifestFinished({ point, duration });
}
},
};
@ -1205,7 +1168,7 @@ const gManifestPrepareAfterCheckpointHandlers = {
},
};
function processManifestAfterCheckpoint(point, restoredSnapshot) {
function processManifestAfterCheckpoint(point) {
if (gManifestFinishedAfterCheckpointHandlers[gManifest.kind]) {
gManifestFinishedAfterCheckpointHandlers[gManifest.kind](gManifest, point);
}
@ -1244,15 +1207,10 @@ const gManifestPositionHandlers = {
finishResume(point);
},
runToPoint({ endpoint, snapshotPoints }, point) {
if (pointArrayIncludes(snapshotPoints, point)) {
clearPositionHandlers();
if (newSnapshot(point)) {
ensureRunToPointPositionHandlers({ endpoint, snapshotPoints });
}
}
runToPoint({ endpoint, flushExternalCalls }, point) {
if (pointEquals(point, endpoint)) {
clearPositionHandlers();
assert(!flushExternalCalls);
RecordReplayControl.manifestFinished({ point });
}
},
@ -1279,18 +1237,6 @@ function debuggerStatementHit() {
}
}
function newSnapshot(point) {
if (RecordReplayControl.newSnapshot()) {
return true;
}
// After rewinding gManifest won't be correct, so we always mark the current
// manifest as finished and rely on the middleman to give us a new one.
RecordReplayControl.manifestFinished({ restoredSnapshot: true, point });
return false;
}
///////////////////////////////////////////////////////////////////////////////
// Handler Helpers
///////////////////////////////////////////////////////////////////////////////


@ -0,0 +1,23 @@
/* -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsISupports.idl"
// This interface defines the methods used when communicating with remote
// replaying processes over web sockets in the parent process.
[scriptable, uuid(df6d8e96-4cba-4a1d-893a-1ee19e8d8468)]
interface rrIConnection : nsISupports {
// Supply the callback which will be invoked with the connection ID and an
// array buffer when new data is received from a remote process.
void Initialize(in jsval callback);
// Start a new connection with a new remote replaying process, specifying
// the channel ID the process will use (unique for the middleman this is
// associated with) and returning the globally unique connection ID.
long Connect(in long channelId, in AString address);
// Send message data through a particular connection.
void SendMessage(in long connectionId, in jsval buf);
};


@ -11,8 +11,10 @@
interface rrIControl : nsISupports {
void Initialize(in jsval recordingChildId);
void ConnectDebugger(in jsval replayDebugger);
void ManifestFinished(in long childId, in jsval response);
void ManifestFinished(in long rootId, in long forkId, in jsval response);
void PingResponse(in long rootId, in long forkId, in long pingId,
in long progress);
void BeforeSaveRecording();
void AfterSaveRecording();
void ChildCrashed(in long childId);
void ChildCrashed(in long rootId, in long forkId);
};


@ -48,6 +48,8 @@ namespace recordreplay {
Macro(InternalEndOrderedAtomicAccess, (), ()) \
Macro(InternalBeginPassThroughThreadEvents, (), ()) \
Macro(InternalEndPassThroughThreadEvents, (), ()) \
Macro(InternalBeginPassThroughThreadEventsWithLocalReplay, (), ()) \
Macro(InternalEndPassThroughThreadEventsWithLocalReplay, (), ()) \
Macro(InternalBeginDisallowThreadEvents, (), ()) \
Macro(InternalEndDisallowThreadEvents, (), ()) \
Macro(InternalRecordReplayBytes, (void* aData, size_t aSize), \
@ -92,7 +94,10 @@ FOR_EACH_INTERFACE_VOID(DECLARE_SYMBOL_VOID)
static void* LoadSymbol(const char* aName) {
#ifdef ENABLE_RECORD_REPLAY
void* rv = dlsym(RTLD_DEFAULT, aName);
MOZ_RELEASE_ASSERT(rv);
if (!rv) {
fprintf(stderr, "Record/Replay LoadSymbol failed: %s\n", aName);
MOZ_CRASH("LoadSymbol");
}
return rv;
#else
return nullptr;


@ -147,6 +147,28 @@ struct MOZ_RAII AutoEnsurePassThroughThreadEvents {
bool mPassedThrough;
};
// Mark a region where thread events are passed through when locally replaying.
// Replaying processes can run either on a local machine as a content process
// associated with a firefox parent process, or on remote machines in the cloud.
// We want local replaying processes to be able to interact with the system so
// that they can connect with the parent process and e.g. report crashes.
// We also want to avoid such interaction when replaying in the cloud, as there
// won't be a parent process to connect to. Using these methods allows us to
// handle both of these cases without changing the calling code's control flow.
static inline void BeginPassThroughThreadEventsWithLocalReplay();
static inline void EndPassThroughThreadEventsWithLocalReplay();
// RAII class for regions where thread events are passed through when replaying
// locally.
struct MOZ_RAII AutoPassThroughThreadEventsWithLocalReplay {
AutoPassThroughThreadEventsWithLocalReplay() {
BeginPassThroughThreadEventsWithLocalReplay();
}
~AutoPassThroughThreadEventsWithLocalReplay() {
EndPassThroughThreadEventsWithLocalReplay();
}
};
// Mark a region where thread events are not allowed to occur. The process will
// crash immediately if an event does happen.
static inline void BeginDisallowThreadEvents();
@ -356,6 +378,10 @@ MOZ_MAKE_RECORD_REPLAY_WRAPPER_VOID(BeginPassThroughThreadEvents, (), ())
MOZ_MAKE_RECORD_REPLAY_WRAPPER_VOID(EndPassThroughThreadEvents, (), ())
MOZ_MAKE_RECORD_REPLAY_WRAPPER(AreThreadEventsPassedThrough, bool, false, (),
())
MOZ_MAKE_RECORD_REPLAY_WRAPPER_VOID(BeginPassThroughThreadEventsWithLocalReplay,
(), ())
MOZ_MAKE_RECORD_REPLAY_WRAPPER_VOID(EndPassThroughThreadEventsWithLocalReplay,
(), ())
MOZ_MAKE_RECORD_REPLAY_WRAPPER_VOID(BeginDisallowThreadEvents, (), ())
MOZ_MAKE_RECORD_REPLAY_WRAPPER_VOID(EndDisallowThreadEvents, (), ())
MOZ_MAKE_RECORD_REPLAY_WRAPPER(AreThreadEventsDisallowed, bool, false, (), ())
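
A minimal usage sketch of the new RAII guard; the MaybeReportCrash and ReportCrashToParent names are invented for illustration and are not part of this patch. Code that should only talk to the system when a local firefox parent process exists can be wrapped in the guard, and the same code path works when replaying in the cloud.

#include "mozilla/RecordReplay.h"

// Hypothetical helper, declared only for this sketch: something that
// communicates with the firefox parent process (e.g. crash reporting).
extern void ReportCrashToParent();

void MaybeReportCrash() {
  // Thread events in this region are passed through when replaying locally,
  // where a parent process exists to talk to, and not when replaying in the
  // cloud; either way the caller's control flow is unchanged.
  mozilla::recordreplay::AutoPassThroughThreadEventsWithLocalReplay passThrough;
  ReportCrashToParent();
}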


@ -853,6 +853,7 @@ pref("devtools.recordreplay.includeSystemScripts", false);
pref("devtools.recordreplay.logging", false);
pref("devtools.recordreplay.loggingFull", false);
pref("devtools.recordreplay.fastLogpoints", false);
pref("devtools.recordreplay.cloudServer", "");
// Preferences for the new performance panel.
// This pref configures the base URL for the profiler.firefox.com instance to


@ -47,8 +47,6 @@ void Assembler::Advance(size_t aSize) {
mCursor += aSize;
}
static const size_t JumpBytes = 17;
uint8_t* Assembler::Current() {
// Reallocate the buffer if there is not enough space. We need enough for the
// maximum space used by any of the assembling functions, as well as for a
@ -57,9 +55,9 @@ uint8_t* Assembler::Current() {
MOZ_RELEASE_ASSERT(mCanAllocateStorage);
// Allocate some writable, executable memory.
static const size_t BufferSize = PageSize;
uint8_t* buffer = new uint8_t[PageSize];
UnprotectExecutableMemory(buffer, PageSize);
static const size_t BufferSize = PageSize * 32;
uint8_t* buffer = new uint8_t[BufferSize];
UnprotectExecutableMemory(buffer, BufferSize);
if (mCursor) {
// Patch a jump for fallthrough from the last allocation.
@ -81,16 +79,20 @@ static void Push16(uint8_t** aIp, uint16_t aValue) {
(*aIp) += 4;
}
/* static */
void Assembler::PatchJump(uint8_t* aIp, void* aTarget) {
static void PushImmediateAtIp(uint8_t** aIp, void* aValue) {
// Push the target literal onto the stack, 2 bytes at a time. This is
// apparently the best way of getting an arbitrary 8 byte literal onto the
// stack, as 4 byte literals we push will be sign extended to 8 bytes.
size_t ntarget = reinterpret_cast<size_t>(aTarget);
Push16(&aIp, ntarget >> 48);
Push16(&aIp, ntarget >> 32);
Push16(&aIp, ntarget >> 16);
Push16(&aIp, ntarget);
size_t nvalue = reinterpret_cast<size_t>(aValue);
Push16(aIp, nvalue >> 48);
Push16(aIp, nvalue >> 32);
Push16(aIp, nvalue >> 16);
Push16(aIp, nvalue);
}
/* static */
void Assembler::PatchJump(uint8_t* aIp, void* aTarget) {
PushImmediateAtIp(&aIp, aTarget);
*aIp = 0xC3; // ret
}
@ -100,6 +102,20 @@ void Assembler::Jump(void* aTarget) {
Advance(JumpBytes);
}
void Assembler::PushImmediate(void* aValue) {
uint8_t* ip = Current();
PushImmediateAtIp(&ip, aValue);
Advance(PushImmediateBytes);
}
void Assembler::Return() {
NewInstruction(0xC3);
}
void Assembler::Breakpoint() {
NewInstruction(0xCC);
}
static uint8_t OppositeJump(uint8_t aOpcode) {
// Get the opposite single byte jump opcode for a one or two byte conditional
// jump. Opposite opcodes are adjacent, e.g. 0x7C -> jl and 0x7D -> jge.
@ -156,10 +172,24 @@ void Assembler::CompareRaxWithTopOfStack() {
NewInstruction(0x48, 0x39, 0x04, 0x24);
}
void Assembler::CompareTopOfStackWithRax() {
NewInstruction(0x48, 0x3B, 0x04, 0x24);
}
void Assembler::PushRbx() { NewInstruction(0x53); }
void Assembler::PopRbx() { NewInstruction(0x5B); }
void Assembler::PopRegister(/*ud_type*/ int aRegister) {
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));
if (aRegister <= UD_R_RDI) {
NewInstruction(0x58 + aRegister - UD_R_RAX);
} else {
NewInstruction(0x41, 0x58 + aRegister - UD_R_R8);
}
}
void Assembler::StoreRbxToRax(size_t aWidth) {
switch (aWidth) {
case 1:


@ -44,6 +44,15 @@ class Assembler {
// the target will be the copy of aTarget instead.
void Jump(void* aTarget);
// Push aValue onto the stack.
void PushImmediate(void* aValue);
// Return to the address at the top of the stack.
void Return();
// For debugging, insert a breakpoint instruction.
void Breakpoint();
// Conditionally jump to aTarget, depending on the short jump opcode aCode.
// If aTarget is in the range of instructions being copied, the target will
// be the copy of aTarget instead.
@ -68,10 +77,15 @@ class Assembler {
// cmpq %rax, 0(%rsp)
void CompareRaxWithTopOfStack();
// cmpq 0(%rsp), %rax
void CompareTopOfStackWithRax();
// push/pop %rbx
void PushRbx();
void PopRbx();
void PopRegister(/*ud_type*/ int aRegister);
// movq/movl/movb %rbx, 0(%rax)
void StoreRbxToRax(size_t aWidth);
@ -174,6 +188,11 @@ static const size_t ShortJumpBytes = 2;
// The number of instruction bytes required for a jump that may clobber rax.
static const size_t JumpBytesClobberRax = 12;
// The number of instruction bytes required for an arbitrary jump.
static const size_t JumpBytes = 17;
static const size_t PushImmediateBytes = 16;
// The maximum byte length of an x86/x64 instruction.
static const size_t MaximumInstructionLength = 15;
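
As a quick consistency check on the byte accounting behind these constants (a sketch that assumes the constants above are in scope, as they are in Assembler.h): PushImmediateAtIp emits the 8-byte target as four 2-byte pushes of 4 encoded bytes each, and a full jump appends a single ret byte.

// Sketch of the arithmetic only; mirrors Push16 advancing 4 bytes per call
// and PatchJump appending a one-byte ret (0xC3).
static_assert(PushImmediateBytes == 4 * 4,
              "PushImmediate is four 16-bit pushes, 4 bytes of encoding each");
static_assert(JumpBytes == PushImmediateBytes + 1,
              "a full jump is a pushed immediate followed by a one-byte ret");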


@ -12,9 +12,8 @@
namespace mozilla {
namespace recordreplay {
// BufferStream provides similar functionality to Stream in File.h, allowing
// reading or writing to a stream of data backed by an in memory buffer instead
// of data stored on disk.
// BufferStream is a simplified form of Stream from Recording.h, allowing
// reading or writing to a stream of data backed by an in memory buffer.
class BufferStream {
InfallibleVector<char>* mOutput;


@ -1,125 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "DirtyMemoryHandler.h"
#include "ipc/ChildInternal.h"
#include "mozilla/Sprintf.h"
#include "MemorySnapshot.h"
#include "Thread.h"
#include <mach/exc.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <sys/time.h>
namespace mozilla {
namespace recordreplay {
static mach_port_t gDirtyMemoryExceptionPort;
// See AsmJSSignalHandlers.cpp.
static const mach_msg_id_t sExceptionId = 2405;
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs). See js/src/wasm/WasmSignalHandlers.cpp.
#pragma pack(4)
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread;
mach_msg_port_descriptor_t task;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_type_t exception;
mach_msg_type_number_t codeCnt;
int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()
typedef struct {
Request__mach_exception_raise_t body;
mach_msg_trailer_t trailer;
} ExceptionRequest;
static void DirtyMemoryExceptionHandlerThread(void*) {
kern_return_t kret;
while (true) {
ExceptionRequest request;
kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
gDirtyMemoryExceptionPort, MACH_MSG_TIMEOUT_NONE,
MACH_PORT_NULL);
kern_return_t replyCode = KERN_FAILURE;
if (kret == KERN_SUCCESS && request.body.Head.msgh_id == sExceptionId &&
request.body.exception == EXC_BAD_ACCESS && request.body.codeCnt == 2) {
uint8_t* faultingAddress = (uint8_t*)request.body.code[1];
if (HandleDirtyMemoryFault(faultingAddress)) {
replyCode = KERN_SUCCESS;
} else {
child::MinidumpInfo info(request.body.exception, request.body.code[0],
request.body.code[1],
request.body.thread.name);
child::ReportFatalError(
Some(info), "HandleDirtyMemoryFault failed %p %s", faultingAddress,
gMozCrashReason ? gMozCrashReason : "");
}
} else {
child::ReportFatalError(Nothing(),
"DirtyMemoryExceptionHandlerThread mach_msg "
"returned unexpected data");
}
__Reply__exception_raise_t reply;
reply.Head.msgh_bits =
MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
reply.Head.msgh_size = sizeof(reply);
reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
reply.Head.msgh_local_port = MACH_PORT_NULL;
reply.Head.msgh_id = request.body.Head.msgh_id + 100;
reply.NDR = NDR_record;
reply.RetCode = replyCode;
mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
}
void SetupDirtyMemoryHandler() {
// Allow repeated calls.
static bool hasDirtyMemoryHandler = false;
if (hasDirtyMemoryHandler) {
return;
}
hasDirtyMemoryHandler = true;
MOZ_RELEASE_ASSERT(AreThreadEventsPassedThrough());
kern_return_t kret;
// Get a port which can send and receive data.
kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
&gDirtyMemoryExceptionPort);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
kret = mach_port_insert_right(mach_task_self(), gDirtyMemoryExceptionPort,
gDirtyMemoryExceptionPort,
MACH_MSG_TYPE_MAKE_SEND);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
// Create a thread to block on reading the port.
Thread::SpawnNonRecordedThread(DirtyMemoryExceptionHandlerThread, nullptr);
// Set exception ports on the entire task. Unfortunately, this clobbers any
// other exception ports for the task, and forwarding to those other ports
// is not easy to get right.
kret = task_set_exception_ports(
mach_task_self(), EXC_MASK_BAD_ACCESS, gDirtyMemoryExceptionPort,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
}
} // namespace recordreplay
} // namespace mozilla


@ -1,20 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_DirtyMemoryHandler_h
#define mozilla_recordreplay_DirtyMemoryHandler_h
namespace mozilla {
namespace recordreplay {
// Set up a handler to catch SEGV hardware exceptions and pass them on to
// HandleDirtyMemoryFault in MemorySnapshot.h for handling.
void SetupDirtyMemoryHandler();
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_DirtyMemoryHandler_h


@ -0,0 +1,480 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ExternalCall.h"
#include <unordered_map>
namespace mozilla {
namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Replaying and External Process State
///////////////////////////////////////////////////////////////////////////////
typedef std::unordered_map<ExternalCallId, ExternalCall*> CallsByIdMap;
typedef std::unordered_map<const void*, ExternalCallId> CallsByValueMap;
// State used for keeping track of external calls in either a replaying
// process or external process.
struct ExternalCallState {
// In a replaying or external process, association between ExternalCallIds
// and the associated ExternalCall, for each encountered call.
CallsByIdMap mCallsById;
// In a replaying or external process, association between values produced by
// an external call and the call's ID. This is the inverse of each call's
// mValue field, except that if multiple calls have produced the same value
// this maps that value to the most recent one.
CallsByValueMap mCallsByValue;
// In an external process, any buffers allocated for performed calls.
InfallibleVector<void*> mAllocatedBuffers;
};
// In a replaying process, all external call state. In an external process,
// state for the call currently being processed.
static ExternalCallState* gState;
// In a replaying process, all external calls found in the recording that have
// not been flushed to the root replaying process.
static StaticInfallibleVector<ExternalCall*> gUnflushedCalls;
// In a replaying process, lock protecting external call state. In the
// external process, all accesses occur on the main thread.
static Monitor* gMonitor;
void InitializeExternalCalls() {
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying() || IsMiddleman());
if (IsReplaying()) {
gState = new ExternalCallState();
gMonitor = new Monitor();
}
}
static void SetExternalCallValue(ExternalCall* aCall, const void* aValue) {
aCall->mValue.reset();
aCall->mValue.emplace(aValue);
gState->mCallsByValue.erase(aValue);
gState->mCallsByValue.insert(CallsByValueMap::value_type(aValue, aCall->mId));
}
static void GatherDependentCalls(
InfallibleVector<ExternalCall*>& aOutgoingCalls, ExternalCall* aCall) {
for (ExternalCall* existing : aOutgoingCalls) {
if (existing == aCall) {
return;
}
}
aOutgoingCalls.append(aCall);
for (ExternalCallId dependentId : aCall->mDependentCalls) {
auto iter = gState->mCallsById.find(dependentId);
MOZ_RELEASE_ASSERT(iter != gState->mCallsById.end());
ExternalCall* dependent = iter->second;
GatherDependentCalls(aOutgoingCalls, dependent);
}
}
bool OnExternalCall(size_t aCallId, CallArguments* aArguments, bool aDiverged) {
MOZ_RELEASE_ASSERT(IsReplaying());
const Redirection& redirection = GetRedirection(aCallId);
MOZ_RELEASE_ASSERT(redirection.mExternalCall);
const char* messageName = "";
if (!strcmp(redirection.mName, "objc_msgSend")) {
messageName = aArguments->Arg<1, const char*>();
}
if (aDiverged) {
PrintSpew("OnExternalCall Diverged %s %s\n", redirection.mName, messageName);
}
MonitorAutoLock lock(*gMonitor);
// Allocate the new ExternalCall.
ExternalCall* call = new ExternalCall();
call->mCallId = aCallId;
// Save all the call's inputs.
{
ExternalCallContext cx(call, aArguments,
ExternalCallPhase::SaveInput);
redirection.mExternalCall(cx);
if (cx.mFailed) {
delete call;
if (child::CurrentRepaintCannotFail() && aDiverged) {
child::ReportFatalError("External call input failed: %s\n",
redirection.mName);
}
return false;
}
}
call->ComputeId();
bool isNewCall = false;
auto iter = gState->mCallsById.find(call->mId);
if (iter == gState->mCallsById.end()) {
// We haven't seen this call before.
isNewCall = true;
gState->mCallsById.insert(CallsByIdMap::value_type(call->mId, call));
} else {
// We've seen this call before, so use the old copy.
delete call;
call = iter->second;
// Reuse this call's result if we need to restore the output.
if (aDiverged) {
ExternalCallContext cx(call, aArguments,
ExternalCallPhase::RestoreOutput);
redirection.mExternalCall(cx);
return true;
}
}
// If we have not diverged from the recording, we already have the outputs
// we need. Run the SaveOutput phase to capture these so that we can reuse
// them later and associate any system outputs with the call.
if (!aDiverged) {
ExternalCallContext cx(call, aArguments,
ExternalCallPhase::SaveOutput);
redirection.mExternalCall(cx);
if (isNewCall) {
gUnflushedCalls.append(call);
}
return true;
}
PrintSpew("OnExternalCall Send %s %s\n", redirection.mName, messageName);
// Gather any calls this one transitively depends on.
InfallibleVector<ExternalCall*> outgoingCalls;
GatherDependentCalls(outgoingCalls, call);
// Encode all calls that need to be performed, in the order to perform them.
InfallibleVector<char> inputData;
BufferStream inputStream(&inputData);
for (int i = outgoingCalls.length() - 1; i >= 0; i--) {
outgoingCalls[i]->EncodeInput(inputStream);
}
// Synchronously wait for the call result.
InfallibleVector<char> outputData;
child::SendExternalCallRequest(call->mId,
inputData.begin(), inputData.length(),
&outputData);
// Decode the external call's output.
BufferStream outputStream(outputData.begin(), outputData.length());
call->DecodeOutput(outputStream);
ExternalCallContext cx(call, aArguments, ExternalCallPhase::RestoreOutput);
redirection.mExternalCall(cx);
return true;
}
void ProcessExternalCall(const char* aInputData, size_t aInputSize,
InfallibleVector<char>* aOutputData) {
MOZ_RELEASE_ASSERT(IsMiddleman());
gState = new ExternalCallState();
auto& calls = gState->mCallsById;
BufferStream inputStream(aInputData, aInputSize);
ExternalCall* lastCall = nullptr;
ExternalCallContext::ReleaseCallbackVector releaseCallbacks;
while (!inputStream.IsEmpty()) {
ExternalCall* call = new ExternalCall();
call->DecodeInput(inputStream);
const Redirection& redirection = GetRedirection(call->mCallId);
MOZ_RELEASE_ASSERT(redirection.mExternalCall);
PrintSpew("ProcessExternalCall %lu %s\n", call->mId, redirection.mName);
CallArguments arguments;
bool skipCall;
{
ExternalCallContext cx(call, &arguments, ExternalCallPhase::RestoreInput);
redirection.mExternalCall(cx);
skipCall = cx.mSkipExecuting;
}
if (!skipCall) {
RecordReplayInvokeCall(redirection.mBaseFunction, &arguments);
}
{
ExternalCallContext cx(call, &arguments, ExternalCallPhase::SaveOutput);
cx.mReleaseCallbacks = &releaseCallbacks;
redirection.mExternalCall(cx);
}
lastCall = call;
MOZ_RELEASE_ASSERT(calls.find(call->mId) == calls.end());
calls.insert(CallsByIdMap::value_type(call->mId, call));
}
BufferStream outputStream(aOutputData);
lastCall->EncodeOutput(outputStream);
for (const auto& callback : releaseCallbacks) {
callback();
}
for (auto iter = calls.begin(); iter != calls.end(); ++iter) {
delete iter->second;
}
for (auto buffer : gState->mAllocatedBuffers) {
free(buffer);
}
delete gState;
gState = nullptr;
}
void* ExternalCallContext::AllocateBytes(size_t aSize) {
void* rv = malloc(aSize);
// In an external process, any buffers we allocate live until the calls are
// reset. In a replaying process, the buffers will live forever, to match the
// lifetime of the ExternalCall itself.
if (IsMiddleman()) {
gState->mAllocatedBuffers.append(rv);
}
return rv;
}
void FlushExternalCalls() {
MonitorAutoLock lock(*gMonitor);
for (ExternalCall* call : gUnflushedCalls) {
InfallibleVector<char> outputData;
BufferStream outputStream(&outputData);
call->EncodeOutput(outputStream);
child::SendExternalCallOutput(call->mId, outputData.begin(),
outputData.length());
}
gUnflushedCalls.clear();
}
///////////////////////////////////////////////////////////////////////////////
// External Call Caching
///////////////////////////////////////////////////////////////////////////////
// In root replaying processes, the outputs produced by assorted external calls
// are cached for fulfilling future external call requests.
struct ExternalCallOutput {
char* mOutput;
size_t mOutputSize;
};
// Protected by gMonitor. Accesses can occur on any thread.
typedef std::unordered_map<ExternalCallId, ExternalCallOutput> CallOutputMap;
static CallOutputMap* gCallOutputMap;
void AddExternalCallOutput(ExternalCallId aId, const char* aOutput,
size_t aOutputSize) {
MonitorAutoLock lock(*gMonitor);
if (!gCallOutputMap) {
gCallOutputMap = new CallOutputMap();
}
ExternalCallOutput output;
output.mOutput = new char[aOutputSize];
memcpy(output.mOutput, aOutput, aOutputSize);
output.mOutputSize = aOutputSize;
gCallOutputMap->insert(CallOutputMap::value_type(aId, output));
}
bool HasExternalCallOutput(ExternalCallId aId,
InfallibleVector<char>* aOutput) {
MonitorAutoLock lock(*gMonitor);
if (!gCallOutputMap) {
return false;
}
auto iter = gCallOutputMap->find(aId);
if (iter == gCallOutputMap->end()) {
return false;
}
aOutput->append(iter->second.mOutput, iter->second.mOutputSize);
return true;
}
///////////////////////////////////////////////////////////////////////////////
// System Values
///////////////////////////////////////////////////////////////////////////////
static ExternalCall* LookupExternalCall(const void* aThing) {
CallsByValueMap::const_iterator iter = gState->mCallsByValue.find(aThing);
if (iter != gState->mCallsByValue.end()) {
CallsByIdMap::const_iterator iter2 = gState->mCallsById.find(iter->second);
if (iter2 != gState->mCallsById.end()) {
return iter2->second;
}
}
return nullptr;
}
static Maybe<const void*> GetExternalCallValue(ExternalCallId aId) {
auto iter = gState->mCallsById.find(aId);
if (iter != gState->mCallsById.end()) {
return iter->second->mValue;
}
return Nothing();
}
bool EX_SystemInput(ExternalCallContext& aCx, const void** aThingPtr) {
MOZ_RELEASE_ASSERT(aCx.AccessInput());
bool isNull = *aThingPtr == nullptr;
aCx.ReadOrWriteInputBytes(&isNull, sizeof(isNull));
if (isNull) {
*aThingPtr = nullptr;
return true;
}
ExternalCallId callId = 0;
if (aCx.mPhase == ExternalCallPhase::SaveInput) {
ExternalCall* call = LookupExternalCall(*aThingPtr);
if (call) {
callId = call->mId;
MOZ_RELEASE_ASSERT(callId);
aCx.mCall->mDependentCalls.append(call->mId);
}
}
aCx.ReadOrWriteInputBytes(&callId, sizeof(callId));
if (aCx.mPhase == ExternalCallPhase::RestoreInput) {
if (callId) {
Maybe<const void*> value = GetExternalCallValue(callId);
MOZ_RELEASE_ASSERT(value.isSome());
*aThingPtr = value.ref();
}
}
return callId != 0;
}
static const void* MangledSystemValue(ExternalCallId aId) {
return (const void*)((size_t)aId | (1ULL << 63));
}
void EX_SystemOutput(ExternalCallContext& aCx, const void** aOutput,
bool aUpdating) {
if (!aCx.AccessOutput()) {
return;
}
bool isNull = false;
Maybe<ExternalCallId> aliasedCall;
if (aCx.mPhase == ExternalCallPhase::SaveOutput) {
SetExternalCallValue(aCx.mCall, *aOutput);
isNull = *aOutput == nullptr;
if (!isNull) {
ExternalCall* call = LookupExternalCall(*aOutput);
if (call) {
aliasedCall.emplace(call->mId);
}
}
}
aCx.ReadOrWriteOutputBytes(&isNull, sizeof(isNull));
aCx.ReadOrWriteOutputBytes(&aliasedCall, sizeof(aliasedCall));
if (aCx.mPhase == ExternalCallPhase::RestoreOutput) {
do {
if (isNull) {
*aOutput = nullptr;
break;
}
if (aliasedCall.isSome()) {
auto iter = gState->mCallsById.find(aliasedCall.ref());
if (iter != gState->mCallsById.end()) {
*aOutput = iter->second;
break;
}
// If we haven't encountered the aliased call, fall through and generate
// a new value for it. Aliases might be spurious if they were derived from
// the recording and reflect a value that was released and had its memory
// reused.
}
*aOutput = MangledSystemValue(aCx.mCall->mId);
} while (false);
SetExternalCallValue(aCx.mCall, *aOutput);
}
}
///////////////////////////////////////////////////////////////////////////////
// ExternalCall
///////////////////////////////////////////////////////////////////////////////
void CallReturnRegisters::CopyFrom(CallArguments* aArguments) {
rval0 = aArguments->Rval<size_t, 0>();
rval1 = aArguments->Rval<size_t, 1>();
floatrval0 = aArguments->FloatRval<0>();
floatrval1 = aArguments->FloatRval<1>();
}
void CallReturnRegisters::CopyTo(CallArguments* aArguments) {
aArguments->Rval<size_t, 0>() = rval0;
aArguments->Rval<size_t, 1>() = rval1;
aArguments->FloatRval<0>() = floatrval0;
aArguments->FloatRval<1>() = floatrval1;
}
void ExternalCall::EncodeInput(BufferStream& aStream) const {
aStream.WriteScalar(mId);
aStream.WriteScalar(mCallId);
aStream.WriteScalar(mExcludeInput);
aStream.WriteScalar(mInput.length());
aStream.WriteBytes(mInput.begin(), mInput.length());
}
void ExternalCall::DecodeInput(BufferStream& aStream) {
mId = aStream.ReadScalar();
mCallId = aStream.ReadScalar();
mExcludeInput = aStream.ReadScalar();
size_t inputLength = aStream.ReadScalar();
mInput.appendN(0, inputLength);
aStream.ReadBytes(mInput.begin(), inputLength);
}
void ExternalCall::EncodeOutput(BufferStream& aStream) const {
aStream.WriteBytes(&mReturnRegisters, sizeof(CallReturnRegisters));
aStream.WriteScalar(mOutput.length());
aStream.WriteBytes(mOutput.begin(), mOutput.length());
}
void ExternalCall::DecodeOutput(BufferStream& aStream) {
aStream.ReadBytes(&mReturnRegisters, sizeof(CallReturnRegisters));
size_t outputLength = aStream.ReadScalar();
mOutput.appendN(0, outputLength);
aStream.ReadBytes(mOutput.begin(), outputLength);
}
} // namespace recordreplay
} // namespace mozilla
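
A rough sketch of how a root replaying process might service an external call request using the cache above. OnExternalCallRequest and ForwardToExternalProcess are invented names standing in for the real plumbing between forked replaying processes, the root process, and the external process; only HasExternalCallOutput and AddExternalCallOutput come from this patch.

#include "ExternalCall.h"

namespace mozilla {
namespace recordreplay {

// Hypothetical IPC stand-in: have the external process perform the encoded
// calls and write back the final call's output.
extern void ForwardToExternalProcess(const char* aInputData, size_t aInputSize,
                                     InfallibleVector<char>* aOutputData);

static void OnExternalCallRequest(ExternalCallId aId, const char* aInputData,
                                  size_t aInputSize,
                                  InfallibleVector<char>* aOutputData) {
  // Identical inputs hash to the same ID, so a previously cached output can
  // satisfy the request without contacting the external process again.
  if (HasExternalCallOutput(aId, aOutputData)) {
    return;
  }
  ForwardToExternalProcess(aInputData, aInputSize, aOutputData);
  AddExternalCallOutput(aId, aOutputData->begin(), aOutputData->length());
}

}  // namespace recordreplay
}  // namespace mozilla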


@ -0,0 +1,455 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ExternalCall_h
#define mozilla_recordreplay_ExternalCall_h
#include "BufferStream.h"
#include "ProcessRedirect.h"
#include "mozilla/Maybe.h"
namespace mozilla {
namespace recordreplay {
// External Calls Overview
//
// With few exceptions, replaying processes do not interact with the underlying
// system or call the actual versions of redirected system library functions.
// This is problematic after diverging from the recording, as then the diverged
// thread cannot interact with its recording either.
//
// External calls are used in a replaying process after diverging from the
// recording to perform calls in another process instead. We call this the
// external process; currently it is always the middleman, though this will
// change soon.
//
// External call state is managed so that call results can be reused when
// possible, to minimize traffic between processes and improve efficiency.
// Conceptually, the result of performing a system call is entirely determined
// by its inputs: scalar values and the results of other system calls (and the
// version of the underlying system, which we ignore for now). This information
// uniquely identifies the call, and we can form a directed graph out of these
// calls by connecting them to the other calls they depend on.
//
// Each root replaying process maintains a portion of this graph. As the
// recording is replayed by other forked processes, nodes are added to the graph
// by studying the output of calls that appear in the recording itself. When
// a replaying process wants to make a system call that does not appear in the
// graph, the inputs for that call and the transitive closure of the calls it
// depends on is sent to another process running on the target platform. That
// process executes the call, saves the output and sends it back for adding to
// the graph. Other ways of updating the graph could be added in the future.
// Inputs and outputs to external calls are handled in a reusable fashion by
// adding an ExternalCall hook to the system call's redirection. This hook is
// called in one of the following phases.
enum class ExternalCallPhase {
// When replaying, a call which can be put in the external call graph is being
// made. This can happen either before or after diverging from the recording,
// and saves all inputs to the call which are sufficient to uniquely identify
// it in the graph.
SaveInput,
// In the external process, the inputs saved earlier are being restored in
// preparation for executing the call.
RestoreInput,
// Save any outputs produced by a call. This can happen either in the
// replaying process (using outputs saved in the recording) or in the external
// process (using outputs just produced). After saving the outputs to the
// call, it can be placed in the external call graph and be used to resolve
// external calls which a diverged replaying process is making. Returned
// register values are automatically saved.
SaveOutput,
// In the replaying process, restore any outputs associated with an external
// call. This is only called after diverging from the recording, and allows
// diverged execution to continue afterwards.
RestoreOutput,
};
// Global identifier for an external call. The result of an external call is
// determined by its inputs, and its ID is a hash of those inputs. If there are
// hash collisions between different inputs then two different calls will have
// the same ID, and things will break (subtly or not). Valid IDs are non-zero.
typedef uintptr_t ExternalCallId;
// Storage for the returned registers of a call which are automatically saved.
struct CallReturnRegisters {
size_t rval0, rval1;
double floatrval0, floatrval1;
void CopyFrom(CallArguments* aArguments);
void CopyTo(CallArguments* aArguments);
};
struct ExternalCall {
// ID for this call.
ExternalCallId mId = 0;
// ID of the redirection being invoked.
size_t mCallId = 0;
// All call inputs. Written in SaveInput, read in RestoreInput.
InfallibleVector<char> mInput;
// If non-zero, only the input before this extent is used to characterize this
// call and determine if it is the same as another external call.
size_t mExcludeInput = 0;
// Calls which this depends on, written in SaveInput and read in RestoreInput.
// Any calls referenced in mInput will also be here.
InfallibleVector<ExternalCallId> mDependentCalls;
// All call outputs. Written in SaveOutput, read in RestoreOutput.
InfallibleVector<char> mOutput;
// Values of any returned registers after the call.
CallReturnRegisters mReturnRegisters;
// Any system value produced by this call. In the external process this is
// the actual system value, while in the replaying process this is the value
// used during execution to represent the call's result.
Maybe<const void*> mValue;
void EncodeInput(BufferStream& aStream) const;
void DecodeInput(BufferStream& aStream);
void EncodeOutput(BufferStream& aStream) const;
void DecodeOutput(BufferStream& aStream);
void ComputeId() {
MOZ_RELEASE_ASSERT(!mId);
size_t extent = mExcludeInput ? mExcludeInput : mInput.length();
mId = HashGeneric(mCallId, HashBytes(mInput.begin(), extent));
if (!mId) {
mId = 1;
}
}
};
// Information needed to process one of the phases of an external call.
struct ExternalCallContext {
// Call being operated on.
ExternalCall* mCall;
// Complete arguments and return value information for the call.
CallArguments* mArguments;
// Current processing phase.
ExternalCallPhase mPhase;
// During the SaveInput phase, whether capturing input data has failed.
// In such cases the call cannot be placed in the external call graph and,
// if the thread has diverged from the recording, an unhandled divergence
// will occur.
bool mFailed = false;
// This can be set in the RestoreInput phase to avoid executing the call
// in the external process.
bool mSkipExecuting = false;
// Streams of data that can be accessed during the various phases. Streams
// need to be read or written from at the same points in the phases which use
// them, so that callbacks operating on these streams can be composed without
// issues.
// Inputs are written during SaveInput, and read during RestoreInput.
Maybe<BufferStream> mInputStream;
// Outputs are written during SaveOutput, and read during RestoreOutput.
Maybe<BufferStream> mOutputStream;
// If we are running the SaveOutput phase in an external process, a list of
// callbacks which will release all system resources created by the call.
typedef InfallibleVector<std::function<void()>> ReleaseCallbackVector;
ReleaseCallbackVector* mReleaseCallbacks = nullptr;
ExternalCallContext(ExternalCall* aCall, CallArguments* aArguments,
ExternalCallPhase aPhase)
: mCall(aCall),
mArguments(aArguments),
mPhase(aPhase) {
switch (mPhase) {
case ExternalCallPhase::SaveInput:
mInputStream.emplace(&mCall->mInput);
break;
case ExternalCallPhase::RestoreInput:
mInputStream.emplace(mCall->mInput.begin(), mCall->mInput.length());
break;
case ExternalCallPhase::SaveOutput:
mCall->mReturnRegisters.CopyFrom(aArguments);
mOutputStream.emplace(&mCall->mOutput);
break;
case ExternalCallPhase::RestoreOutput:
mCall->mReturnRegisters.CopyTo(aArguments);
mOutputStream.emplace(mCall->mOutput.begin(), mCall->mOutput.length());
break;
}
}
void MarkAsFailed() {
MOZ_RELEASE_ASSERT(mPhase == ExternalCallPhase::SaveInput);
mFailed = true;
}
void WriteInputBytes(const void* aBuffer, size_t aSize) {
MOZ_RELEASE_ASSERT(mPhase == ExternalCallPhase::SaveInput);
mInputStream.ref().WriteBytes(aBuffer, aSize);
}
void WriteInputScalar(size_t aValue) {
MOZ_RELEASE_ASSERT(mPhase == ExternalCallPhase::SaveInput);
mInputStream.ref().WriteScalar(aValue);
}
void ReadInputBytes(void* aBuffer, size_t aSize) {
MOZ_RELEASE_ASSERT(mPhase == ExternalCallPhase::RestoreInput);
mInputStream.ref().ReadBytes(aBuffer, aSize);
}
size_t ReadInputScalar() {
MOZ_RELEASE_ASSERT(mPhase == ExternalCallPhase::RestoreInput);
return mInputStream.ref().ReadScalar();
}
bool AccessInput() { return mInputStream.isSome(); }
void ReadOrWriteInputBytes(void* aBuffer, size_t aSize, bool aExcludeInput = false) {
switch (mPhase) {
case ExternalCallPhase::SaveInput:
// Only one buffer can be excluded, and it has to be the last input to
// the external call.
MOZ_RELEASE_ASSERT(!mCall->mExcludeInput);
if (aExcludeInput) {
mCall->mExcludeInput = mCall->mInput.length();
}
WriteInputBytes(aBuffer, aSize);
break;
case ExternalCallPhase::RestoreInput:
ReadInputBytes(aBuffer, aSize);
break;
default:
MOZ_CRASH();
}
}
void ReadOrWriteInputBuffer(void** aBufferPtr, size_t aSize,
bool aIncludeContents = true) {
switch (mPhase) {
case ExternalCallPhase::SaveInput:
if (aIncludeContents) {
WriteInputBytes(*aBufferPtr, aSize);
}
break;
case ExternalCallPhase::RestoreInput:
*aBufferPtr = AllocateBytes(aSize);
if (aIncludeContents) {
ReadInputBytes(*aBufferPtr, aSize);
}
break;
default:
MOZ_CRASH();
}
}
bool AccessOutput() { return mOutputStream.isSome(); }
void ReadOrWriteOutputBytes(void* aBuffer, size_t aSize) {
switch (mPhase) {
case ExternalCallPhase::SaveOutput:
mOutputStream.ref().WriteBytes(aBuffer, aSize);
break;
case ExternalCallPhase::RestoreOutput:
mOutputStream.ref().ReadBytes(aBuffer, aSize);
break;
default:
MOZ_CRASH();
}
}
void ReadOrWriteOutputBuffer(void** aBuffer, size_t aSize) {
if (AccessInput()) {
bool isNull = *aBuffer == nullptr;
ReadOrWriteInputBytes(&isNull, sizeof(isNull));
if (isNull) {
*aBuffer = nullptr;
} else if (mPhase == ExternalCallPhase::RestoreInput) {
*aBuffer = AllocateBytes(aSize);
}
}
if (AccessOutput() && *aBuffer) {
ReadOrWriteOutputBytes(*aBuffer, aSize);
}
}
// Allocate some memory associated with the call, which will be released in
// the external process after fully processing a call, and will never be
// released in the replaying process.
void* AllocateBytes(size_t aSize);
};
// Notify the system about a call to a redirection with an external call hook.
// aDiverged is set if the current thread has diverged from the recording and
// any outputs for the call must be filled in; otherwise, they already have
// been filled in using data from the recording. Returns false if the call was
// unable to be processed.
bool OnExternalCall(size_t aCallId, CallArguments* aArguments,
bool aDiverged);
// In the external process, perform one or more calls encoded in aInputData
// and encode the output of the final call in aOutputData.
void ProcessExternalCall(const char* aInputData, size_t aInputSize,
InfallibleVector<char>* aOutputData);
// In a replaying process, flush all new external calls found in the recording
// since the last flush to the root replaying process.
void FlushExternalCalls();
// In a root replaying process, remember the output from an external call.
void AddExternalCallOutput(ExternalCallId aId, const char* aOutput,
size_t aOutputSize);
// In a root replaying process, fetch the output from an external call if known.
bool HasExternalCallOutput(ExternalCallId aId, InfallibleVector<char>* aOutput);
///////////////////////////////////////////////////////////////////////////////
// External Call Helpers
///////////////////////////////////////////////////////////////////////////////
// Capture a scalar argument.
template <size_t Arg>
static inline void EX_ScalarArg(ExternalCallContext& aCx) {
if (aCx.AccessInput()) {
auto& arg = aCx.mArguments->Arg<Arg, size_t>();
aCx.ReadOrWriteInputBytes(&arg, sizeof(arg));
}
}
// Capture a floating point argument.
template <size_t Arg>
static inline void EX_FloatArg(ExternalCallContext& aCx) {
if (aCx.AccessInput()) {
auto& arg = aCx.mArguments->FloatArg<Arg>();
aCx.ReadOrWriteInputBytes(&arg, sizeof(arg));
}
}
// Capture an input buffer at BufferArg with element count at CountArg.
// If IncludeContents is not set, the buffer's contents are not captured,
// but the buffer's pointer will be allocated with the correct size when
// restoring input.
template <size_t BufferArg, size_t CountArg, typename ElemType = char,
bool IncludeContents = true>
static inline void EX_Buffer(ExternalCallContext& aCx) {
EX_ScalarArg<CountArg>(aCx);
if (aCx.AccessInput()) {
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
auto byteSize = aCx.mArguments->Arg<CountArg, size_t>() * sizeof(ElemType);
aCx.ReadOrWriteInputBuffer(&buffer, byteSize, IncludeContents);
}
}
// Capture the contents of an optional input parameter.
template <size_t BufferArg, typename Type>
static inline void EX_InParam(ExternalCallContext& aCx) {
if (aCx.AccessInput()) {
auto& param = aCx.mArguments->Arg<BufferArg, void*>();
bool hasParam = !!param;
aCx.ReadOrWriteInputBytes(&hasParam, sizeof(hasParam));
if (hasParam) {
aCx.ReadOrWriteInputBuffer(&param, sizeof(Type));
} else {
param = nullptr;
}
}
}
// Capture a C string argument.
template <size_t StringArg>
static inline void EX_CString(ExternalCallContext& aCx) {
if (aCx.AccessInput()) {
auto& buffer = aCx.mArguments->Arg<StringArg, char*>();
size_t len = (aCx.mPhase == ExternalCallPhase::SaveInput)
? strlen(buffer) + 1
: 0;
aCx.ReadOrWriteInputBytes(&len, sizeof(len));
aCx.ReadOrWriteInputBuffer((void**)&buffer, len);
}
}
// Capture the data written to an output buffer at BufferArg with element count
// at CountArg.
template <size_t BufferArg, size_t CountArg, typename ElemType>
static inline void EX_WriteBuffer(ExternalCallContext& aCx) {
EX_ScalarArg<CountArg>(aCx);
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
auto count = aCx.mArguments->Arg<CountArg, size_t>();
aCx.ReadOrWriteOutputBuffer(&buffer, count * sizeof(ElemType));
}
// Capture the data written to an out parameter.
template <size_t BufferArg, typename Type>
static inline void EX_OutParam(ExternalCallContext& aCx) {
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
aCx.ReadOrWriteOutputBuffer(&buffer, sizeof(Type));
}
// Capture return values that are too large for register storage.
template <typename Type>
static inline void EX_OversizeRval(ExternalCallContext& aCx) {
EX_OutParam<0, Type>(aCx);
}
// Capture a byte count of stack argument data.
template <size_t ByteSize>
static inline void EX_StackArgumentData(ExternalCallContext& aCx) {
if (aCx.AccessInput()) {
auto stack = aCx.mArguments->StackAddress<0>();
aCx.ReadOrWriteInputBytes(stack, ByteSize);
}
}
// Avoid calling a function in the external process.
static inline void EX_SkipExecuting(ExternalCallContext& aCx) {
if (aCx.mPhase == ExternalCallPhase::RestoreInput) {
aCx.mSkipExecuting = true;
}
}
static inline void EX_NoOp(ExternalCallContext& aCx) {}
template <ExternalCallFn Fn0, ExternalCallFn Fn1,
ExternalCallFn Fn2 = EX_NoOp, ExternalCallFn Fn3 = EX_NoOp,
ExternalCallFn Fn4 = EX_NoOp, ExternalCallFn Fn5 = EX_NoOp>
static inline void EX_Compose(ExternalCallContext& aCx) {
Fn0(aCx);
Fn1(aCx);
Fn2(aCx);
Fn3(aCx);
Fn4(aCx);
Fn5(aCx);
}
// Helper for capturing inputs that are produced by other external calls.
// Returns false in the SaveInput phase if the input system value could not
// be found.
bool EX_SystemInput(ExternalCallContext& aCx, const void** aThingPtr);
// Helper for capturing output system values that might be consumed by other
// external calls.
void EX_SystemOutput(ExternalCallContext& aCx, const void** aOutput,
bool aUpdating = false);
void InitializeExternalCalls();
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_ExternalCall_h
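The EX_ helpers above are meant to compose into a single hook per redirection. A minimal sketch, with invented argument positions and an invented hook name (only the EX_ templates themselves come from this header), of what such a composed hook might look like:
static void EX_HypotheticalCall(ExternalCallContext& aCx) {
  EX_Compose<EX_ScalarArg<0>,              // capture a scalar handle/flag argument
             EX_CString<1>,                // capture a C string argument
             EX_WriteBuffer<2, 3, char>    // capture bytes written to the output
                                           // buffer at arg 2, with count at arg 3
             >(aCx);
}
A redirection entry would then name EX_HypotheticalCall as its external call hook.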

View file

@ -113,16 +113,14 @@ class StableHashTableInfo {
mTable(nullptr),
mCallbackHash(0) {
// Use DirectAllocateMemory, as the result will have RWX permissions.
mCallbackStorage =
(uint8_t*)AllocateMemory(CallbackStorageCapacity, MemoryKind::Tracked);
mCallbackStorage = (uint8_t*)DirectAllocateMemory(CallbackStorageCapacity);
MarkValid();
}
~StableHashTableInfo() {
MOZ_RELEASE_ASSERT(mHashToKey.empty());
DeallocateMemory(mCallbackStorage, CallbackStorageCapacity,
MemoryKind::Tracked);
DirectDeallocateMemory(mCallbackStorage, CallbackStorageCapacity);
UnmarkValid();
}

View file

@ -37,6 +37,9 @@ struct LockAcquires {
mNextOwner = NoNextOwner;
} else {
mNextOwner = mAcquires->ReadScalar();
if (!mNextOwner) {
Print("CRASH ReadAndNotifyNextOwner ZERO_ID\n");
}
if (mNextOwner != aCurrentThread->Id()) {
Thread::Notify(mNextOwner);
}
@ -53,13 +56,13 @@ static ChunkAllocator<LockAcquires> gLockAcquires;
// Table mapping native lock pointers to the associated Lock structure, for
// every recorded lock in existence.
typedef std::unordered_map<void*, Lock*> LockMap;
typedef std::unordered_map<NativeLock*, Lock*> LockMap;
static LockMap* gLocks;
static ReadWriteSpinLock gLocksLock;
static Lock* CreateNewLock(Thread* aThread, size_t aId) {
LockAcquires* info = gLockAcquires.Create(aId);
info->mAcquires = gRecordingFile->OpenStream(StreamName::Lock, aId);
info->mAcquires = gRecording->OpenStream(StreamName::Lock, aId);
if (IsReplaying()) {
info->ReadAndNotifyNextOwner(aThread);
@ -69,7 +72,7 @@ static Lock* CreateNewLock(Thread* aThread, size_t aId) {
}
/* static */
void Lock::New(void* aNativeLock) {
void Lock::New(NativeLock* aNativeLock) {
Thread* thread = Thread::Current();
RecordingEventSection res(thread);
if (!res.CanAccessEvents()) {
@ -104,7 +107,7 @@ void Lock::New(void* aNativeLock) {
}
/* static */
void Lock::Destroy(void* aNativeLock) {
void Lock::Destroy(NativeLock* aNativeLock) {
Lock* lock = nullptr;
{
AutoWriteSpinLock ex(gLocksLock);
@ -120,7 +123,7 @@ void Lock::Destroy(void* aNativeLock) {
}
/* static */
Lock* Lock::Find(void* aNativeLock) {
Lock* Lock::Find(NativeLock* aNativeLock) {
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
AutoReadSpinLock ex(gLocksLock);
@ -146,7 +149,7 @@ Lock* Lock::Find(void* aNativeLock) {
return nullptr;
}
void Lock::Enter() {
void Lock::Enter(NativeLock* aNativeLock) {
Thread* thread = Thread::Current();
RecordingEventSection res(thread);
@ -171,16 +174,18 @@ void Lock::Enter() {
!thread->MaybeDivergeFromRecording()) {
Thread::Wait();
}
if (!thread->HasDivergedFromRecording()) {
mOwner = thread->Id();
if (!thread->HasDivergedFromRecording() && aNativeLock) {
thread->AddOwnedLock(aNativeLock);
}
}
}
void Lock::Exit() {
void Lock::Exit(NativeLock* aNativeLock) {
Thread* thread = Thread::Current();
if (IsReplaying() && !thread->HasDivergedFromRecording()) {
mOwner = 0;
if (aNativeLock) {
thread->RemoveOwnedLock(aNativeLock);
}
// Notify the next owner before releasing the lock.
LockAcquires* acquires = gLockAcquires.Get(mId);
@ -189,7 +194,7 @@ void Lock::Exit() {
}
/* static */
void Lock::LockAquiresUpdated(size_t aLockId) {
void Lock::LockAcquiresUpdated(size_t aLockId) {
LockAcquires* acquires = gLockAcquires.MaybeGet(aLockId);
if (acquires && acquires->mAcquires &&
acquires->mNextOwner == LockAcquires::NoNextOwner) {
@ -258,7 +263,7 @@ MOZ_EXPORT void RecordReplayInterface_InternalBeginOrderedAtomicAccess(
gAtomicLockOwners[atomicId].Lock();
}
gAtomicLocks[atomicId]->Enter();
gAtomicLocks[atomicId]->Enter(nullptr);
MOZ_RELEASE_ASSERT(thread->AtomicLockId().isNothing());
thread->AtomicLockId().emplace(atomicId);
@ -281,7 +286,7 @@ MOZ_EXPORT void RecordReplayInterface_InternalEndOrderedAtomicAccess() {
gAtomicLockOwners[atomicId].Unlock();
}
gAtomicLocks[atomicId]->Exit();
gAtomicLocks[atomicId]->Exit(nullptr);
}
} // extern "C"

View file

@ -10,7 +10,7 @@
#include "mozilla/PodOperations.h"
#include "mozilla/Types.h"
#include "File.h"
#include "Recording.h"
namespace mozilla {
namespace recordreplay {
@ -29,11 +29,8 @@ class Lock {
// Unique ID for this lock.
size_t mId;
// When replaying, any thread owning this lock as part of the recording.
Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mOwner;
public:
explicit Lock(size_t aId) : mId(aId), mOwner(0) { MOZ_ASSERT(aId); }
explicit Lock(size_t aId) : mId(aId) { MOZ_ASSERT(aId); }
size_t Id() { return mId; }
@ -41,26 +38,26 @@ class Lock {
// records the acquire in the lock's acquire order stream. When replaying,
// this is called before the lock has been acquired, and blocks the thread
// until it is next in line to acquire the lock.
void Enter();
void Enter(NativeLock* aNativeLock);
// This is called before releasing the lock, allowing the next owner to
// acquire it while replaying.
void Exit();
void Exit(NativeLock* aNativeLock);
// Create a new Lock corresponding to a native lock, with a fresh ID.
static void New(void* aNativeLock);
static void New(NativeLock* aNativeLock);
// Destroy any Lock associated with a native lock.
static void Destroy(void* aNativeLock);
static void Destroy(NativeLock* aNativeLock);
// Get the recorded Lock for a native lock if there is one, otherwise null.
static Lock* Find(void* aNativeLock);
static Lock* Find(NativeLock* aNativeLock);
// Initialize locking state.
static void InitializeLocks();
// Note that new data has been read into a lock's acquires stream.
static void LockAquiresUpdated(size_t aLockId);
static void LockAcquiresUpdated(size_t aLockId);
};
} // namespace recordreplay
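A rough sketch, not taken from these sources, of how a redirection wrapping a native mutex might drive this interface; the two hook names are invented, while Find, Enter, and Exit are the members declared above.
static void HypotheticalRecordedLockEnter(NativeLock* aNativeLock) {
  if (Lock* lock = Lock::Find(aNativeLock)) {
    // When recording, note the acquire; when replaying, block until this
    // thread is next in the recorded acquire order.
    lock->Enter(aNativeLock);
  }
}

static void HypotheticalRecordedLockExit(NativeLock* aNativeLock) {
  if (Lock* lock = Lock::Find(aNativeLock)) {
    // Let the next recorded owner proceed while replaying.
    lock->Exit(aNativeLock);
  }
}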

Diff between files not shown because of its large size.

View file

@ -1,129 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_MemorySnapshot_h
#define mozilla_recordreplay_MemorySnapshot_h
#include "mozilla/Types.h"
#include "ProcessRecordReplay.h"
namespace mozilla {
namespace recordreplay {
// Memory Snapshots Overview.
//
// As described in ProcessRewind.h, periodically snapshots are saved so that
// their state can be restored later. Memory snapshots are used to save and
// restore the contents of all heap memory: everything except thread stacks
// (see ThreadSnapshot.h for saving and restoring these) and untracked memory
// (which is not saved or restored, see ProcessRecordReplay.h).
//
// Each memory snapshot is a diff of the heap memory contents compared to the
// next one. See MemorySnapshot.cpp for how diffs are represented and computed.
//
// Rewinding must restore the exact contents of heap memory that existed when
// the target snapshot was reached. Because of this, memory that is allocated
// after a point when a snapshot has been saved will never actually be returned
// to the system. We instead keep a set of free blocks that are unused at the
// current point of execution and are available to satisfy new allocations.
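A rough sketch, with an invented driver function, of the lifecycle these declarations imply; the real call sites live in the rewind machinery rather than in this header.
static void SketchSnapshotLifecycle() {
  InitializeMemorySnapshots();
  TakeFirstMemorySnapshot();      // complete snapshot of tracked heap memory
  // ... execution proceeds and dirties tracked pages ...
  TakeDiffMemorySnapshot();       // save only pages changed since the last snapshot
  // ... later, when rewinding to the most recent snapshot ...
  RestoreMemoryToLastSnapshot();
  FixupFreeRegionsAfterRewind();  // memory allocated after that snapshot becomes free again
}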
// Make sure that a block of memory in a fixed allocation is already allocated.
void CheckFixedMemory(void* aAddress, size_t aSize);
// After marking a block of memory in a fixed allocation as non-writable,
// restore writability to any dirty pages in the range.
void RestoreWritableFixedMemory(void* aAddress, size_t aSize);
// Allocate memory, trying to use a specific address if provided but only if
// it is free.
void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind);
// Note a range of memory that was just allocated from the system, and the
// kind of memory allocation that was performed.
void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize,
MemoryKind aKind);
// Exclude a region of memory from snapshots, before the first snapshot has
// been taken.
void AddInitialUntrackedMemoryRegion(uint8_t* aBase, size_t aSize);
// Return whether a range of memory is in a tracked region. This excludes
// memory that was allocated after the last snapshot and is not write
// protected.
bool MemoryRangeIsTracked(void* aAddress, size_t aSize);
// Initialize the memory snapshots system.
void InitializeMemorySnapshots();
// Take the first heap memory snapshot.
void TakeFirstMemorySnapshot();
// Take a differential heap memory snapshot compared to the last one.
void TakeDiffMemorySnapshot();
// Restore all heap memory to its state when the most recent snapshot was
// taken.
void RestoreMemoryToLastSnapshot();
// Restore all heap memory to its state at a snapshot where a complete diff
// was saved vs. the following snapshot. This requires that no tracked heap
// memory has been changed since the last snapshot.
void RestoreMemoryToLastDiffSnapshot();
// Set whether to allow changes to tracked heap memory at this point. If such
// changes occur when they are not allowed then the process will crash.
void SetMemoryChangesAllowed(bool aAllowed);
struct MOZ_RAII AutoDisallowMemoryChanges {
AutoDisallowMemoryChanges() { SetMemoryChangesAllowed(false); }
~AutoDisallowMemoryChanges() { SetMemoryChangesAllowed(true); }
};
// After a SEGV on the specified address, check if the violation occurred due
// to the memory having been write protected by the snapshot mechanism. This
// function returns whether the fault has been handled and execution may
// continue.
bool HandleDirtyMemoryFault(uint8_t* aAddress);
// For debugging, note a point where we hit an unrecoverable failure and try
// to make things easier for the debugger.
void UnrecoverableSnapshotFailure();
// After rewinding, mark all memory that has been allocated since the snapshot
// was taken as free.
void FixupFreeRegionsAfterRewind();
// When WANT_COUNTDOWN_THREAD is defined (see MemorySnapshot.cpp), set a count
// that, after a thread consumes it, causes the thread to report a fatal error.
// This is used for debugging and is a workaround for lldb often being unable
// to interrupt a running process.
void StartCountdown(size_t aCount);
// Per StartCountdown, set a countdown and remove it on destruction.
struct MOZ_RAII AutoCountdown {
explicit AutoCountdown(size_t aCount);
~AutoCountdown();
};
// Initialize the thread consuming the countdown.
void InitializeCountdownThread();
// This is an alternative to memmove/memcpy that can be called in areas where
// faults in write protected memory are not allowed. It's hard to avoid dynamic
// code loading when calling memmove/memcpy directly.
void MemoryMove(void* aDst, const void* aSrc, size_t aSize);
// Similarly, zero out a range of memory without doing anything weird with
// dynamic code loading.
void MemoryZero(void* aDst, size_t aSize);
// Get the amount of allocated memory used by data of the specified kind.
size_t GetMemoryUsage(MemoryKind aKind);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_MemorySnapshot_h

View file

@ -1,458 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MiddlemanCall.h"
#include <unordered_map>
namespace mozilla {
namespace recordreplay {
typedef std::unordered_map<const void*, MiddlemanCall*> MiddlemanCallMap;
// State used for keeping track of middleman calls in either a replaying
// process or middleman process.
struct MiddlemanCallState {
// In a replaying or middleman process, all middleman calls that have been
// encountered, indexed by their ID.
InfallibleVector<MiddlemanCall*> mCalls;
// In a replaying or middleman process, association between values produced by
// a middleman call and the call itself.
MiddlemanCallMap mCallMap;
// In a middleman process, any buffers allocated for performed calls.
InfallibleVector<void*> mAllocatedBuffers;
};
// In a replaying process, all middleman call state. In a middleman process,
// state for the child currently being processed.
static MiddlemanCallState* gState;
// In a middleman process, middleman call state for each child process, indexed
// by the child ID.
static StaticInfallibleVector<MiddlemanCallState*> gStatePerChild;
// In a replaying process, lock protecting middleman call state. In the
// middleman, all accesses occur on the main thread.
static Monitor* gMonitor;
void InitializeMiddlemanCalls() {
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying() || IsMiddleman());
if (IsReplaying()) {
gState = new MiddlemanCallState();
gMonitor = new Monitor();
}
}
// Apply the ReplayInput phase to aCall and any calls it depends on that have
// not been sent to the middleman yet, filling aOutgoingCalls with the set of
// such calls.
static bool GatherDependentCalls(
InfallibleVector<MiddlemanCall*>& aOutgoingCalls, MiddlemanCall* aCall) {
MOZ_RELEASE_ASSERT(!aCall->mSent);
aCall->mSent = true;
const Redirection& redirection = GetRedirection(aCall->mCallId);
CallArguments arguments;
aCall->mArguments.CopyTo(&arguments);
InfallibleVector<MiddlemanCall*> dependentCalls;
MiddlemanCallContext cx(aCall, &arguments, MiddlemanCallPhase::ReplayInput);
cx.mDependentCalls = &dependentCalls;
redirection.mMiddlemanCall(cx);
if (cx.mFailed) {
if (child::CurrentRepaintCannotFail()) {
child::ReportFatalError(Nothing(), "Middleman call input failed: %s\n",
redirection.mName);
}
return false;
}
for (MiddlemanCall* dependent : dependentCalls) {
if (!dependent->mSent && !GatherDependentCalls(aOutgoingCalls, dependent)) {
return false;
}
}
aOutgoingCalls.append(aCall);
return true;
}
bool SendCallToMiddleman(size_t aCallId, CallArguments* aArguments,
bool aDiverged) {
MOZ_RELEASE_ASSERT(IsReplaying());
const Redirection& redirection = GetRedirection(aCallId);
MOZ_RELEASE_ASSERT(redirection.mMiddlemanCall);
MonitorAutoLock lock(*gMonitor);
// Allocate and fill in a new MiddlemanCall.
size_t id = gState->mCalls.length();
MiddlemanCall* newCall = new MiddlemanCall();
gState->mCalls.emplaceBack(newCall);
newCall->mId = id;
newCall->mCallId = aCallId;
newCall->mArguments.CopyFrom(aArguments);
// Perform the ReplayPreface phase on the new call.
{
MiddlemanCallContext cx(newCall, aArguments,
MiddlemanCallPhase::ReplayPreface);
redirection.mMiddlemanCall(cx);
if (cx.mFailed) {
delete newCall;
gState->mCalls.popBack();
if (child::CurrentRepaintCannotFail()) {
child::ReportFatalError(Nothing(),
"Middleman call preface failed: %s\n",
redirection.mName);
}
return false;
}
}
// Other phases will not run if we have not diverged from the recording.
// Any outputs for the call have been handled by the SaveOutput hook.
if (!aDiverged) {
return true;
}
// Perform the ReplayInput phase on the new call and any others it depends on.
InfallibleVector<MiddlemanCall*> outgoingCalls;
if (!GatherDependentCalls(outgoingCalls, newCall)) {
for (MiddlemanCall* call : outgoingCalls) {
call->mSent = false;
}
return false;
}
// Encode all calls we are sending to the middleman.
InfallibleVector<char> inputData;
BufferStream inputStream(&inputData);
for (MiddlemanCall* call : outgoingCalls) {
call->EncodeInput(inputStream);
}
// Perform the calls synchronously in the middleman.
InfallibleVector<char> outputData;
child::SendMiddlemanCallRequest(inputData.begin(), inputData.length(),
&outputData);
// Decode outputs for the calls just sent, and perform the ReplayOutput phase
// on any older dependent calls we sent.
BufferStream outputStream(outputData.begin(), outputData.length());
for (MiddlemanCall* call : outgoingCalls) {
call->DecodeOutput(outputStream);
if (call != newCall) {
CallArguments oldArguments;
call->mArguments.CopyTo(&oldArguments);
MiddlemanCallContext cx(call, &oldArguments,
MiddlemanCallPhase::ReplayOutput);
cx.mReplayOutputIsOld = true;
GetRedirection(call->mCallId).mMiddlemanCall(cx);
}
}
// Perform the ReplayOutput phase to fill in outputs for the current call.
newCall->mArguments.CopyTo(aArguments);
MiddlemanCallContext cx(newCall, aArguments,
MiddlemanCallPhase::ReplayOutput);
redirection.mMiddlemanCall(cx);
return true;
}
void ProcessMiddlemanCall(size_t aChildId, const char* aInputData,
size_t aInputSize,
InfallibleVector<char>* aOutputData) {
MOZ_RELEASE_ASSERT(IsMiddleman());
while (aChildId >= gStatePerChild.length()) {
gStatePerChild.append(nullptr);
}
if (!gStatePerChild[aChildId]) {
gStatePerChild[aChildId] = new MiddlemanCallState();
}
gState = gStatePerChild[aChildId];
BufferStream inputStream(aInputData, aInputSize);
BufferStream outputStream(aOutputData);
while (!inputStream.IsEmpty()) {
MiddlemanCall* call = new MiddlemanCall();
call->DecodeInput(inputStream);
const Redirection& redirection = GetRedirection(call->mCallId);
MOZ_RELEASE_ASSERT(redirection.mMiddlemanCall);
CallArguments arguments;
call->mArguments.CopyTo(&arguments);
bool skipCall;
{
MiddlemanCallContext cx(call, &arguments,
MiddlemanCallPhase::MiddlemanInput);
redirection.mMiddlemanCall(cx);
skipCall = cx.mSkipCallInMiddleman;
}
if (!skipCall) {
RecordReplayInvokeCall(redirection.mBaseFunction, &arguments);
}
{
MiddlemanCallContext cx(call, &arguments,
MiddlemanCallPhase::MiddlemanOutput);
redirection.mMiddlemanCall(cx);
}
call->mArguments.CopyFrom(&arguments);
call->EncodeOutput(outputStream);
while (call->mId >= gState->mCalls.length()) {
gState->mCalls.emplaceBack(nullptr);
}
MOZ_RELEASE_ASSERT(!gState->mCalls[call->mId]);
gState->mCalls[call->mId] = call;
}
gState = nullptr;
}
void* MiddlemanCallContext::AllocateBytes(size_t aSize) {
void* rv = malloc(aSize);
// In a middleman process, any buffers we allocate live until the calls are
// reset. In a replaying process, the buffers will either live forever
// (if they are allocated in the ReplayPreface phase, to match the lifetime
// of the MiddlemanCall itself) or will be recovered when we rewind after we
// are done with our divergence from the recording (any other phase).
if (IsMiddleman()) {
gState->mAllocatedBuffers.append(rv);
}
return rv;
}
void ResetMiddlemanCalls(size_t aChildId) {
MOZ_RELEASE_ASSERT(IsMiddleman());
if (aChildId >= gStatePerChild.length()) {
return;
}
gState = gStatePerChild[aChildId];
if (!gState) {
return;
}
for (MiddlemanCall* call : gState->mCalls) {
if (call) {
CallArguments arguments;
call->mArguments.CopyTo(&arguments);
MiddlemanCallContext cx(call, &arguments,
MiddlemanCallPhase::MiddlemanRelease);
GetRedirection(call->mCallId).mMiddlemanCall(cx);
}
}
// Delete the calls in a second pass. The MiddlemanRelease phase depends on
// previous middleman calls still existing.
for (MiddlemanCall* call : gState->mCalls) {
delete call;
}
gState->mCalls.clear();
for (auto buffer : gState->mAllocatedBuffers) {
free(buffer);
}
gState->mAllocatedBuffers.clear();
gState->mCallMap.clear();
gState = nullptr;
}
///////////////////////////////////////////////////////////////////////////////
// System Values
///////////////////////////////////////////////////////////////////////////////
static void AddMiddlemanCallValue(const void* aThing, MiddlemanCall* aCall) {
gState->mCallMap.erase(aThing);
gState->mCallMap.insert(MiddlemanCallMap::value_type(aThing, aCall));
}
static MiddlemanCall* LookupMiddlemanCall(const void* aThing) {
MiddlemanCallMap::const_iterator iter = gState->mCallMap.find(aThing);
if (iter != gState->mCallMap.end()) {
return iter->second;
}
return nullptr;
}
static const void* GetMiddlemanCallValue(size_t aId) {
MOZ_RELEASE_ASSERT(IsMiddleman());
MOZ_RELEASE_ASSERT(aId < gState->mCalls.length() && gState->mCalls[aId] &&
gState->mCalls[aId]->mMiddlemanValue.isSome());
return gState->mCalls[aId]->mMiddlemanValue.ref();
}
bool MM_SystemInput(MiddlemanCallContext& aCx, const void** aThingPtr) {
MOZ_RELEASE_ASSERT(aCx.AccessPreface());
if (!*aThingPtr) {
// Null values are handled by the normal argument copying logic.
return true;
}
Maybe<size_t> callId;
if (aCx.mPhase == MiddlemanCallPhase::ReplayPreface) {
// Determine any middleman call this object came from, before the pointer
// has a chance to be clobbered by another call between this and the
// ReplayInput phase.
MiddlemanCall* call = LookupMiddlemanCall(*aThingPtr);
if (call) {
callId.emplace(call->mId);
}
}
aCx.ReadOrWritePrefaceBytes(&callId, sizeof(callId));
switch (aCx.mPhase) {
case MiddlemanCallPhase::ReplayPreface:
return true;
case MiddlemanCallPhase::ReplayInput:
if (callId.isSome()) {
aCx.WriteInputScalar(callId.ref());
aCx.mDependentCalls->append(gState->mCalls[callId.ref()]);
return true;
}
return false;
case MiddlemanCallPhase::MiddlemanInput:
if (callId.isSome()) {
size_t callIndex = aCx.ReadInputScalar();
*aThingPtr = GetMiddlemanCallValue(callIndex);
return true;
}
return false;
default:
MOZ_CRASH("Bad phase");
}
}
// Pointer system values are preserved during the replay so that null tests
// and equality tests work as expected. We additionally mangle the
// pointers here by setting one of the two highest bits, depending on whether
// the pointer came from the recording or from the middleman. This avoids
// accidentally conflating pointers that happen to have the same value but
// which originate from different processes.
static const void* MangleSystemValue(const void* aValue, bool aFromRecording) {
return (const void*)((size_t)aValue | (1ULL << (aFromRecording ? 63 : 62)));
}
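For instance, a hypothetical address mangled once for each origin yields two distinct values, so equality tests cannot conflate the two processes; the address below is illustrative only.
static void IllustrateMangling() {
  const void* raw = reinterpret_cast<const void*>(0x00007f1234560000);
  const void* fromRecording = MangleSystemValue(raw, /* aFromRecording */ true);   // sets bit 63
  const void* fromMiddleman = MangleSystemValue(raw, /* aFromRecording */ false);  // sets bit 62
  MOZ_RELEASE_ASSERT(fromRecording != fromMiddleman);
}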
void MM_SystemOutput(MiddlemanCallContext& aCx, const void** aOutput,
bool aUpdating) {
if (!*aOutput) {
if (aCx.mPhase == MiddlemanCallPhase::MiddlemanOutput) {
aCx.mCall->SetMiddlemanValue(*aOutput);
}
return;
}
switch (aCx.mPhase) {
case MiddlemanCallPhase::ReplayPreface:
if (!HasDivergedFromRecording()) {
// If we haven't diverged from the recording, use the output value saved
// in the recording.
if (!aUpdating) {
*aOutput = MangleSystemValue(*aOutput, true);
}
aCx.mCall->SetRecordingValue(*aOutput);
AddMiddlemanCallValue(*aOutput, aCx.mCall);
}
break;
case MiddlemanCallPhase::MiddlemanOutput:
aCx.mCall->SetMiddlemanValue(*aOutput);
AddMiddlemanCallValue(*aOutput, aCx.mCall);
break;
case MiddlemanCallPhase::ReplayOutput: {
if (!aUpdating) {
*aOutput = MangleSystemValue(*aOutput, false);
}
aCx.mCall->SetMiddlemanValue(*aOutput);
// Associate the value produced by the middleman with this call. If the
// call previously went through the ReplayPreface phase when we did not
// diverge from the recording, we will associate values from both the
// recording and middleman processes with this call. If a call made after
// diverging produced the same value as a call made before diverging, use
// the value saved in the recording for the first call, so that equality
// tests on the value work as expected.
MiddlemanCall* previousCall = LookupMiddlemanCall(*aOutput);
if (previousCall) {
if (previousCall->mRecordingValue.isSome()) {
*aOutput = previousCall->mRecordingValue.ref();
}
} else {
AddMiddlemanCallValue(*aOutput, aCx.mCall);
}
break;
}
default:
return;
}
}
///////////////////////////////////////////////////////////////////////////////
// MiddlemanCall
///////////////////////////////////////////////////////////////////////////////
void MiddlemanCall::EncodeInput(BufferStream& aStream) const {
aStream.WriteScalar(mId);
aStream.WriteScalar(mCallId);
aStream.WriteBytes(&mArguments, sizeof(CallRegisterArguments));
aStream.WriteScalar(mPreface.length());
aStream.WriteBytes(mPreface.begin(), mPreface.length());
aStream.WriteScalar(mInput.length());
aStream.WriteBytes(mInput.begin(), mInput.length());
}
void MiddlemanCall::DecodeInput(BufferStream& aStream) {
mId = aStream.ReadScalar();
mCallId = aStream.ReadScalar();
aStream.ReadBytes(&mArguments, sizeof(CallRegisterArguments));
size_t prefaceLength = aStream.ReadScalar();
mPreface.appendN(0, prefaceLength);
aStream.ReadBytes(mPreface.begin(), prefaceLength);
size_t inputLength = aStream.ReadScalar();
mInput.appendN(0, inputLength);
aStream.ReadBytes(mInput.begin(), inputLength);
}
void MiddlemanCall::EncodeOutput(BufferStream& aStream) const {
aStream.WriteBytes(&mArguments, sizeof(CallRegisterArguments));
aStream.WriteScalar(mOutput.length());
aStream.WriteBytes(mOutput.begin(), mOutput.length());
}
void MiddlemanCall::DecodeOutput(BufferStream& aStream) {
// Only update the return value when decoding arguments, so that we don't
// clobber the call's arguments with any changes made in the middleman.
CallRegisterArguments newArguments;
aStream.ReadBytes(&newArguments, sizeof(CallRegisterArguments));
mArguments.CopyRvalFrom(&newArguments);
size_t outputLength = aStream.ReadScalar();
mOutput.appendN(0, outputLength);
aStream.ReadBytes(mOutput.begin(), outputLength);
}
} // namespace recordreplay
} // namespace mozilla

View file

@ -1,458 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_MiddlemanCall_h
#define mozilla_recordreplay_MiddlemanCall_h
#include "BufferStream.h"
#include "ProcessRedirect.h"
#include "mozilla/Maybe.h"
namespace mozilla {
namespace recordreplay {
// Middleman Calls Overview
//
// With few exceptions, replaying processes do not interact with the underlying
// system or call the actual versions of redirected system library functions.
// This is problematic after diverging from the recording, as then the diverged
// thread cannot interact with its recording either.
//
// Middleman calls are used in a replaying process after diverging from the
// recording to perform calls in the middleman process instead. Inputs are
// gathered and serialized in the replaying process, then sent to the middleman
// process. The middleman calls the function, and its outputs are serialized
// for reading by the replaying process.
//
// Calls that might need to be sent to the middleman are processed in phases,
// per the MiddlemanCallPhase enum below. The timeline of a middleman call is
// as follows:
//
// - Any redirection with a middleman call hook can potentially be sent to the
// middleman. In a replaying process, whenever such a call is encountered,
// the hook is invoked in the ReplayPreface phase to capture any input data
// that must be examined at the time of the call itself.
//
// - If the thread has not diverged from the recording, the call is remembered
// but no further action is necessary yet.
//
// - If the thread has diverged from the recording, the call needs to go
// through the remaining phases. The ReplayInput phase captures any
// additional inputs to the call, potentially including values produced by
// other middleman calls.
//
// - The transitive closure of these call dependencies is produced, and all
// calls found go through the ReplayInput phase. The resulting data is sent
// to the middleman process, which goes through the MiddlemanInput phase
// to decode those inputs.
//
// - The middleman performs each of the calls it has been given, and their
// outputs are encoded in the MiddlemanOutput phase. These outputs are sent
// to the replaying process in a response and decoded in the ReplayOutput
// phase, which can then resume execution.
//
// - The replaying process holds onto information about calls it has sent until
// it rewinds to a point before it diverged from the recording. This rewind
// will --- without any special action required --- wipe out information on
// all calls sent to the middleman, and retain any data gathered in the
// ReplayPreface phase for calls that were made prior to the rewind target.
//
// - Information about calls, and all resources held in the middleman process,
// are retained until a replaying process asks for them to
// be reset, which happens any time the replaying process first diverges from
// the recording. The MiddlemanRelease phase is used to release any system
// resources held.
// Ways of processing calls that can be sent to the middleman.
enum class MiddlemanCallPhase {
// When replaying, a call is being performed that might need to be sent to
// the middleman later.
ReplayPreface,
// A call for which inputs have been gathered is now being sent to the
// middleman. This is separate from ReplayPreface because capturing inputs
// might need to dereference pointers that could be bogus values originating
// from the recording. Waiting to dereference these pointers until we know
// the call needs to be sent to the middleman avoids needing to understand
// the inputs to all call sites of general purpose redirections such as
// CFArrayCreate.
ReplayInput,
// In the middleman process, a call from the replaying process is being
// performed.
MiddlemanInput,
// In the middleman process, a call from the replaying process was just
// performed, and its outputs need to be saved.
MiddlemanOutput,
// Back in the replaying process, the outputs from a call have been received
// from the middleman.
ReplayOutput,
// In the middleman process, release any system resources held after this
// call.
MiddlemanRelease,
};
struct MiddlemanCall {
// Unique ID for this call.
size_t mId;
// ID of the redirection being invoked.
size_t mCallId;
// All register arguments and return values are preserved when sending the
// call back and forth between processes.
CallRegisterArguments mArguments;
// Written in ReplayPreface, read in ReplayInput and MiddlemanInput.
InfallibleVector<char> mPreface;
// Written in ReplayInput, read in MiddlemanInput.
InfallibleVector<char> mInput;
// Written in MiddlemanOutput, read in ReplayOutput.
InfallibleVector<char> mOutput;
// In a replaying process, whether this call has been sent to the middleman.
bool mSent;
// In a replaying process, any value associated with this call that was
// included in the recording, when the call was made before diverging from
// the recording.
Maybe<const void*> mRecordingValue;
// In a replaying or middleman process, any value associated with this call
// that was produced by the middleman itself.
Maybe<const void*> mMiddlemanValue;
MiddlemanCall() : mId(0), mCallId(0), mSent(false) {}
void EncodeInput(BufferStream& aStream) const;
void DecodeInput(BufferStream& aStream);
void EncodeOutput(BufferStream& aStream) const;
void DecodeOutput(BufferStream& aStream);
void SetRecordingValue(const void* aValue) {
MOZ_RELEASE_ASSERT(mRecordingValue.isNothing());
mRecordingValue.emplace(aValue);
}
void SetMiddlemanValue(const void* aValue) {
MOZ_RELEASE_ASSERT(mMiddlemanValue.isNothing());
mMiddlemanValue.emplace(aValue);
}
};
// Information needed to process one of the phases of a middleman call,
// in either the replaying or middleman process.
struct MiddlemanCallContext {
// Call being operated on.
MiddlemanCall* mCall;
// Complete arguments and return value information for the call.
CallArguments* mArguments;
// Current processing phase.
MiddlemanCallPhase mPhase;
// During the ReplayPreface or ReplayInput phases, whether capturing input
// data has failed. In such cases the call cannot be sent to the middleman
// and, if the thread has diverged from the recording, an unhandled
// divergence and associated rewind will occur.
bool mFailed;
// This can be set in the MiddlemanInput phase to avoid performing the call
// in the middleman process.
bool mSkipCallInMiddleman;
// During the ReplayInput phase, this can be used to fill in any middleman
// calls whose output the current one depends on.
InfallibleVector<MiddlemanCall*>* mDependentCalls;
// Streams of data that can be accessed during the various phases. Streams
// need to be read from or written to at the same points in the phases which use
// them, so that callbacks operating on these streams can be composed without
// issues.
// The preface is written during ReplayPreface, and read during both
// ReplayInput and MiddlemanInput.
Maybe<BufferStream> mPrefaceStream;
// Inputs are written during ReplayInput, and read during MiddlemanInput.
Maybe<BufferStream> mInputStream;
// Outputs are written during MiddlemanOutput, and read during ReplayOutput.
Maybe<BufferStream> mOutputStream;
// During the ReplayOutput phase, this is set if the call was made sometime
// in the past and pointers referred to in the arguments may no longer be
// valid.
bool mReplayOutputIsOld;
MiddlemanCallContext(MiddlemanCall* aCall, CallArguments* aArguments,
MiddlemanCallPhase aPhase)
: mCall(aCall),
mArguments(aArguments),
mPhase(aPhase),
mFailed(false),
mSkipCallInMiddleman(false),
mDependentCalls(nullptr),
mReplayOutputIsOld(false) {
switch (mPhase) {
case MiddlemanCallPhase::ReplayPreface:
mPrefaceStream.emplace(&mCall->mPreface);
break;
case MiddlemanCallPhase::ReplayInput:
mPrefaceStream.emplace(mCall->mPreface.begin(),
mCall->mPreface.length());
mInputStream.emplace(&mCall->mInput);
break;
case MiddlemanCallPhase::MiddlemanInput:
mPrefaceStream.emplace(mCall->mPreface.begin(),
mCall->mPreface.length());
mInputStream.emplace(mCall->mInput.begin(), mCall->mInput.length());
break;
case MiddlemanCallPhase::MiddlemanOutput:
mOutputStream.emplace(&mCall->mOutput);
break;
case MiddlemanCallPhase::ReplayOutput:
mOutputStream.emplace(mCall->mOutput.begin(), mCall->mOutput.length());
break;
case MiddlemanCallPhase::MiddlemanRelease:
break;
}
}
void MarkAsFailed() {
MOZ_RELEASE_ASSERT(mPhase == MiddlemanCallPhase::ReplayPreface ||
mPhase == MiddlemanCallPhase::ReplayInput);
mFailed = true;
}
void WriteInputBytes(const void* aBuffer, size_t aSize) {
MOZ_RELEASE_ASSERT(mPhase == MiddlemanCallPhase::ReplayInput);
mInputStream.ref().WriteBytes(aBuffer, aSize);
}
void WriteInputScalar(size_t aValue) {
MOZ_RELEASE_ASSERT(mPhase == MiddlemanCallPhase::ReplayInput);
mInputStream.ref().WriteScalar(aValue);
}
void ReadInputBytes(void* aBuffer, size_t aSize) {
MOZ_RELEASE_ASSERT(mPhase == MiddlemanCallPhase::MiddlemanInput);
mInputStream.ref().ReadBytes(aBuffer, aSize);
}
size_t ReadInputScalar() {
MOZ_RELEASE_ASSERT(mPhase == MiddlemanCallPhase::MiddlemanInput);
return mInputStream.ref().ReadScalar();
}
bool AccessInput() { return mInputStream.isSome(); }
void ReadOrWriteInputBytes(void* aBuffer, size_t aSize) {
switch (mPhase) {
case MiddlemanCallPhase::ReplayInput:
WriteInputBytes(aBuffer, aSize);
break;
case MiddlemanCallPhase::MiddlemanInput:
ReadInputBytes(aBuffer, aSize);
break;
default:
MOZ_CRASH();
}
}
bool AccessPreface() { return mPrefaceStream.isSome(); }
void ReadOrWritePrefaceBytes(void* aBuffer, size_t aSize) {
switch (mPhase) {
case MiddlemanCallPhase::ReplayPreface:
mPrefaceStream.ref().WriteBytes(aBuffer, aSize);
break;
case MiddlemanCallPhase::ReplayInput:
case MiddlemanCallPhase::MiddlemanInput:
mPrefaceStream.ref().ReadBytes(aBuffer, aSize);
break;
default:
MOZ_CRASH();
}
}
void ReadOrWritePrefaceBuffer(void** aBufferPtr, size_t aSize) {
switch (mPhase) {
case MiddlemanCallPhase::ReplayPreface:
mPrefaceStream.ref().WriteBytes(*aBufferPtr, aSize);
break;
case MiddlemanCallPhase::ReplayInput:
case MiddlemanCallPhase::MiddlemanInput:
*aBufferPtr = AllocateBytes(aSize);
mPrefaceStream.ref().ReadBytes(*aBufferPtr, aSize);
break;
default:
MOZ_CRASH();
}
}
bool AccessOutput() { return mOutputStream.isSome(); }
void ReadOrWriteOutputBytes(void* aBuffer, size_t aSize) {
switch (mPhase) {
case MiddlemanCallPhase::MiddlemanOutput:
mOutputStream.ref().WriteBytes(aBuffer, aSize);
break;
case MiddlemanCallPhase::ReplayOutput:
mOutputStream.ref().ReadBytes(aBuffer, aSize);
break;
default:
MOZ_CRASH();
}
}
void ReadOrWriteOutputBuffer(void** aBuffer, size_t aSize) {
if (*aBuffer) {
if (mPhase == MiddlemanCallPhase::MiddlemanInput || mReplayOutputIsOld) {
*aBuffer = AllocateBytes(aSize);
}
if (AccessOutput()) {
ReadOrWriteOutputBytes(*aBuffer, aSize);
}
}
}
// Allocate some memory associated with the call, which will be released in
// the replaying process on a rewind and in the middleman process when the
// call state is reset.
void* AllocateBytes(size_t aSize);
};
// Notify the system about a call to a redirection with a middleman call hook.
// aDiverged is set if the current thread has diverged from the recording and
// any outputs for the call must be filled in; otherwise, they already have
// been filled in using data from the recording. Returns false if the call
// could not be processed.
bool SendCallToMiddleman(size_t aCallId, CallArguments* aArguments,
bool aDiverged);
// In the middleman process, perform one or more calls encoded in aInputData
// and encode their outputs to aOutputData. The calls are associated with the
// specified child process ID.
void ProcessMiddlemanCall(size_t aChildId, const char* aInputData,
size_t aInputSize,
InfallibleVector<char>* aOutputData);
// In the middleman process, reset all call state for a child process ID.
void ResetMiddlemanCalls(size_t aChildId);
///////////////////////////////////////////////////////////////////////////////
// Middleman Call Helpers
///////////////////////////////////////////////////////////////////////////////
// Capture the contents of an input buffer at BufferArg with element count at
// CountArg.
template <size_t BufferArg, size_t CountArg, typename ElemType = char>
static inline void MM_Buffer(MiddlemanCallContext& aCx) {
if (aCx.AccessPreface()) {
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
auto byteSize = aCx.mArguments->Arg<CountArg, size_t>() * sizeof(ElemType);
aCx.ReadOrWritePrefaceBuffer(&buffer, byteSize);
}
}
// Capture the contents of a fixed size input buffer.
template <size_t BufferArg, size_t ByteSize>
static inline void MM_BufferFixedSize(MiddlemanCallContext& aCx) {
if (aCx.AccessPreface()) {
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
if (buffer) {
aCx.ReadOrWritePrefaceBuffer(&buffer, ByteSize);
}
}
}
// Capture a C string argument.
template <size_t StringArg>
static inline void MM_CString(MiddlemanCallContext& aCx) {
if (aCx.AccessPreface()) {
auto& buffer = aCx.mArguments->Arg<StringArg, char*>();
size_t len = (aCx.mPhase == MiddlemanCallPhase::ReplayPreface)
? strlen(buffer) + 1
: 0;
aCx.ReadOrWritePrefaceBytes(&len, sizeof(len));
aCx.ReadOrWritePrefaceBuffer((void**)&buffer, len);
}
}
// Capture the data written to an output buffer at BufferArg with element count
// at CountArg.
template <size_t BufferArg, size_t CountArg, typename ElemType>
static inline void MM_WriteBuffer(MiddlemanCallContext& aCx) {
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
auto count = aCx.mArguments->Arg<CountArg, size_t>();
aCx.ReadOrWriteOutputBuffer(&buffer, count * sizeof(ElemType));
}
// Capture the data written to a fixed size output buffer.
template <size_t BufferArg, size_t ByteSize>
static inline void MM_WriteBufferFixedSize(MiddlemanCallContext& aCx) {
auto& buffer = aCx.mArguments->Arg<BufferArg, void*>();
aCx.ReadOrWriteOutputBuffer(&buffer, ByteSize);
}
// Capture return values that are too large for register storage.
template <size_t ByteSize>
static inline void MM_OversizeRval(MiddlemanCallContext& aCx) {
MM_WriteBufferFixedSize<0, ByteSize>(aCx);
}
// Capture a byte count of stack argument data.
template <size_t ByteSize>
static inline void MM_StackArgumentData(MiddlemanCallContext& aCx) {
if (aCx.AccessPreface()) {
auto stack = aCx.mArguments->StackAddress<0>();
aCx.ReadOrWritePrefaceBytes(stack, ByteSize);
}
}
// Avoid calling a function in the middleman process.
static inline void MM_SkipInMiddleman(MiddlemanCallContext& aCx) {
if (aCx.mPhase == MiddlemanCallPhase::MiddlemanInput) {
aCx.mSkipCallInMiddleman = true;
}
}
static inline void MM_NoOp(MiddlemanCallContext& aCx) {}
template <MiddlemanCallFn Fn0, MiddlemanCallFn Fn1,
MiddlemanCallFn Fn2 = MM_NoOp, MiddlemanCallFn Fn3 = MM_NoOp,
MiddlemanCallFn Fn4 = MM_NoOp>
static inline void MM_Compose(MiddlemanCallContext& aCx) {
Fn0(aCx);
Fn1(aCx);
Fn2(aCx);
Fn3(aCx);
Fn4(aCx);
}
// Helper for capturing inputs that are produced by other middleman calls.
// Returns false in the ReplayInput or MiddlemanInput phases if the input
// system value could not be found.
bool MM_SystemInput(MiddlemanCallContext& aCx, const void** aThingPtr);
// Helper for capturing output system values that might be consumed by other
// middleman calls.
void MM_SystemOutput(MiddlemanCallContext& aCx, const void** aOutput,
bool aUpdating = false);
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_MiddlemanCall_h

View file

@ -12,17 +12,22 @@
#include "mozilla/Maybe.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StaticMutex.h"
#include "DirtyMemoryHandler.h"
#include "Lock.h"
#include "MemorySnapshot.h"
#include "ProcessRedirect.h"
#include "ProcessRewind.h"
#include "ValueIndex.h"
#include "pratom.h"
#include <dlfcn.h>
#include <fcntl.h>
#include <unistd.h>
#include <mach/exc.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/ndr.h>
#include <sys/time.h>
namespace mozilla {
namespace recordreplay {
@ -36,9 +41,7 @@ MOZ_NEVER_INLINE void BusyWait() {
// Basic interface
///////////////////////////////////////////////////////////////////////////////
File* gRecordingFile;
const char* gSnapshotMemoryPrefix;
const char* gSnapshotStackPrefix;
Recording* gRecording;
char* gInitializationFailureMessage;
@ -58,6 +61,11 @@ static bool gSpewEnabled;
// Whether this is the main child.
static bool gMainChild;
// Whether we are replaying on a cloud machine.
static bool gReplayingInCloud;
static void InitializeCrashDetector();
extern "C" {
MOZ_EXPORT void RecordReplayInterface_Initialize(int aArgc, char* aArgv[]) {
@ -74,10 +82,12 @@ MOZ_EXPORT void RecordReplayInterface_Initialize(int aArgc, char* aArgv[]) {
recordingFile.emplace(aArgv[i + 1]);
}
}
MOZ_RELEASE_ASSERT(processKind.isSome() && recordingFile.isSome());
MOZ_RELEASE_ASSERT(processKind.isSome());
gProcessKind = processKind.ref();
gRecordingFilename = strdup(recordingFile.ref());
if (recordingFile.isSome()) {
gRecordingFilename = strdup(recordingFile.ref());
}
switch (processKind.ref()) {
case ProcessKind::Recording:
@ -117,22 +127,15 @@ MOZ_EXPORT void RecordReplayInterface_Initialize(int aArgc, char* aArgv[]) {
EarlyInitializeRedirections();
if (!IsRecordingOrReplaying()) {
InitializeMiddlemanCalls();
InitializeExternalCalls();
return;
}
gSnapshotMemoryPrefix = mktemp(strdup("/tmp/SnapshotMemoryXXXXXX"));
gSnapshotStackPrefix = mktemp(strdup("/tmp/SnapshotStackXXXXXX"));
InitializeCurrentTime();
gRecordingFile = new File();
if (gRecordingFile->Open(recordingFile.ref(),
IsRecording() ? File::WRITE : File::READ)) {
InitializeRedirections();
} else {
gInitializationFailureMessage = strdup("Bad recording file");
}
gRecording = new Recording();
InitializeRedirections();
if (gInitializationFailureMessage) {
fprintf(stderr, "Initialization Failure: %s\n",
@ -145,19 +148,27 @@ MOZ_EXPORT void RecordReplayInterface_Initialize(int aArgc, char* aArgv[]) {
Thread* thread = Thread::GetById(MainThreadId);
MOZ_ASSERT(thread->Id() == MainThreadId);
thread->BindToCurrent();
thread->SetPassThrough(true);
InitializeMemorySnapshots();
// The translation layer we are running under in the cloud will intercept this
// and return a non-zero symbol address.
gReplayingInCloud = !!dlsym(RTLD_DEFAULT, "RecordReplay_ReplayingInCloud");
Thread::SpawnAllThreads();
InitializeCountdownThread();
SetupDirtyMemoryHandler();
InitializeMiddlemanCalls();
InitializeExternalCalls();
if (!gReplayingInCloud) {
// The crash detector is only useful when we have a local parent process to
// report crashes to. Avoid initializing it when running in the cloud
// so that we avoid calling mach interfaces with events passed through.
InitializeCrashDetector();
}
Lock::InitializeLocks();
// Don't create a stylo thread pool when recording or replaying.
putenv((char*)"STYLO_THREADS=1");
child::SetupRecordReplayChannel(aArgc, aArgv);
thread->SetPassThrough(false);
InitializeRewindState();
@ -197,16 +208,46 @@ MOZ_EXPORT void RecordReplayInterface_InternalRecordReplayBytes(void* aData,
MOZ_EXPORT void RecordReplayInterface_InternalInvalidateRecording(
const char* aWhy) {
if (IsRecording()) {
child::ReportFatalError(Nothing(), "Recording invalidated: %s", aWhy);
child::ReportFatalError("Recording invalidated: %s", aWhy);
} else {
child::ReportFatalError(Nothing(),
"Recording invalidated while replaying: %s", aWhy);
child::ReportFatalError("Recording invalidated while replaying: %s", aWhy);
}
Unreachable();
}
MOZ_EXPORT void RecordReplayInterface_InternalBeginPassThroughThreadEventsWithLocalReplay() {
if (IsReplaying() && !gReplayingInCloud) {
BeginPassThroughThreadEvents();
}
}
MOZ_EXPORT void RecordReplayInterface_InternalEndPassThroughThreadEventsWithLocalReplay() {
// If we are replaying locally we will be skipping over a section of the
// recording while events are passed through. Include the current stream
// position in the recording so that we will know how much to skip over.
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
Stream* localReplayStream = gRecording->OpenStream(StreamName::LocalReplaySkip, 0);
Stream& events = Thread::Current()->Events();
size_t position = IsRecording() ? events.StreamPosition() : 0;
localReplayStream->RecordOrReplayScalar(&position);
if (IsReplaying() && !ReplayingInCloud()) {
EndPassThroughThreadEvents();
MOZ_RELEASE_ASSERT(events.StreamPosition() <= position);
size_t nbytes = position - events.StreamPosition();
void* buf = malloc(nbytes);
events.ReadBytes(buf, nbytes);
free(buf);
MOZ_RELEASE_ASSERT(events.StreamPosition() == position);
}
}
} // extern "C"
// How many bytes have been sent from the recording to the middleman.
size_t gRecordingDataSentToMiddleman;
void FlushRecording() {
MOZ_RELEASE_ASSERT(IsRecording());
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
@ -214,35 +255,19 @@ void FlushRecording() {
// The recording can only be flushed when we are at a checkpoint.
// Save this endpoint to the recording.
size_t endpoint = GetLastCheckpoint();
Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
Stream* endpointStream = gRecording->OpenStream(StreamName::Endpoint, 0);
endpointStream->WriteScalar(endpoint);
gRecordingFile->PreventStreamWrites();
gRecordingFile->Flush();
gRecordingFile->AllowStreamWrites();
}
gRecording->PreventStreamWrites();
gRecording->Flush();
gRecording->AllowStreamWrites();
// Try to load another recording index, returning whether one was found.
static bool LoadNextRecordingIndex() {
Thread::WaitForIdleThreads();
InfallibleVector<Stream*> updatedStreams;
File::ReadIndexResult result = gRecordingFile->ReadNextIndex(&updatedStreams);
if (result == File::ReadIndexResult::InvalidFile) {
MOZ_CRASH("Bad recording file");
if (gRecording->Size() > gRecordingDataSentToMiddleman) {
child::SendRecordingData(gRecordingDataSentToMiddleman,
gRecording->Data() + gRecordingDataSentToMiddleman,
gRecording->Size() - gRecordingDataSentToMiddleman);
gRecordingDataSentToMiddleman = gRecording->Size();
}
bool found = result == File::ReadIndexResult::FoundIndex;
if (found) {
for (Stream* stream : updatedStreams) {
if (stream->Name() == StreamName::Lock) {
Lock::LockAquiresUpdated(stream->NameIndex());
}
}
}
Thread::ResumeIdleThreads();
return found;
}
void HitEndOfRecording() {
@ -250,13 +275,11 @@ void HitEndOfRecording() {
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
if (Thread::CurrentIsMainThread()) {
// Load more data from the recording. The debugger is not allowed to let us
// go past the recording endpoint, so there should be more data.
bool found = LoadNextRecordingIndex();
MOZ_RELEASE_ASSERT(found);
// We should have been provided with all the data needed to run forward in
// the replay. Check to see if there is any pending data.
child::AddPendingRecordingData();
} else {
// Non-main threads may wait until more recording data is loaded by the
// main thread.
// Non-main threads may wait until more recording data is added.
Thread::Wait();
}
}
@ -268,7 +291,7 @@ size_t RecordingEndpoint() {
MOZ_RELEASE_ASSERT(IsReplaying());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
Stream* endpointStream = gRecording->OpenStream(StreamName::Endpoint, 0);
while (!endpointStream->AtEnd()) {
gRecordingEndpoint = endpointStream->ReadScalar();
}
@ -301,8 +324,11 @@ const char* ThreadEventName(ThreadEvent aEvent) {
int GetRecordingPid() { return gRecordingPid; }
void ResetPid() { gPid = getpid(); }
bool IsMainChild() { return gMainChild; }
void SetMainChild() { gMainChild = true; }
bool ReplayingInCloud() { return gReplayingInCloud; }
///////////////////////////////////////////////////////////////////////////////
// Record/Replay Assertions
@ -324,7 +350,7 @@ MOZ_EXPORT void RecordReplayInterface_InternalRecordReplayAssert(
// This must be kept in sync with Stream::RecordOrReplayThreadEvent, which
// peeks at the input string written after the thread event.
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Assert);
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Assert, text);
thread->Events().CheckInput(text);
}
@ -399,5 +425,96 @@ MOZ_EXPORT void RecordReplayInterface_InternalHoldJSObject(void* aJSObj) {
} // extern "C"
static mach_port_t gCrashDetectorExceptionPort;
// See AsmJSSignalHandlers.cpp.
static const mach_msg_id_t sExceptionId = 2405;
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs). See js/src/wasm/WasmSignalHandlers.cpp.
#pragma pack(4)
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread;
mach_msg_port_descriptor_t task;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_type_t exception;
mach_msg_type_number_t codeCnt;
int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()
typedef struct {
Request__mach_exception_raise_t body;
mach_msg_trailer_t trailer;
} ExceptionRequest;
static void CrashDetectorThread(void*) {
kern_return_t kret;
while (true) {
ExceptionRequest request;
kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
gCrashDetectorExceptionPort, MACH_MSG_TIMEOUT_NONE,
MACH_PORT_NULL);
Print("Crashing: %s\n", gMozCrashReason);
kern_return_t replyCode = KERN_FAILURE;
if (kret == KERN_SUCCESS && request.body.Head.msgh_id == sExceptionId &&
request.body.exception == EXC_BAD_ACCESS && request.body.codeCnt == 2) {
uint8_t* faultingAddress = (uint8_t*)request.body.code[1];
child::MinidumpInfo info(request.body.exception, request.body.code[0],
request.body.code[1],
request.body.thread.name,
request.body.task.name);
child::ReportCrash(info, faultingAddress);
} else {
child::ReportFatalError("CrashDetectorThread mach_msg "
"returned unexpected data");
}
__Reply__exception_raise_t reply;
reply.Head.msgh_bits =
MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
reply.Head.msgh_size = sizeof(reply);
reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
reply.Head.msgh_local_port = MACH_PORT_NULL;
reply.Head.msgh_id = request.body.Head.msgh_id + 100;
reply.NDR = NDR_record;
reply.RetCode = replyCode;
mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
}
static void InitializeCrashDetector() {
MOZ_RELEASE_ASSERT(AreThreadEventsPassedThrough());
kern_return_t kret;
// Get a port which can send and receive data.
kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
&gCrashDetectorExceptionPort);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
kret = mach_port_insert_right(mach_task_self(), gCrashDetectorExceptionPort,
gCrashDetectorExceptionPort,
MACH_MSG_TYPE_MAKE_SEND);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
// Create a thread to block on reading the port.
Thread::SpawnNonRecordedThread(CrashDetectorThread, nullptr);
// Set exception ports on the entire task. Unfortunately, this clobbers any
// other exception ports for the task, and forwarding to those other ports
// is not easy to get right.
kret = task_set_exception_ports(
mach_task_self(), EXC_MASK_BAD_ACCESS, gCrashDetectorExceptionPort,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
}
} // namespace recordreplay
} // namespace mozilla

View file

@ -72,10 +72,10 @@ enum class ThreadEvent : uint32_t {
// Get the printable name for a thread event.
const char* ThreadEventName(ThreadEvent aEvent);
class File;
class Recording;
// File used during recording and replay.
extern File* gRecordingFile;
// Recording being written to or read from.
extern Recording* gRecording;
// Whether record/replay state has finished initialization.
extern bool gInitialized;
@ -106,6 +106,9 @@ size_t RecordingEndpoint();
bool IsMainChild();
void SetMainChild();
// Whether we are replaying a recording on a machine in the cloud.
bool ReplayingInCloud();
// Get the process kind and recording file specified at the command line.
// These are available in the middleman as well as while recording/replaying.
extern ProcessKind gProcessKind;
@ -203,6 +206,9 @@ MOZ_MakeRecordReplayPrinter(Print, false)
// Get the ID of the process that produced the recording.
int GetRecordingPid();
// Update the current pid after a fork.
void ResetPid();
///////////////////////////////////////////////////////////////////////////////
// Profiling
///////////////////////////////////////////////////////////////////////////////
@ -232,99 +238,6 @@ struct AutoTimer {
void DumpTimers();
///////////////////////////////////////////////////////////////////////////////
// Memory Management
///////////////////////////////////////////////////////////////////////////////
// In cases where memory is tracked and should be saved/restored with
// checkpoints, malloc and other standard library functions suffice to allocate
// memory in the record/replay system. The routines below are used for handling
// redirections for the raw system calls underlying the standard libraries, and
// for cases where allocated memory should be untracked: the contents are
// ignored when saving/restoring checkpoints.
// Different kinds of memory used in the system.
enum class MemoryKind {
// Memory whose contents are saved/restored with checkpoints.
Tracked,
// All remaining memory kinds refer to untracked memory.
// Memory not fitting into one of the categories below.
Generic,
// Memory used for thread snapshots.
ThreadSnapshot,
// Memory used by various parts of the memory snapshot system.
TrackedRegions,
FreeRegions,
DirtyPageSet,
SortedDirtyPageSet,
PageCopy,
// Memory used by various parts of JS integration.
ScriptHits,
Count
};
// Allocate or deallocate a block of memory of a particular kind. Allocated
// memory is initially zeroed.
void* AllocateMemory(size_t aSize, MemoryKind aKind);
void DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind);
// Allocation policy for managing memory of a particular kind.
template <MemoryKind Kind>
class AllocPolicy {
public:
template <typename T>
T* maybe_pod_calloc(size_t aNumElems) {
if (aNumElems & tl::MulOverflowMask<sizeof(T)>::value) {
MOZ_CRASH();
}
// Note: AllocateMemory always returns zeroed memory.
return static_cast<T*>(AllocateMemory(aNumElems * sizeof(T), Kind));
}
template <typename T>
void free_(T* aPtr, size_t aSize) {
DeallocateMemory(aPtr, aSize * sizeof(T), Kind);
}
template <typename T>
T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
T* res = maybe_pod_calloc<T>(aNewSize);
memcpy(res, aPtr, aOldSize * sizeof(T));
free_<T>(aPtr, aOldSize);
return res;
}
template <typename T>
T* maybe_pod_malloc(size_t aNumElems) {
return maybe_pod_calloc<T>(aNumElems);
}
template <typename T>
T* pod_malloc(size_t aNumElems) {
return maybe_pod_malloc<T>(aNumElems);
}
template <typename T>
T* pod_calloc(size_t aNumElems) {
return maybe_pod_calloc<T>(aNumElems);
}
template <typename T>
T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
}
void reportAllocOverflow() const {}
MOZ_MUST_USE bool checkSimulatedOOM() const { return true; }
};
///////////////////////////////////////////////////////////////////////////////
// Redirection Bypassing
///////////////////////////////////////////////////////////////////////////////
@ -338,16 +251,11 @@ class AllocPolicy {
typedef size_t FileHandle;
// Allocate/deallocate a block of memory directly from the system.
void* DirectAllocateMemory(void* aAddress, size_t aSize);
void* DirectAllocateMemory(size_t aSize);
void DirectDeallocateMemory(void* aAddress, size_t aSize);
// Give a block of memory R or RX access.
void DirectWriteProtectMemory(void* aAddress, size_t aSize, bool aExecutable,
bool aIgnoreFailures = false);
// Give a block of memory RW or RWX access.
void DirectUnprotectMemory(void* aAddress, size_t aSize, bool aExecutable,
bool aIgnoreFailures = false);
// Make a memory range inaccessible.
void DirectMakeInaccessible(void* aAddress, size_t aSize);
// Open an existing file for reading or a new file for writing, clobbering any
// existing file.
@ -372,8 +280,19 @@ size_t DirectRead(FileHandle aFd, void* aData, size_t aSize);
// Create a new pipe.
void DirectCreatePipe(FileHandle* aWriteFd, FileHandle* aReadFd);
typedef pthread_t NativeThreadId;
// Spawn a new thread.
void DirectSpawnThread(void (*aFunction)(void*), void* aArgument);
NativeThreadId DirectSpawnThread(void (*aFunction)(void*), void* aArgument,
void* aStackBase, size_t aStackSize);
// Get the current thread.
NativeThreadId DirectCurrentThread();
typedef pthread_mutex_t NativeLock;
void DirectLockMutex(NativeLock* aLock, bool aPassThroughEvents = true);
void DirectUnlockMutex(NativeLock* aLock, bool aPassThroughEvents = true);
} // namespace recordreplay
} // namespace mozilla

View file

@ -7,7 +7,7 @@
#include "ProcessRedirect.h"
#include "InfallibleVector.h"
#include "MiddlemanCall.h"
#include "ExternalCall.h"
#include "ipc/ChildInternal.h"
#include "ipc/ParentInternal.h"
#include "mozilla/Sprintf.h"
@ -92,23 +92,22 @@ __attribute__((used)) int RecordReplayInterceptCall(int aCallId,
// After we have diverged from the recording, we can't access the thread's
// recording anymore.
// If the redirection has a middleman preamble hook, call it to see if it
// can handle this call. The middleman preamble hook is separate from the
// If the redirection has an external preamble hook, call it to see if it
// can handle this call. The external preamble hook is separate from the
// normal preamble hook because entering the RecordingEventSection can
// cause the current thread to diverge from the recording; testing for
// HasDivergedFromRecording() does not work reliably in the normal preamble.
if (redirection.mMiddlemanPreamble) {
if (CallPreambleHook(redirection.mMiddlemanPreamble, aCallId,
if (redirection.mExternalPreamble) {
if (CallPreambleHook(redirection.mExternalPreamble, aCallId,
aArguments)) {
return 0;
}
}
// If the redirection has a middleman call hook, try to perform the call in
// the middleman instead.
if (redirection.mMiddlemanCall) {
if (SendCallToMiddleman(aCallId, aArguments,
/* aPopulateOutput = */ true)) {
// If the redirection has an external call hook, try to get its result
// from another process.
if (redirection.mExternalCall) {
if (OnExternalCall(aCallId, aArguments, /* aPopulateOutput = */ true)) {
return 0;
}
}
@ -116,14 +115,13 @@ __attribute__((used)) int RecordReplayInterceptCall(int aCallId,
if (child::CurrentRepaintCannotFail()) {
// EnsureNotDivergedFromRecording is going to force us to crash, so fail
// earlier with a more helpful error message.
child::ReportFatalError(Nothing(),
"Could not perform middleman call: %s\n",
child::ReportFatalError("Could not perform external call: %s\n",
redirection.mName);
}
// Calling any redirection which performs the standard steps will cause
// debugger operations that have diverged from the recording to fail.
EnsureNotDivergedFromRecording();
EnsureNotDivergedFromRecording(Some(aCallId));
Unreachable();
}
@ -149,11 +147,11 @@ __attribute__((used)) int RecordReplayInterceptCall(int aCallId,
redirection.mSaveOutput(thread->Events(), aArguments, &error);
}
// Save information about any potential middleman calls encountered if we
// haven't diverged from the recording, in case we diverge and later calls
// Save information about any external calls encountered if we haven't
// diverged from the recording, in case we diverge and later calls
// access data produced by this one.
if (IsReplaying() && redirection.mMiddlemanCall) {
(void)SendCallToMiddleman(aCallId, aArguments, /* aDiverged = */ false);
if (IsReplaying() && redirection.mExternalCall) {
(void)OnExternalCall(aCallId, aArguments, /* aDiverged = */ false);
}
RestoreError(error);
@ -170,8 +168,13 @@ extern size_t RecordReplayRedirectCall(...);
__asm(
"_RecordReplayRedirectCall:"
// Make space for a CallArguments struct on the stack.
"subq $616, %rsp;"
// Save rbp for backtraces.
"pushq %rbp;"
"movq %rsp, %rbp;"
// Make space for a CallArguments struct on the stack, with a little extra
// space for alignment.
"subq $624, %rsp;"
// Fill in the structure's contents.
"movq %rdi, 0(%rsp);"
@ -194,7 +197,7 @@ __asm(
// Save stack arguments into the structure.
"_RecordReplayRedirectCall_Loop:"
"subq $1, %rsi;"
"movq 624(%rsp, %rsi, 8), %rdx;" // Ignore the return ip on the stack.
"movq 640(%rsp, %rsi, 8), %rdx;" // Ignore the rip/rbp saved on stack.
"movq %rdx, 104(%rsp, %rsi, 8);"
"testq %rsi, %rsi;"
"jne _RecordReplayRedirectCall_Loop;"
@ -223,7 +226,8 @@ __asm(
"movsd 56(%rsp), %xmm1;"
"movsd 64(%rsp), %xmm2;"
"movq 72(%rsp), %rax;"
"addq $616, %rsp;"
"addq $624, %rsp;"
"popq %rbp;"
"jmpq *%rax;"
// The message has been recorded/replayed.
@ -235,9 +239,10 @@ __asm(
"movsd 96(%rsp), %xmm1;"
// Pop the structure from the stack.
"addq $616, %rsp;"
"addq $624, %rsp;"
// Return to caller.
"popq %rbp;"
"ret;");
// Call a function address with the specified arguments.
@ -246,10 +251,15 @@ extern void RecordReplayInvokeCallRaw(CallArguments* aArguments, void* aFnPtr);
__asm(
"_RecordReplayInvokeCallRaw:"
// Save rbp for backtraces.
"pushq %rbp;"
"movq %rsp, %rbp;"
// Save function pointer in rax.
"movq %rsi, %rax;"
// Save arguments on the stack. This also aligns the stack.
// Save arguments on the stack, with a second copy for alignment.
"push %rdi;"
"push %rdi;"
// Count how many stack arguments we need to copy.
@ -286,11 +296,13 @@ __asm(
// Save any return values to the arguments.
"pop %rdi;"
"pop %rdi;"
"movq %rax, 72(%rdi);"
"movq %rdx, 80(%rdi);"
"movsd %xmm0, 88(%rdi);"
"movsd %xmm1, 96(%rdi);"
"popq %rbp;"
"ret;");
} // extern "C"
@ -468,7 +480,9 @@ static uint8_t* MaybeInternalJumpTarget(uint8_t* aIpStart, uint8_t* aIpEnd) {
// For these functions, there is a syscall near the beginning which
// other system threads might be inside.
strstr(startName, "__workq_kernreturn") ||
strstr(startName, "kevent64")) {
strstr(startName, "kevent64") ||
// Workaround suspected udis86 bug when disassembling this function.
strstr(startName, "CGAffineTransformMakeScale")) {
PrintRedirectSpew("Failed [%p]: Vetoed by annotation\n", aIpEnd - 1);
return aIpEnd - 1;
}
@ -498,7 +512,8 @@ static void UnknownInstruction(const char* aName, uint8_t* aIp,
for (size_t i = 0; i < aNbytes; i++) {
byteData.AppendPrintf(" %d", (int)aIp[i]);
}
RedirectFailure("Unknown instruction in %s:%s", aName, byteData.get());
RedirectFailure("Unknown instruction in %s [%p]:%s", aName, aIp,
byteData.get());
}
// Try to emit instructions to |aAssembler| with equivalent behavior to any
@ -541,14 +556,21 @@ static bool CopySpecialInstruction(uint8_t* aIp, ud_t* aUd, size_t aNbytes,
}
return true;
}
if (op->type == UD_OP_MEM && op->base == UD_R_RIP && !op->index &&
op->offset == 32) {
// jmp *$offset32(%rip)
uint8_t* addr = aIp + aNbytes + op->lval.sdword;
aAssembler.MoveImmediateToRax(addr);
aAssembler.LoadRax(8);
aAssembler.JumpToRax();
return true;
if (op->type == UD_OP_MEM && !op->index) {
if (op->base == UD_R_RIP) {
if (op->offset == 32) {
// jmp *$offset32(%rip)
uint8_t* addr = aIp + aNbytes + op->lval.sdword;
aAssembler.MoveImmediateToRax(addr);
aAssembler.LoadRax(8);
aAssembler.JumpToRax();
return true;
}
} else {
// Non-IP relative call or jump.
aAssembler.CopyInstruction(aIp, aNbytes);
return true;
}
}
}
@ -632,6 +654,24 @@ static bool CopySpecialInstruction(uint8_t* aIp, ud_t* aUd, size_t aNbytes,
aAssembler.PopRax();
return true;
}
if (dst->type == UD_OP_MEM && src->type == UD_OP_REG &&
dst->base == UD_R_RIP && !dst->index && dst->offset == 32) {
// cmpq reg, $offset32(%rip)
int reg = Assembler::NormalizeRegister(src->base);
if (!reg) {
return false;
}
uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
aAssembler.PushRax();
aAssembler.MoveRegisterToRax(reg);
aAssembler.PushRax();
aAssembler.MoveImmediateToRax(addr);
aAssembler.LoadRax(8);
aAssembler.CompareTopOfStackWithRax();
aAssembler.PopRax();
aAssembler.PopRax();
return true;
}
}
if (mnemonic == UD_Ixchg) {
@ -731,11 +771,37 @@ static uint8_t* CopyInstructions(const char* aName, uint8_t* aIpStart,
return ip;
}
static bool PreserveCallerSaveRegisters(const char* aName) {
// LLVM assumes that the call made when getting thread local variables will
// preserve registers that are normally caller save. It's not clear what ABI
// is actually assumed for this function so preserve all possible registers.
return !strcmp(aName, "tlv_get_addr");
}
// Generate code to set %rax and enter RecordReplayRedirectCall.
static uint8_t* GenerateRedirectStub(Assembler& aAssembler, size_t aCallId) {
uint8_t* newFunction = aAssembler.Current();
aAssembler.MoveImmediateToRax((void*)aCallId);
aAssembler.Jump(BitwiseCast<void*>(RecordReplayRedirectCall));
if (PreserveCallerSaveRegisters(GetRedirection(aCallId).mName)) {
static int registers[] = {
UD_R_RDI, UD_R_RDI /* for alignment */, UD_R_RSI, UD_R_RDX, UD_R_RCX,
UD_R_R8, UD_R_R9, UD_R_R10, UD_R_R11,
};
for (size_t i = 0; i < ArrayLength(registers); i++) {
aAssembler.MoveRegisterToRax(registers[i]);
aAssembler.PushRax();
}
aAssembler.MoveImmediateToRax((void*)aCallId);
uint8_t* after = aAssembler.Current() + PushImmediateBytes + JumpBytes;
aAssembler.PushImmediate(after);
aAssembler.Jump(BitwiseCast<void*>(RecordReplayRedirectCall));
for (int i = ArrayLength(registers) - 1; i >= 0; i--) {
aAssembler.PopRegister(registers[i]);
}
aAssembler.Return();
} else {
aAssembler.MoveImmediateToRax((void*)aCallId);
aAssembler.Jump(BitwiseCast<void*>(RecordReplayRedirectCall));
}
return newFunction;
}
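For reference, here is a sketch of the stub emitted for the register-preserving (tlv_get_addr) case, reconstructed by hand from the Assembler calls above rather than taken from actual output; each push and pop is staged through %rax by the assembler.

//   push %rdi; push %rdi                    // second copy for alignment
//   push %rsi; push %rdx; push %rcx
//   push %r8;  push %r9;  push %r10; push %r11
//   movq $aCallId, %rax
//   pushq $after                            // fake return address
//   jmp _RecordReplayRedirectCall
// after:
//   pop %r11; pop %r10; pop %r9; pop %r8
//   pop %rcx; pop %rdx; pop %rsi
//   pop %rdi; pop %rdi
//   ret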
@ -755,8 +821,8 @@ static void Redirect(size_t aCallId, Redirection& aRedirection,
if (!functionStart) {
if (aFirstPass) {
PrintSpew("Could not find symbol %s for redirecting.\n",
aRedirection.mName);
PrintRedirectSpew("Could not find symbol %s for redirecting.\n",
aRedirection.mName);
}
return;
}

View file

@ -73,11 +73,21 @@ namespace recordreplay {
// Function Redirections
///////////////////////////////////////////////////////////////////////////////
struct CallArguments;
// Capture the arguments that can be passed to a redirection, and provide
// storage to specify the redirection's return value. We only need to capture
// enough argument data here for calls made directly from Gecko code,
// i.e. where events are not passed through. Calls made while events are passed
// through are performed with the same stack and register state as when they
// were initially invoked.
//
// Arguments and return value indexes refer to the register contents as passed
// to the function originally. For functions with complex or floating point
// arguments and return values, the right index to use might be different than
// expected, per the requirements of the System V x64 ABI.
struct CallArguments {
// The maximum number of stack arguments that can be captured.
static const size_t NumStackArguments = 64;
// All argument and return value data that is stored in registers and whose
// values are preserved when calling a redirected function.
struct CallRegisterArguments {
protected:
size_t arg0; // 0
size_t arg1; // 8
@ -92,30 +102,6 @@ struct CallRegisterArguments {
size_t rval1; // 80
double floatrval0; // 88
double floatrval1; // 96
// Size: 104
public:
void CopyFrom(const CallRegisterArguments* aArguments);
void CopyTo(CallRegisterArguments* aArguments) const;
void CopyRvalFrom(const CallRegisterArguments* aArguments);
};
// Capture the arguments that can be passed to a redirection, and provide
// storage to specify the redirection's return value. We only need to capture
// enough argument data here for calls made directly from Gecko code,
// i.e. where events are not passed through. Calls made while events are passed
// through are performed with the same stack and register state as when they
// were initially invoked.
//
// Arguments and return value indexes refer to the register contents as passed
// to the function originally. For functions with complex or floating point
// arguments and return values, the right index to use might be different than
// expected, per the requirements of the System V x64 ABI.
struct CallArguments : public CallRegisterArguments {
// The maximum number of stack arguments that can be captured.
static const size_t NumStackArguments = 64;
protected:
size_t stack[NumStackArguments]; // 104
// Size: 616
@ -123,7 +109,7 @@ struct CallArguments : public CallRegisterArguments {
template <typename T>
T& Arg(size_t aIndex) {
static_assert(sizeof(T) == sizeof(size_t), "Size must match");
static_assert(IsFloatingPoint<T>::value == false, "FloatArg NYI");
static_assert(IsFloatingPoint<T>::value == false, "Use FloatArg");
MOZ_RELEASE_ASSERT(aIndex < 70);
switch (aIndex) {
case 0:
@ -148,6 +134,19 @@ struct CallArguments : public CallRegisterArguments {
return Arg<T>(Index);
}
template <size_t Index>
double& FloatArg() {
static_assert(Index < 3, "Bad index");
switch (Index) {
case 0:
return floatarg0;
case 1:
return floatarg1;
case 2:
return floatarg2;
}
}
template <size_t Offset>
size_t* StackAddress() {
static_assert(Offset % sizeof(size_t) == 0, "Bad stack offset");
@ -179,24 +178,6 @@ struct CallArguments : public CallRegisterArguments {
}
};
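To make the indexing rules above concrete, here is a small sketch that is not part of this patch: the call signature and helper name are invented, while Arg, FloatArg and Rval are the accessors defined in this struct.

// Sketch only: inspecting the captured registers for a hypothetical
// redirected call "int DoScale(void* aObj, size_t aFlags, double aFactor)".
static void InspectDoScaleArguments(CallArguments* aArguments) {
  void* obj = aArguments->Arg<void*>(0);      // integer/pointer argument 0 (%rdi)
  size_t flags = aArguments->Arg<size_t>(1);  // integer argument 1 (%rsi)
  double factor = aArguments->FloatArg<0>();  // floating point argument 0 (%xmm0)
  if (!obj || factor == 0) {
    // The integer return value is stored in rval0 (%rax).
    aArguments->Rval<size_t>() = flags;
  }
}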
inline void CallRegisterArguments::CopyFrom(
const CallRegisterArguments* aArguments) {
memcpy(this, aArguments, sizeof(CallRegisterArguments));
}
inline void CallRegisterArguments::CopyTo(
CallRegisterArguments* aArguments) const {
memcpy(aArguments, this, sizeof(CallRegisterArguments));
}
inline void CallRegisterArguments::CopyRvalFrom(
const CallRegisterArguments* aArguments) {
rval0 = aArguments->rval0;
rval1 = aArguments->rval1;
floatrval0 = aArguments->floatrval0;
floatrval1 = aArguments->floatrval1;
}
// Generic type for a system error code.
typedef ssize_t ErrorType;
@ -231,10 +212,10 @@ enum class PreambleResult {
// modify its behavior.
typedef PreambleResult (*PreambleFn)(CallArguments* aArguments);
// Signature for a function that conveys data about a call to or from the
// middleman process.
struct MiddlemanCallContext;
typedef void (*MiddlemanCallFn)(MiddlemanCallContext& aCx);
// Signature for a function that conveys data about a call to or from an
// external process.
struct ExternalCallContext;
typedef void (*ExternalCallFn)(ExternalCallContext& aCx);
// Information about a system library API function which is being redirected.
struct Redirection {
@ -260,13 +241,13 @@ struct Redirection {
// If specified, will be called upon entry to the redirected call.
PreambleFn mPreamble;
// If specified, will be called while replaying and diverged from the
// recording to perform this call in the middleman process.
MiddlemanCallFn mMiddlemanCall;
// If specified, allows this call to be made after diverging from the
// recording. See ExternalCall.h
ExternalCallFn mExternalCall;
// Additional preamble that is only called while replaying and diverged from
// the recording.
PreambleFn mMiddlemanPreamble;
PreambleFn mExternalPreamble;
};
// Platform specific methods describing the set of redirections.
@ -399,6 +380,21 @@ static inline void RR_CStringRval(Stream& aEvents, CallArguments* aArguments,
}
}
// Record/replay a fixed size rval buffer.
template <size_t ByteCount>
static inline void RR_RvalBuffer(Stream& aEvents, CallArguments* aArguments,
ErrorType* aError) {
auto& rval = aArguments->Rval<void*>();
bool hasRval = IsRecording() && rval;
aEvents.RecordOrReplayValue(&hasRval);
if (IsReplaying()) {
rval = hasRval ? NewLeakyArray<char>(ByteCount) : nullptr;
}
if (hasRval) {
aEvents.RecordOrReplayBytes(rval, ByteCount);
}
}
// Ensure that the return value matches the specified argument.
template <size_t Argument>
static inline void RR_RvalIsArgument(Stream& aEvents, CallArguments* aArguments,
@ -508,6 +504,14 @@ static inline void RR_WriteOptionalBufferFixedSize(Stream& aEvents,
}
}
// Record/replay an out parameter.
template <size_t Arg, typename Type>
static inline void RR_OutParam(Stream& aEvents, CallArguments* aArguments,
ErrorType* aError) {
RR_WriteOptionalBufferFixedSize<Arg, sizeof(Type)>(aEvents, aArguments,
aError);
}
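To show how these primitives compose, here is a sketch with the same shape as the mSaveOutput hooks installed in the redirection table; the call it describes is invented, but the helpers are the ones defined above.

// Sketch only: output hook for a hypothetical redirected call
// "Info* CopyInfo(uint64_t* aGenerationOut)" that returns a malloc'ed
// 64-byte structure and also writes a generation counter out parameter.
static inline void RR_SaveCopyInfoOutput(Stream& aEvents,
                                         CallArguments* aArguments,
                                         ErrorType* aError) {
  RR_RvalBuffer<64>(aEvents, aArguments, aError);
  RR_OutParam<0, uint64_t>(aEvents, aArguments, aError);
}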
// Record/replay the contents of a buffer at argument BufferArg with byte size
// CountArg, where the call return value plus Offset indicates the amount of
// data written to the buffer by the call. The return value must already have
@ -588,6 +592,14 @@ static inline PreambleResult Preamble_WaitForever(CallArguments* aArguments) {
return PreambleResult::PassThrough;
}
static inline PreambleResult Preamble_NYI(CallArguments* aArguments) {
if (AreThreadEventsPassedThrough()) {
return PreambleResult::PassThrough;
}
MOZ_CRASH("Redirection NYI");
return PreambleResult::Veto;
}
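The general shape of a call-specific preamble, as a sketch that is not in this patch: AreThreadEventsPassedThrough, Rval and the PreambleResult values are defined in this header, while the behavior of skipping the call and returning zero is invented for illustration. Returning Veto hands control straight back to the caller with whatever values were left in aArguments.

// Sketch only: skip a redirected call entirely when events are being
// recorded/replayed, handing a zero return value straight back to the caller.
static inline PreambleResult Preamble_SketchReturnZero(
    CallArguments* aArguments) {
  if (AreThreadEventsPassedThrough()) {
    return PreambleResult::PassThrough;
  }
  aArguments->Rval<size_t>() = 0;
  return PreambleResult::Veto;
}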
///////////////////////////////////////////////////////////////////////////////
// Other Redirection Interfaces
///////////////////////////////////////////////////////////////////////////////

Diff not shown because it is too large. Load diff

View file

@ -6,16 +6,13 @@
#include "ProcessRewind.h"
#include "nsString.h"
#include "ipc/ChildInternal.h"
#include "ipc/ParentInternal.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/StaticMutex.h"
#include "InfallibleVector.h"
#include "MemorySnapshot.h"
#include "Monitor.h"
#include "ProcessRecordReplay.h"
#include "ThreadSnapshot.h"
namespace mozilla {
namespace recordreplay {
@ -23,16 +20,6 @@ namespace recordreplay {
// The most recent checkpoint which was encountered.
static size_t gLastCheckpoint = InvalidCheckpointId;
// Information about the current rewinding state. The contents of this structure
// are in untracked memory.
struct RewindInfo {
// Thread stacks for snapshots which have been saved.
InfallibleVector<AllSavedThreadStacks, 1024, AllocPolicy<MemoryKind::Generic>>
mSnapshots;
};
static RewindInfo* gRewindInfo;
// Lock for managing pending main thread callbacks.
static Monitor* gMainThreadCallbackMonitor;
@ -41,94 +28,9 @@ static Monitor* gMainThreadCallbackMonitor;
static StaticInfallibleVector<std::function<void()>> gMainThreadCallbacks;
void InitializeRewindState() {
MOZ_RELEASE_ASSERT(gRewindInfo == nullptr);
void* memory = AllocateMemory(sizeof(RewindInfo), MemoryKind::Generic);
gRewindInfo = new (memory) RewindInfo();
gMainThreadCallbackMonitor = new Monitor();
}
void RestoreSnapshotAndResume(size_t aNumSnapshots) {
MOZ_RELEASE_ASSERT(IsReplaying());
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(aNumSnapshots < gRewindInfo->mSnapshots.length());
// Make sure we don't lose pending main thread callbacks due to rewinding.
{
MonitorAutoLock lock(*gMainThreadCallbackMonitor);
MOZ_RELEASE_ASSERT(gMainThreadCallbacks.empty());
}
Thread::WaitForIdleThreads();
double start = CurrentTime();
{
// Rewind heap memory to the target snapshot.
AutoDisallowMemoryChanges disallow;
RestoreMemoryToLastSnapshot();
for (size_t i = 0; i < aNumSnapshots; i++) {
gRewindInfo->mSnapshots.back().ReleaseContents();
gRewindInfo->mSnapshots.popBack();
RestoreMemoryToLastDiffSnapshot();
}
}
FixupFreeRegionsAfterRewind();
double end = CurrentTime();
PrintSpew("Restore %.2fs\n", (end - start) / 1000000.0);
// Finally, let threads restore themselves to their stacks at the snapshot
// we are rewinding to.
RestoreAllThreads(gRewindInfo->mSnapshots.back());
Unreachable();
}
bool NewSnapshot() {
if (IsRecording()) {
return true;
}
Thread::WaitForIdleThreads();
PrintSpew("Saving snapshot...\n");
double start = CurrentTime();
// Record either the first or a subsequent diff memory snapshot.
if (gRewindInfo->mSnapshots.empty()) {
TakeFirstMemorySnapshot();
} else {
TakeDiffMemorySnapshot();
}
gRewindInfo->mSnapshots.emplaceBack();
double end = CurrentTime();
bool reached = true;
// Save all thread stacks for the snapshot. If we rewind here from a
// later point of execution then this will return false.
if (SaveAllThreads(gRewindInfo->mSnapshots.back())) {
PrintSpew("Saved snapshot %.2fs\n", (end - start) / 1000000.0);
} else {
PrintSpew("Restored snapshot\n");
reached = false;
// After restoring, make sure all threads have updated their stacks
// before letting any of them resume execution. Threads might have
// pointers into each others' stacks.
WaitForIdleThreadsToRestoreTheirStacks();
}
Thread::ResumeIdleThreads();
return reached;
}
void NewCheckpoint() {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
@ -147,21 +49,18 @@ void DivergeFromRecording() {
Thread* thread = Thread::Current();
MOZ_RELEASE_ASSERT(thread->IsMainThread());
if (!thread->HasDivergedFromRecording()) {
// Reset middleman call state whenever we first diverge from the recording.
child::SendResetMiddlemanCalls();
gUnhandledDivergeAllowed = true;
if (!thread->HasDivergedFromRecording()) {
thread->DivergeFromRecording();
// Direct all other threads to diverge from the recording as well.
Thread::WaitForIdleThreads();
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
for (size_t i = MainThreadId + 1; i <= MaxThreadId; i++) {
Thread::GetById(i)->SetShouldDivergeFromRecording();
}
Thread::ResumeIdleThreads();
}
gUnhandledDivergeAllowed = true;
}
extern "C" {
@ -178,29 +77,19 @@ void DisallowUnhandledDivergeFromRecording() {
gUnhandledDivergeAllowed = false;
}
void EnsureNotDivergedFromRecording() {
// If we have diverged from the recording and encounter an operation we can't
// handle, rewind to the last snapshot.
void EnsureNotDivergedFromRecording(const Maybe<int>& aCallId) {
AssertEventsAreNotPassedThrough();
if (HasDivergedFromRecording()) {
MOZ_RELEASE_ASSERT(gUnhandledDivergeAllowed);
// Crash instead of rewinding if a repaint is about to fail and is not
// allowed.
if (child::CurrentRepaintCannotFail()) {
MOZ_CRASH("Recording divergence while repainting");
}
PrintSpew("Unhandled recording divergence: %s\n",
aCallId.isSome() ? GetRedirection(aCallId.ref()).mName : "");
PrintSpew("Unhandled recording divergence, restoring snapshot...\n");
RestoreSnapshotAndResume(0);
child::ReportUnhandledDivergence();
Unreachable();
}
}
size_t NumSnapshots() {
return gRewindInfo ? gRewindInfo->mSnapshots.length() : 0;
}
size_t GetLastCheckpoint() { return gLastCheckpoint; }
static bool gMainThreadShouldPause = false;
@ -238,12 +127,10 @@ void PauseMainThreadAndServiceCallbacks() {
}
}
// As for RestoreSnapshotAndResume, we shouldn't resume the main thread
// while it still has callbacks to execute.
// We shouldn't resume the main thread while it still has callbacks.
MOZ_RELEASE_ASSERT(gMainThreadCallbacks.empty());
// If we diverge from the recording the only way we can get back to resuming
// normal execution is to rewind to a snapshot prior to the divergence.
// If we diverge from the recording we can't resume normal execution.
MOZ_RELEASE_ASSERT(!HasDivergedFromRecording());
gMainThreadIsPaused = false;
@ -268,5 +155,37 @@ void ResumeExecution() {
gMainThreadCallbackMonitor->Notify();
}
bool ForkProcess() {
MOZ_RELEASE_ASSERT(IsReplaying());
Thread::WaitForIdleThreads();
// Before forking, all other threads need to release any locks they are
// holding. After the fork the new process will only have a main thread and
// will not be able to acquire any lock held at the time of the fork.
Thread::OperateOnIdleThreadLocks(Thread::OwnedLockState::NeedRelease);
AutoEnsurePassThroughThreadEvents pt;
pid_t pid = fork();
if (pid > 0) {
Thread::OperateOnIdleThreadLocks(Thread::OwnedLockState::NeedAcquire);
Thread::ResumeIdleThreads();
return true;
}
Print("FORKED %d\n", getpid());
if (TestEnv("MOZ_REPLAYING_WAIT_AT_FORK")) {
BusyWait();
}
ResetPid();
Thread::RespawnAllThreadsAfterFork();
Thread::ResumeIdleThreads();
return false;
}
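A sketch, not from this patch, of how a caller branches on the return value; the wrapper name is invented.

// Sketch only: creating a restore point by forking the replaying process.
static void CheckpointViaForkSketch() {
  if (ForkProcess()) {
    // Original replaying process: continue running forward from here.
    return;
  }
  // Forked process: also resumes here, with its recorded threads already
  // respawned by Thread::RespawnAllThreadsAfterFork() inside ForkProcess().
}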
} // namespace recordreplay
} // namespace mozilla

View file

@ -28,14 +28,6 @@ namespace recordreplay {
// checkpoint has an ID, which monotonically increases during the execution.
// Checkpoints form a basis for identifying a particular point in execution,
// and in allowing replaying processes to rewind themselves.
//
// In a replaying process, snapshots can be taken that retain enough information
// to restore the contents of heap memory and thread stacks at the point the
// snapshot was taken. Snapshots are usually taken when certain checkpoints are
// reached, but they can be taken at other points as well.
//
// See MemorySnapshot.h and ThreadSnapshot.h for information on how snapshots
// are represented.
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
@ -50,7 +42,7 @@ namespace recordreplay {
// 3. The replay logic can inspect the process state, diverge from the recording
// by calling DivergeFromRecording, and eventually can unpause the main
// thread and allow execution to resume by calling ResumeExecution
// (if DivergeFromRecording was not called) or RestoreSnapshotAndResume.
// (if DivergeFromRecording was not called).
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
@ -65,9 +57,8 @@ namespace recordreplay {
// After this is called, some thread events will happen as if events were
// passed through, but other events that require interacting with the system
// will trigger an unhandled divergence from the recording via
// EnsureNotDivergedFromRecording, causing the process to rewind to the most
// recent snapshot. The debugger will recognize this rewind and play
// back in a way that restores the state when DivergeFromRecording() was
// EnsureNotDivergedFromRecording. The debugger will recognize this rewind and
// play back in a way that restores the state when DivergeFromRecording() was
// called, but without performing the later operation that triggered the
// rewind.
///////////////////////////////////////////////////////////////////////////////
@ -79,10 +70,10 @@ static const size_t FirstCheckpointId = 1;
// Initialize state needed for rewinding.
void InitializeRewindState();
// Invoke a callback on the main thread, and pause it until ResumeExecution or
// RestoreSnapshotAndResume are called. When the main thread is not paused,
// this must be called on the main thread itself. When the main thread is
// already paused, this may be called from any thread.
// Invoke a callback on the main thread, and pause it until ResumeExecution is
// called. When the main thread is not paused, this must be called on the main
// thread itself. When the main thread is already paused, this may be called
// from any thread.
void PauseMainThreadAndInvokeCallback(const std::function<void()>& aCallback);
// Return whether the main thread should be paused. This does not necessarily
@ -93,46 +84,33 @@ bool MainThreadShouldPause();
// longer needs to pause.
void PauseMainThreadAndServiceCallbacks();
// Return how many snapshots have been taken.
size_t NumSnapshots();
// Get the ID of the most recently encountered checkpoint.
size_t GetLastCheckpoint();
// When paused at a breakpoint or at a checkpoint, restore a snapshot that
// was saved earlier. aNumSnapshots is the number of snapshots to skip over
// when restoring.
void RestoreSnapshotAndResume(size_t aNumSnapshots);
// Fork a new processs from this one. Returns true if this is the original
// process, or false if this is the fork.
bool ForkProcess();
// When paused at a breakpoint or at a checkpoint, unpause and proceed with
// execution.
void ResumeExecution();
// Allow execution after this point to diverge from the recording. Execution
// will remain diverged until an earlier snapshot is restored.
//
// If an unhandled divergence occurs (see the 'Recording Divergence' comment
// in ProcessRewind.h) then the process rewinds to the most recent snapshot.
// Allow execution after this point to diverge from the recording.
void DivergeFromRecording();
// After a call to DivergeFromRecording(), this may be called to prevent future
// unhandled divergence from causing earlier snapshots to be restored
// unhandled divergence from following the normal bailout path
// (the process will immediately crash instead). This state lasts until a new
// call to DivergeFromRecording, or to an explicit restore of an earlier
// snapshot.
// call to DivergeFromRecording.
void DisallowUnhandledDivergeFromRecording();
// Make sure that execution has not diverged from the recording after a call to
// DivergeFromRecording, by rewinding to the last snapshot if so.
void EnsureNotDivergedFromRecording();
// DivergeFromRecording.
void EnsureNotDivergedFromRecording(const Maybe<int>& aCallId);
// Note a checkpoint at the current execution position.
void NewCheckpoint();
// Create a new snapshot that can be restored later. This method returns true
// if the snapshot was just taken, and false if it was just restored.
bool NewSnapshot();
} // namespace recordreplay
} // namespace mozilla

View file

@ -4,7 +4,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "File.h"
#include "Recording.h"
#include "ipc/ChildInternal.h"
#include "mozilla/Compression.h"
@ -22,7 +22,7 @@ namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
void Stream::ReadBytes(void* aData, size_t aSize) {
MOZ_RELEASE_ASSERT(mFile->OpenForReading());
MOZ_RELEASE_ASSERT(mRecording->IsReading());
size_t totalRead = 0;
@ -52,7 +52,7 @@ void Stream::ReadBytes(void* aData, size_t aSize) {
EnsureMemory(&mBallast, &mBallastSize, chunk.mCompressedSize,
BallastMaxSize(), DontCopyExistingData);
mFile->ReadChunk(mBallast.get(), chunk);
mRecording->ReadChunk(mBallast.get(), chunk);
EnsureMemory(&mBuffer, &mBufferSize, chunk.mDecompressedSize, BUFFER_MAX,
DontCopyExistingData);
@ -71,17 +71,17 @@ void Stream::ReadBytes(void* aData, size_t aSize) {
}
bool Stream::AtEnd() {
MOZ_RELEASE_ASSERT(mFile->OpenForReading());
MOZ_RELEASE_ASSERT(mRecording->IsReading());
return mBufferPos == mBufferLength && mChunkIndex == mChunks.length();
}
void Stream::WriteBytes(const void* aData, size_t aSize) {
MOZ_RELEASE_ASSERT(mFile->OpenForWriting());
MOZ_RELEASE_ASSERT(mRecording->IsWriting());
MOZ_RELEASE_ASSERT(mName != StreamName::Event || mInRecordingEventSection);
// Prevent the entire file from being flushed while we write this data.
AutoReadSpinLock streamLock(mFile->mStreamLock);
// Prevent the recording from being flushed while we write this data.
AutoReadSpinLock streamLock(mRecording->mStreamLock);
while (true) {
// Fill up the data buffer first.
@ -97,7 +97,7 @@ void Stream::WriteBytes(const void* aData, size_t aSize) {
aData = (char*)aData + bufWrite;
aSize -= bufWrite;
// Grow the file's buffer if it is not at its maximum size.
// Grow the stream's buffer if it is not at its maximum size.
if (mBufferSize < BUFFER_MAX) {
EnsureMemory(&mBuffer, &mBufferSize, mBufferSize + 1, BUFFER_MAX,
CopyExistingData);
@ -142,12 +142,34 @@ void Stream::WriteScalar(size_t aValue) {
} while (aValue);
}
void Stream::RecordOrReplayThreadEvent(ThreadEvent aEvent) {
// Work around arc4random being called from jemalloc when recording on macOS
// but not when replaying on Linux.
bool Stream::ReadMismatchedEventData(ThreadEvent aEvent) {
if (!strcmp(ThreadEventName(aEvent), "arc4random")) {
if (mNameIndex == MainThreadId) {
// For execution progress counter.
ReadScalar();
}
size_t value;
RecordOrReplayValue(&value);
return true;
}
return false;
}
void Stream::RecordOrReplayThreadEvent(ThreadEvent aEvent, const char* aExtra) {
if (IsRecording()) {
WriteScalar((size_t)aEvent);
} else {
ThreadEvent oldEvent = (ThreadEvent)ReadScalar();
if (oldEvent != aEvent) {
while (oldEvent != aEvent) {
if (ReadMismatchedEventData(oldEvent)) {
oldEvent = (ThreadEvent)ReadScalar();
continue;
}
DumpEvents();
const char* extra = "";
if (oldEvent == ThreadEvent::Assert) {
// Include the asserted string in the error. This must match up with
@ -158,10 +180,12 @@ void Stream::RecordOrReplayThreadEvent(ThreadEvent aEvent) {
extra = ReadInputString();
}
child::ReportFatalError(
Nothing(), "Event Mismatch: Recorded %s %s Replayed %s",
ThreadEventName(oldEvent), extra, ThreadEventName(aEvent));
"Event Mismatch: Recorded %s %s Replayed %s %s",
ThreadEventName(oldEvent), extra,
ThreadEventName(aEvent), aExtra ? aExtra : "");
}
mLastEvent = aEvent;
PushEvent(ThreadEventName(aEvent));
}
// Check the execution progress counter for events executing on the main
@ -185,8 +209,8 @@ void Stream::CheckInput(size_t aValue) {
} else {
size_t oldValue = ReadScalar();
if (oldValue != aValue) {
child::ReportFatalError(Nothing(),
"Input Mismatch: %s Recorded %llu Replayed %llu",
DumpEvents();
child::ReportFatalError("Input Mismatch: %s Recorded %llu Replayed %llu",
ThreadEventName(mLastEvent), oldValue, aValue);
}
}
@ -208,10 +232,11 @@ void Stream::CheckInput(const char* aValue) {
} else {
const char* oldInput = ReadInputString();
if (strcmp(oldInput, aValue) != 0) {
child::ReportFatalError(Nothing(),
"Input Mismatch: %s Recorded %s Replayed %s",
DumpEvents();
child::ReportFatalError("Input Mismatch: %s Recorded %s Replayed %s",
ThreadEventName(mLastEvent), oldInput, aValue);
}
PushEvent(aValue);
}
}
@ -224,7 +249,8 @@ void Stream::CheckInput(const void* aData, size_t aSize) {
ReadBytes(mInputBallast.get(), aSize);
if (memcmp(aData, mInputBallast.get(), aSize) != 0) {
child::ReportFatalError(Nothing(), "Input Buffer Mismatch: %s",
DumpEvents();
child::ReportFatalError("Input Buffer Mismatch: %s",
ThreadEventName(mLastEvent));
}
}
@ -254,7 +280,7 @@ void Stream::EnsureInputBallast(size_t aSize) {
}
void Stream::Flush(bool aTakeLock) {
MOZ_RELEASE_ASSERT(mFile && mFile->OpenForWriting());
MOZ_RELEASE_ASSERT(mRecording->IsWriting());
if (!mBufferPos) {
return;
@ -270,8 +296,9 @@ void Stream::Flush(bool aTakeLock) {
MOZ_RELEASE_ASSERT((size_t)compressedSize <= bound);
StreamChunkLocation chunk =
mFile->WriteChunk(mBallast.get(), compressedSize, mBufferPos,
mStreamPos - mBufferPos, aTakeLock);
mRecording->WriteChunk(mName, mNameIndex,
mBallast.get(), compressedSize, mBufferPos,
mStreamPos - mBufferPos, aTakeLock);
mChunks.append(chunk);
MOZ_ALWAYS_TRUE(++mChunkIndex == mChunks.length());
@ -283,202 +310,124 @@ size_t Stream::BallastMaxSize() {
return Compression::LZ4::maxCompressedSize(BUFFER_MAX);
}
static bool gDumpEvents;
void Stream::PushEvent(const char* aEvent) {
if (gDumpEvents) {
mEvents.append(strdup(aEvent));
}
}
void Stream::DumpEvents() {
if (gDumpEvents) {
Print("Thread Events: %d\n", Thread::Current()->Id());
for (char* ev : mEvents) {
Print("Event: %s\n", ev);
}
}
}
///////////////////////////////////////////////////////////////////////////////
// File
// Recording
///////////////////////////////////////////////////////////////////////////////
// Information in a file index about a chunk.
struct FileIndexChunk {
Recording::Recording() : mMode(IsRecording() ? WRITE : READ) {
PodZero(&mLock);
PodZero(&mStreamLock);
if (IsReplaying()) {
gDumpEvents = TestEnv("MOZ_REPLAYING_DUMP_EVENTS");
}
}
// The recording format is a series of chunks. Each chunk is a ChunkDescriptor
// followed by the compressed contents of the chunk itself.
struct ChunkDescriptor {
uint32_t /* StreamName */ mName;
uint32_t mNameIndex;
StreamChunkLocation mChunk;
FileIndexChunk() { PodZero(this); }
ChunkDescriptor() { PodZero(this); }
FileIndexChunk(StreamName aName, uint32_t aNameIndex,
const StreamChunkLocation& aChunk)
ChunkDescriptor(StreamName aName, uint32_t aNameIndex,
const StreamChunkLocation& aChunk)
: mName((uint32_t)aName), mNameIndex(aNameIndex), mChunk(aChunk) {}
};
// We expect to find this at every index in a file.
static const uint64_t MagicValue = 0xd3e7f5fae445b3ac;
void Recording::NewContents(const uint8_t* aContents, size_t aSize,
InfallibleVector<Stream*>* aUpdatedStreams) {
// All other recorded threads are idle when adding new contents, so we don't
// have to worry about thread safety here.
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(IsReading());
// Index of chunks in a file. There is an index at the start of the file
// (which is always empty) and at various places within the file itself.
struct FileIndex {
// This should match MagicValue.
uint64_t mMagic;
mContents.append(aContents, aSize);
// How many FileIndexChunk instances follow this structure.
uint32_t mNumChunks;
size_t offset = 0;
while (offset < aSize) {
MOZ_RELEASE_ASSERT(offset + sizeof(ChunkDescriptor) <= aSize);
ChunkDescriptor* desc = (ChunkDescriptor*)(aContents + offset);
offset += sizeof(ChunkDescriptor);
// The location of the next index in the file, or zero.
uint64_t mNextIndexOffset;
explicit FileIndex(uint32_t aNumChunks)
: mMagic(MagicValue), mNumChunks(aNumChunks), mNextIndexOffset(0) {}
};
bool File::Open(const char* aName, Mode aMode) {
MOZ_RELEASE_ASSERT(!mFd);
MOZ_RELEASE_ASSERT(aName);
mMode = aMode;
mFd = DirectOpenFile(aName, mMode == WRITE);
if (OpenForWriting()) {
// Write an empty index at the start of the file.
FileIndex index(0);
DirectWrite(mFd, &index, sizeof(index));
mWriteOffset += sizeof(index);
return true;
}
// Read in every index in the file.
ReadIndexResult result;
do {
result = ReadNextIndex(nullptr);
if (result == ReadIndexResult::InvalidFile) {
return false;
}
} while (result == ReadIndexResult::FoundIndex);
return true;
}
void File::Close() {
if (!mFd) {
return;
}
if (OpenForWriting()) {
Flush();
}
Clear();
}
File::ReadIndexResult File::ReadNextIndex(
InfallibleVector<Stream*>* aUpdatedStreams) {
// Unlike in the Flush() case, we don't have to worry about other threads
// attempting to read data from streams in this file while we are reading
// the new index.
MOZ_ASSERT(OpenForReading());
// Read in the last index to see if there is another one.
DirectSeekFile(mFd, mLastIndexOffset + offsetof(FileIndex, mNextIndexOffset));
uint64_t nextIndexOffset;
if (DirectRead(mFd, &nextIndexOffset, sizeof(nextIndexOffset)) !=
sizeof(nextIndexOffset)) {
return ReadIndexResult::InvalidFile;
}
if (!nextIndexOffset) {
return ReadIndexResult::EndOfFile;
}
mLastIndexOffset = nextIndexOffset;
FileIndex index(0);
DirectSeekFile(mFd, nextIndexOffset);
if (DirectRead(mFd, &index, sizeof(index)) != sizeof(index)) {
return ReadIndexResult::InvalidFile;
}
if (index.mMagic != MagicValue) {
return ReadIndexResult::InvalidFile;
}
MOZ_RELEASE_ASSERT(index.mNumChunks);
size_t indexBytes = index.mNumChunks * sizeof(FileIndexChunk);
FileIndexChunk* chunks = new FileIndexChunk[index.mNumChunks];
if (DirectRead(mFd, chunks, indexBytes) != indexBytes) {
return ReadIndexResult::InvalidFile;
}
for (size_t i = 0; i < index.mNumChunks; i++) {
const FileIndexChunk& indexChunk = chunks[i];
Stream* stream =
OpenStream((StreamName)indexChunk.mName, indexChunk.mNameIndex);
stream->mChunks.append(indexChunk.mChunk);
Stream* stream = OpenStream((StreamName)desc->mName, desc->mNameIndex);
stream->mChunks.append(desc->mChunk);
if (aUpdatedStreams) {
aUpdatedStreams->append(stream);
}
}
delete[] chunks;
return ReadIndexResult::FoundIndex;
MOZ_RELEASE_ASSERT(offset + desc->mChunk.mCompressedSize <= aSize);
offset += desc->mChunk.mCompressedSize;
}
}
bool File::Flush() {
MOZ_ASSERT(OpenForWriting());
void Recording::Flush() {
AutoSpinLock lock(mLock);
InfallibleVector<FileIndexChunk> newChunks;
for (auto& vector : mStreams) {
for (const UniquePtr<Stream>& stream : vector) {
if (stream) {
stream->Flush(/* aTakeLock = */ false);
for (size_t i = stream->mFlushedChunks; i < stream->mChunkIndex; i++) {
newChunks.emplaceBack(stream->mName, stream->mNameIndex,
stream->mChunks[i]);
}
stream->mFlushedChunks = stream->mChunkIndex;
}
}
}
if (newChunks.empty()) {
return false;
}
// Write the new index information at the end of the file.
uint64_t indexOffset = mWriteOffset;
size_t indexBytes = newChunks.length() * sizeof(FileIndexChunk);
FileIndex index(newChunks.length());
DirectWrite(mFd, &index, sizeof(index));
DirectWrite(mFd, newChunks.begin(), indexBytes);
mWriteOffset += sizeof(index) + indexBytes;
// Update the next index offset for the last index written.
MOZ_RELEASE_ASSERT(sizeof(index.mNextIndexOffset) == sizeof(indexOffset));
DirectSeekFile(mFd, mLastIndexOffset + offsetof(FileIndex, mNextIndexOffset));
DirectWrite(mFd, &indexOffset, sizeof(indexOffset));
DirectSeekFile(mFd, mWriteOffset);
mLastIndexOffset = indexOffset;
return true;
}
StreamChunkLocation File::WriteChunk(const char* aStart, size_t aCompressedSize,
size_t aDecompressedSize,
uint64_t aStreamPos, bool aTakeLock) {
StreamChunkLocation Recording::WriteChunk(StreamName aName, size_t aNameIndex,
const char* aStart,
size_t aCompressedSize,
size_t aDecompressedSize,
uint64_t aStreamPos, bool aTakeLock) {
Maybe<AutoSpinLock> lock;
if (aTakeLock) {
lock.emplace(mLock);
}
StreamChunkLocation chunk;
chunk.mOffset = mWriteOffset;
chunk.mOffset = mContents.length() + sizeof(ChunkDescriptor);
chunk.mCompressedSize = aCompressedSize;
chunk.mDecompressedSize = aDecompressedSize;
chunk.mHash = HashBytes(aStart, aCompressedSize);
chunk.mStreamPos = aStreamPos;
DirectWrite(mFd, aStart, aCompressedSize);
mWriteOffset += aCompressedSize;
ChunkDescriptor desc;
desc.mName = (uint32_t) aName;
desc.mNameIndex = aNameIndex;
desc.mChunk = chunk;
mContents.append((const uint8_t*)&desc, sizeof(ChunkDescriptor));
mContents.append(aStart, aCompressedSize);
return chunk;
}
void File::ReadChunk(char* aDest, const StreamChunkLocation& aChunk) {
void Recording::ReadChunk(char* aDest, const StreamChunkLocation& aChunk) {
AutoSpinLock lock(mLock);
DirectSeekFile(mFd, aChunk.mOffset);
size_t res = DirectRead(mFd, aDest, aChunk.mCompressedSize);
MOZ_RELEASE_ASSERT(res == aChunk.mCompressedSize);
MOZ_RELEASE_ASSERT(aChunk.mOffset + aChunk.mCompressedSize <= mContents.length());
memcpy(aDest, mContents.begin() + aChunk.mOffset, aChunk.mCompressedSize);
MOZ_RELEASE_ASSERT(HashBytes(aDest, aChunk.mCompressedSize) == aChunk.mHash);
}
Stream* File::OpenStream(StreamName aName, size_t aNameIndex) {
Stream* Recording::OpenStream(StreamName aName, size_t aNameIndex) {
AutoSpinLock lock(mLock);
auto& vector = mStreams[(size_t)aName];

View file

@ -4,8 +4,8 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_File_h
#define mozilla_recordreplay_File_h
#ifndef mozilla_recordreplay_Recording_h
#define mozilla_recordreplay_Recording_h
#include "InfallibleVector.h"
#include "ProcessRecordReplay.h"
@ -14,30 +14,28 @@
#include "mozilla/PodOperations.h"
#include "mozilla/RecordReplay.h"
#include "mozilla/UniquePtr.h"
#include "nsString.h"
namespace mozilla {
namespace recordreplay {
// Structure managing file I/O. Each file contains an index for a set of named
// streams, whose contents are compressed and interleaved throughout the file.
// Additionally, we directly manage the file handle and all associated memory.
// This makes it easier to restore memory snapshots without getting confused
// about the state of the file handles which the process has opened. Data
// written and read from files is automatically compressed with LZ4.
// Representation of the recording which is written to by recording processes
// and read from by replaying processes. The recording encapsulates a set of
// streams of data. While recording, these streams grow independently from one
// another, and when the recording is flushed the streams contents are collated
// into a single stream of bytes which can be saved to disk or sent to other
// processes via IPC or network connections.
//
// Files are used internally for any disk accesses which the record/replay
// infrastructure needs to make. Currently, this is only for accessing the
// recording file.
//
// File is threadsafe for simultaneous read/read and write/write accesses.
// Data in the recording is automatically compressed with LZ4. The Recording
// object is threadsafe for simultaneous read/read and write/write accesses.
// Stream is not threadsafe.
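As a rough sketch (not part of this patch) of the intended flow: the writer and reader run in different processes, and how the flushed bytes travel between them (disk, IPC, or a network connection) is outside this class. OpenStream, Flush, Data, Size and NewContents are the API declared below; the scalar helpers on Stream are used here purely for illustration.

// Sketch only. In the recording process (mode == WRITE):
void WriterSideSketch(Recording* aRecording) {
  Stream* events = aRecording->OpenStream(StreamName::Event, /* aNameIndex */ 1);
  events->WriteScalar(42);
  aRecording->Flush();
  // Ship aRecording->Data() / aRecording->Size() to the replaying side.
}

// Sketch only. In the replaying process (mode == READ):
void ReaderSideSketch(Recording* aRecording,
                      const uint8_t* aBytes, size_t aNumBytes) {
  aRecording->NewContents(aBytes, aNumBytes, /* aUpdatedStreams */ nullptr);
  Stream* events = aRecording->OpenStream(StreamName::Event, /* aNameIndex */ 1);
  size_t value = events->ReadScalar();
  MOZ_RELEASE_ASSERT(value == 42);  // replays the scalar written above
}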
// A location of a chunk of a stream within a file.
// A location of a chunk of a stream within a recording.
struct StreamChunkLocation {
// Offset into the file of the start of the chunk.
// Offset into the recording of the start of the chunk.
uint64_t mOffset;
// Compressed (stored) size of the chunk.
// Compressed size of the chunk, as stored in the recording.
uint32_t mCompressedSize;
// Decompressed size of the chunk.
@ -50,34 +48,48 @@ struct StreamChunkLocation {
uint64_t mStreamPos;
};
enum class StreamName { Main, Lock, Event, Count };
enum class StreamName {
// Per-thread list of events.
Event,
class File;
// Per-lock list of threads in acquire order.
Lock,
// Single stream containing endpoints of recording after flushing.
Endpoint,
// Single stream describing recording sections to skip for local replay.
LocalReplaySkip,
Count
};
class Recording;
class RecordingEventSection;
class Stream {
friend class File;
friend class Recording;
friend class RecordingEventSection;
// File this stream belongs to.
File* mFile;
// Recording this stream belongs to.
Recording* mRecording;
// Prefix name for this stream.
StreamName mName;
// Index which, when combined to mName, uniquely identifies this stream in
// the file.
// Index which, when combined with mName, uniquely identifies this stream in
// the recording.
size_t mNameIndex;
// When writing, all chunks that have been flushed to disk. When reading, all
// chunks in the entire stream.
// All chunks of data in the stream.
InfallibleVector<StreamChunkLocation> mChunks;
// Data buffer.
UniquePtr<char[]> mBuffer;
// The maximum number of bytes to buffer before compressing and writing to
// disk, and the maximum number of bytes that can be decompressed at once.
// the recording, and the maximum number of bytes that can be decompressed at
// once.
static const size_t BUFFER_MAX = 1024 * 1024;
// The capacity of mBuffer, at most BUFFER_MAX.
@ -107,15 +119,15 @@ class Stream {
// writing, this equals mChunks.length().
size_t mChunkIndex;
// When writing, the number of chunks in this stream when the file was last
// flushed.
size_t mFlushedChunks;
// Whether there is a RecordingEventSection instance active for this stream.
bool mInRecordingEventSection;
Stream(File* aFile, StreamName aName, size_t aNameIndex)
: mFile(aFile),
// When replaying and MOZ_REPLAYING_DUMP_EVENTS is set, this describes all
// events in the stream we have replayed so far.
InfallibleVector<char*> mEvents;
Stream(Recording* aRecording, StreamName aName, size_t aNameIndex)
: mRecording(aRecording),
mName(aName),
mNameIndex(aNameIndex),
mBuffer(nullptr),
@ -129,7 +141,6 @@ class Stream {
mInputBallastSize(0),
mLastEvent((ThreadEvent)0),
mChunkIndex(0),
mFlushedChunks(0),
mInRecordingEventSection(false) {}
public:
@ -166,7 +177,7 @@ class Stream {
// Note a new thread event for this stream, and make sure it is the same
// while replaying as it was while recording.
void RecordOrReplayThreadEvent(ThreadEvent aEvent);
void RecordOrReplayThreadEvent(ThreadEvent aEvent, const char* aExtra = nullptr);
// Replay a thread event without requiring it to be a specific event.
ThreadEvent ReplayThreadEvent();
@ -189,9 +200,14 @@ class Stream {
const char* ReadInputString();
static size_t BallastMaxSize();
void PushEvent(const char* aEvent);
void DumpEvents();
bool ReadMismatchedEventData(ThreadEvent aEvent);
};
class File {
class Recording {
public:
enum Mode { WRITE, READ };
@ -199,70 +215,53 @@ class File {
friend class RecordingEventSection;
private:
// Open file handle, or 0 if closed.
FileHandle mFd;
// Whether this recording is for writing or reading.
Mode mMode = READ;
// Whether this file is open for writing or reading.
Mode mMode;
// When writing, all contents that have been flushed so far. When reading,
// all known contents. When writing, existing parts of the recording are not
// modified: the recording can only grow.
InfallibleVector<uint8_t> mContents;
// When writing, the current offset into the file.
uint64_t mWriteOffset;
// The offset of the last index read or written to the file.
uint64_t mLastIndexOffset;
// All streams in this file, indexed by stream name and name index.
// All streams in this recording, indexed by stream name and name index.
typedef InfallibleVector<UniquePtr<Stream>> StreamVector;
StreamVector mStreams[(size_t)StreamName::Count];
// Lock protecting access to this file.
// Lock protecting access to this recording.
SpinLock mLock;
// When writing, lock for synchronizing file flushes (writer) with other
// threads writing to streams in this file (readers).
// When writing, lock for synchronizing flushes (writer) with other threads
// writing to streams in this recording (readers).
ReadWriteSpinLock mStreamLock;
void Clear() {
mFd = 0;
mMode = READ;
mWriteOffset = 0;
mLastIndexOffset = 0;
for (auto& vector : mStreams) {
vector.clear();
}
PodZero(&mLock);
PodZero(&mStreamLock);
}
public:
File() { Clear(); }
~File() { Close(); }
Recording();
bool Open(const char* aName, Mode aMode);
void Close();
bool IsWriting() const { return mMode == WRITE; }
bool IsReading() const { return mMode == READ; }
bool OpenForWriting() const { return mFd && mMode == WRITE; }
bool OpenForReading() const { return mFd && mMode == READ; }
const uint8_t* Data() const { return mContents.begin(); }
size_t Size() const { return mContents.length(); }
// Get or create a stream in this recording.
Stream* OpenStream(StreamName aName, size_t aNameIndex);
// Prevent/allow other threads to write to streams in this file.
// When reading, append additional contents to this recording.
// aUpdatedStreams is optional and filled in with streams whose contents have
// changed, and may have duplicates.
void NewContents(const uint8_t* aContents, size_t aSize,
InfallibleVector<Stream*>* aUpdatedStreams);
// Prevent/allow other threads to write to streams in this recording.
void PreventStreamWrites() { mStreamLock.WriteLock(); }
void AllowStreamWrites() { mStreamLock.WriteUnlock(); }
// Flush any changes since the last Flush() call to disk, returning whether
// there were such changes.
bool Flush();
enum class ReadIndexResult { InvalidFile, EndOfFile, FoundIndex };
// Read any data added to the file by a Flush() call. aUpdatedStreams is
// optional and filled in with streams whose contents have changed, and may
// have duplicates.
ReadIndexResult ReadNextIndex(InfallibleVector<Stream*>* aUpdatedStreams);
// Flush all streams to the recording.
void Flush();
private:
StreamChunkLocation WriteChunk(const char* aStart, size_t aCompressedSize,
StreamChunkLocation WriteChunk(StreamName aName, size_t aNameIndex,
const char* aStart, size_t aCompressedSize,
size_t aDecompressedSize, uint64_t aStreamPos,
bool aTakeLock);
void ReadChunk(char* aDest, const StreamChunkLocation& aChunk);
@ -271,4 +270,4 @@ class File {
} // namespace recordreplay
} // namespace mozilla
#endif // mozilla_recordreplay_File_h
#endif // mozilla_recordreplay_Recording_h

View file

@ -12,7 +12,6 @@
#include "mozilla/StaticMutex.h"
#include "mozilla/ThreadLocal.h"
#include "ChunkAllocator.h"
#include "MemorySnapshot.h"
#include "ProcessRewind.h"
#include "SpinLock.h"
#include "ThreadSnapshot.h"
@ -24,22 +23,9 @@ namespace recordreplay {
// Thread Organization
///////////////////////////////////////////////////////////////////////////////
static MOZ_THREAD_LOCAL(Thread*) gTlsThreadKey;
/* static */
Monitor* Thread::gMonitor;
/* static */
Thread* Thread::Current() {
MOZ_ASSERT(IsRecordingOrReplaying());
Thread* thread = gTlsThreadKey.get();
if (!thread && IsReplaying()) {
// Disable system threads when replaying.
WaitForeverNoIdle();
}
return thread;
}
/* static */
bool Thread::CurrentIsMainThread() {
Thread* thread = Current();
@ -47,27 +33,23 @@ bool Thread::CurrentIsMainThread() {
}
void Thread::BindToCurrent() {
MOZ_ASSERT(!mStackBase);
gTlsThreadKey.set(this);
pthread_t self = DirectCurrentThread();
size_t size = pthread_get_stacksize_np(self);
uint8_t* base = (uint8_t*)pthread_get_stackaddr_np(self) - size;
mNativeId = pthread_self();
size_t size = pthread_get_stacksize_np(mNativeId);
uint8_t* base = (uint8_t*)pthread_get_stackaddr_np(mNativeId) - size;
// Lock if we will be notifying later on. We don't do this for the main
// thread because we haven't initialized enough state yet that we can use
// a monitor.
Maybe<MonitorAutoLock> lock;
if (mId != MainThreadId) {
lock.emplace(*gMonitor);
if (IsMainThread()) {
mStackBase = base;
mStackSize = size;
} else {
MOZ_RELEASE_ASSERT(base == mStackBase);
MOZ_RELEASE_ASSERT(size == mStackSize);
}
mStackBase = base;
mStackSize = size;
if (!IsMainThread() && !mMachId) {
MOZ_RELEASE_ASSERT(this == Current());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
// Notify WaitUntilInitialized if it is waiting for this thread to start.
if (mId != MainThreadId) {
gMonitor->NotifyAll();
mMachId = RecordReplayValue(IsRecording() ? mach_thread_self() : 0);
}
}
@ -76,14 +58,14 @@ static Thread* gThreads;
/* static */
Thread* Thread::GetById(size_t aId) {
MOZ_ASSERT(aId);
MOZ_ASSERT(aId <= MaxThreadId);
MOZ_RELEASE_ASSERT(aId);
MOZ_RELEASE_ASSERT(aId <= MaxThreadId);
return &gThreads[aId];
}
/* static */
Thread* Thread::GetByNativeId(NativeThreadId aNativeId) {
for (size_t id = MainThreadId; id <= MaxRecordedThreadId; id++) {
for (size_t id = MainThreadId; id <= MaxThreadId; id++) {
Thread* thread = GetById(id);
if (thread->mNativeId == aNativeId) {
return thread;
@ -92,48 +74,72 @@ Thread* Thread::GetByNativeId(NativeThreadId aNativeId) {
return nullptr;
}
static uint8_t* gThreadStackMemory = nullptr;
static const size_t ThreadStackSize = 2 * 1024 * 1024;
/* static */
Thread* Thread::GetByStackPointer(void* aSp) {
Thread* Thread::Current() {
MOZ_ASSERT(IsRecordingOrReplaying());
if (!gThreads) {
return nullptr;
}
for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
Thread* thread = &gThreads[i];
if (MemoryContains(thread->mStackBase, thread->mStackSize, aSp)) {
return thread;
uint8_t* ptr = (uint8_t*)&ptr;
Thread* mainThread = GetById(MainThreadId);
if (MemoryContains(mainThread->mStackBase, mainThread->mStackSize, ptr)) {
return mainThread;
}
if (ptr >= gThreadStackMemory) {
size_t id = MainThreadId + 1 + (ptr - gThreadStackMemory) / ThreadStackSize;
if (id <= MaxThreadId) {
return GetById(id);
}
}
return nullptr;
}
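A worked example of the arithmetic above, with values assumed for illustration (MainThreadId == 1, and ThreadStackSize == 2 MiB as defined above):

// A stack pointer 5 MiB past gThreadStackMemory falls in the third
// non-main stack block:
//   id = MainThreadId + 1 + (5 MiB / 2 MiB) = 1 + 1 + 2 = 4
// which matches the layout in InitializeThreads(), where thread 4's stack
// base is gThreadStackMemory + (4 - MainThreadId - 1) * ThreadStackSize
// = gThreadStackMemory + 4 MiB, covering [4 MiB, 6 MiB).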
static int gWaitForeverFd;
/* static */
void Thread::InitializeThreads() {
FileHandle writeFd, readFd;
DirectCreatePipe(&writeFd, &readFd);
gWaitForeverFd = readFd;
gThreads = new Thread[MaxThreadId + 1];
size_t nbytes = (MaxThreadId - MainThreadId) * ThreadStackSize;
gThreadStackMemory = (uint8_t*) DirectAllocateMemory(nbytes);
for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
Thread* thread = &gThreads[i];
PodZero(thread);
new (thread) Thread();
thread->mId = i;
thread->mEvents = gRecording->OpenStream(StreamName::Event, i);
if (i <= MaxRecordedThreadId) {
thread->mEvents = gRecordingFile->OpenStream(StreamName::Event, i);
if (i == MainThreadId) {
thread->BindToCurrent();
thread->mNativeId = DirectCurrentThread();
} else {
thread->mStackBase = gThreadStackMemory + (i - MainThreadId - 1) * ThreadStackSize;
thread->mStackSize = ThreadStackSize - PageSize * 2;
// Make some memory between thread stacks inaccessible so that breakpad
// can tell the different thread stacks apart.
DirectMakeInaccessible(thread->mStackBase + ThreadStackSize - PageSize,
PageSize);
thread->SetPassThrough(true);
}
DirectCreatePipe(&thread->mNotifyfd, &thread->mIdlefd);
}
if (!gTlsThreadKey.init()) {
MOZ_CRASH();
}
}
/* static */
void Thread::WaitUntilInitialized(Thread* aThread) {
MonitorAutoLock lock(*gMonitor);
while (!aThread->mStackBase) {
gMonitor->Wait();
}
}
/* static */
@ -143,8 +149,19 @@ void Thread::ThreadMain(void* aArgument) {
Thread* thread = (Thread*)aArgument;
MOZ_ASSERT(thread->mId > MainThreadId);
// mMachId is set in BindToCurrent, which already ran if we forked and then
// respawned this thread.
bool forked = !!thread->mMachId;
thread->SetPassThrough(false);
thread->BindToCurrent();
if (forked) {
AutoPassThroughThreadEvents pt;
thread->ReleaseOrAcquireOwnedLocks(OwnedLockState::NeedAcquire);
RestoreThreadStack(thread->Id());
}
while (true) {
// Wait until this thread has been given a start routine.
while (true) {
@ -157,11 +174,7 @@ void Thread::ThreadMain(void* aArgument) {
Wait();
}
{
Maybe<AutoPassThroughThreadEvents> pt;
if (!thread->IsRecordedThread()) pt.emplace();
thread->mStart(thread->mStartArg);
}
thread->mStart(thread->mStartArg);
MonitorAutoLock lock(*gMonitor);
@ -179,44 +192,45 @@ void Thread::ThreadMain(void* aArgument) {
void Thread::SpawnAllThreads() {
MOZ_ASSERT(AreThreadEventsPassedThrough());
InitializeThreadSnapshots(MaxRecordedThreadId + 1);
InitializeThreadSnapshots();
gMonitor = new Monitor();
// All Threads are spawned up front. This allows threads to be scanned
// (e.g. in ReplayUnlock) without worrying about racing with other threads
// being spawned.
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
SpawnThread(GetById(i));
for (size_t i = MainThreadId + 1; i <= MaxThreadId; i++) {
// mNativeId reflects the ID when the original process started, ignoring
// any IDs of threads that are respawned after forking.
Thread* thread = GetById(i);
thread->mNativeId = SpawnThread(thread);
}
}
// The number of non-recorded threads that have been spawned.
static Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve>
gNumNonRecordedThreads;
/* static */
Thread* Thread::SpawnNonRecordedThread(Callback aStart, void* aArgument) {
if (IsMiddleman()) {
DirectSpawnThread(aStart, aArgument);
return nullptr;
}
size_t id = MaxRecordedThreadId + ++gNumNonRecordedThreads;
MOZ_RELEASE_ASSERT(id <= MaxThreadId);
Thread* thread = GetById(id);
thread->mStart = aStart;
thread->mStartArg = aArgument;
SpawnThread(thread);
return thread;
void Thread::SpawnNonRecordedThread(Callback aStart, void* aArgument) {
DirectSpawnThread(aStart, aArgument, nullptr, 0);
}
/* static */
void Thread::SpawnThread(Thread* aThread) {
DirectSpawnThread(ThreadMain, aThread);
WaitUntilInitialized(aThread);
void Thread::RespawnAllThreadsAfterFork() {
MOZ_ASSERT(AreThreadEventsPassedThrough());
for (size_t id = MainThreadId; id <= MaxThreadId; id++) {
Thread* thread = GetById(id);
DirectCloseFile(thread->mNotifyfd);
DirectCloseFile(thread->mIdlefd);
DirectCreatePipe(&thread->mNotifyfd, &thread->mIdlefd);
if (!thread->IsMainThread()) {
SaveThreadStack(id);
SpawnThread(thread);
}
}
}
/* static */
NativeThreadId Thread::SpawnThread(Thread* aThread) {
return DirectSpawnThread(ThreadMain, aThread, aThread->mStackBase,
aThread->mStackSize);
}
/* static */
@ -233,16 +247,16 @@ NativeThreadId Thread::StartThread(Callback aStart, void* aArgument,
size_t id = 0;
if (IsRecording()) {
// Look for an idle thread.
for (id = MainThreadId + 1; id <= MaxRecordedThreadId; id++) {
for (id = MainThreadId + 1; id <= MaxThreadId; id++) {
Thread* targetThread = Thread::GetById(id);
if (!targetThread->mStart && !targetThread->mNeedsJoin) {
break;
}
}
if (id >= MaxRecordedThreadId) {
child::ReportFatalError(Nothing(), "Too many threads");
if (id > MaxThreadId) {
child::ReportFatalError("Too many threads");
}
MOZ_RELEASE_ASSERT(id <= MaxRecordedThreadId);
MOZ_RELEASE_ASSERT(id <= MaxThreadId);
}
thread->Events().RecordOrReplayThreadEvent(ThreadEvent::CreateThread);
thread->Events().RecordOrReplayScalar(&id);
@ -269,7 +283,7 @@ NativeThreadId Thread::StartThread(Callback aStart, void* aArgument,
void Thread::Join() {
MOZ_ASSERT(!AreThreadEventsPassedThrough());
EnsureNotDivergedFromRecording();
EnsureNotDivergedFromRecording(Nothing());
while (true) {
MonitorAutoLock lock(*gMonitor);
@ -282,6 +296,62 @@ void Thread::Join() {
}
}
void Thread::AddOwnedLock(NativeLock* aNativeLock) {
mOwnedLocks.append(aNativeLock);
}
void Thread::RemoveOwnedLock(NativeLock* aNativeLock) {
for (int i = mOwnedLocks.length() - 1; i >= 0; i--) {
if (mOwnedLocks[i] == aNativeLock) {
mOwnedLocks.erase(&mOwnedLocks[i]);
return;
}
}
MOZ_CRASH("RemoveOwnedLock");
}
void Thread::ReleaseOrAcquireOwnedLocks(OwnedLockState aState) {
MOZ_RELEASE_ASSERT(aState != OwnedLockState::None);
for (NativeLock* lock : mOwnedLocks) {
if (aState == OwnedLockState::NeedRelease) {
DirectUnlockMutex(lock, /* aPassThroughEvents */ false);
} else {
DirectLockMutex(lock, /* aPassThroughEvents */ false);
}
}
}
void** Thread::GetOrCreateStorage(uintptr_t aKey) {
for (StorageEntry** pentry = &mStorageEntries; *pentry; pentry = &(*pentry)->mNext) {
StorageEntry* entry = *pentry;
if (entry->mKey == aKey) {
// Put this at the front of the list.
*pentry = entry->mNext;
entry->mNext = mStorageEntries;
mStorageEntries = entry;
return &entry->mData;
}
}
StorageEntry* entry = (StorageEntry*) AllocateStorage(sizeof(StorageEntry));
entry->mKey = aKey;
entry->mData = 0;
entry->mNext = mStorageEntries;
mStorageEntries = entry;
return &entry->mData;
}
uint8_t* Thread::AllocateStorage(size_t aSize) {
// malloc uses TLS, so go directly to the system to allocate TLS storage.
if (mStorageCursor + aSize >= mStorageLimit) {
size_t nbytes = std::max(aSize, PageSize);
mStorageCursor = (uint8_t*) DirectAllocateMemory(nbytes);
mStorageLimit = mStorageCursor + nbytes;
}
uint8_t* res = mStorageCursor;
mStorageCursor += aSize;
return res;
}
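// Illustrative sketch (assumption, not part of this patch): a pthread TLS
// redirection could route per-key values through the emulated storage above
// for non-main threads, so the entries survive a fork and thread respawn.
// The function names are hypothetical.
static void SetThreadStorageSketch(pthread_key_t aKey, const void* aValue) {
  Thread* thread = Thread::Current();
  if (thread && !thread->IsMainThread()) {
    *thread->GetOrCreateStorage((uintptr_t)aKey) = const_cast<void*>(aValue);
  } else {
    pthread_setspecific(aKey, aValue);
  }
}

static void* GetThreadStorageSketch(pthread_key_t aKey) {
  Thread* thread = Thread::Current();
  return (thread && !thread->IsMainThread())
             ? *thread->GetOrCreateStorage((uintptr_t)aKey)
             : pthread_getspecific(aKey);
}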
///////////////////////////////////////////////////////////////////////////////
// Thread Public API Accessors
///////////////////////////////////////////////////////////////////////////////
@ -342,14 +412,14 @@ void Thread::WaitForIdleThreads() {
MOZ_RELEASE_ASSERT(CurrentIsMainThread());
MonitorAutoLock lock(*gMonitor);
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
for (size_t i = MainThreadId + 1; i <= MaxThreadId; i++) {
Thread* thread = GetById(i);
thread->mShouldIdle = true;
thread->mUnrecordedWaitNotified = false;
}
while (true) {
bool done = true;
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
for (size_t i = MainThreadId + 1; i <= MaxThreadId; i++) {
Thread* thread = GetById(i);
if (!thread->mIdle) {
done = false;
@ -392,16 +462,27 @@ void Thread::WaitForIdleThreads() {
}
/* static */
void Thread::ResumeSingleIdleThread(size_t aId) {
GetById(aId)->mShouldIdle = false;
Notify(aId);
void Thread::OperateOnIdleThreadLocks(OwnedLockState aState) {
MOZ_RELEASE_ASSERT(CurrentIsMainThread());
MOZ_RELEASE_ASSERT(aState != OwnedLockState::None);
for (size_t i = MainThreadId + 1; i <= MaxThreadId; i++) {
Thread* thread = GetById(i);
if (thread->mOwnedLocks.length()) {
thread->mOwnedLockState = aState;
Notify(i);
while (thread->mOwnedLockState != OwnedLockState::None) {
WaitNoIdle();
}
}
}
}
/* static */
void Thread::ResumeIdleThreads() {
MOZ_RELEASE_ASSERT(CurrentIsMainThread());
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
ResumeSingleIdleThread(i);
for (size_t i = MainThreadId + 1; i <= MaxThreadId; i++) {
GetById(i)->mShouldIdle = false;
Notify(i);
}
}
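// Illustrative sketch (assumption, not part of this patch) of how the main
// thread might order these routines around a fork. ForkProcessSketch() is a
// stand-in for whatever wrapper performs the actual fork() and returns true
// in the parent process.
static bool ForkProcessSketch() { return true; }

static void ForkWhileThreadsIdleSketch() {
  Thread::WaitForIdleThreads();

  // Ask the idle threads to drop their recorded locks, so the child does not
  // inherit mutexes owned by threads that will not exist until respawned.
  Thread::OperateOnIdleThreadLocks(Thread::OwnedLockState::NeedRelease);

  if (ForkProcessSketch()) {
    // Parent: the still-idle threads retake their locks and then resume.
    Thread::OperateOnIdleThreadLocks(Thread::OwnedLockState::NeedAcquire);
    Thread::ResumeIdleThreads();
  } else {
    // Child: only the main thread survived the fork. Respawned threads
    // reacquire their own locks and restore their stacks in ThreadMain.
    Thread::RespawnAllThreadsAfterFork();
  }
}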
@ -430,7 +511,7 @@ void Thread::NotifyUnrecordedWait(
}
}
bool Thread::MaybeWaitForSnapshot(
bool Thread::MaybeWaitForFork(
const std::function<void()>& aReleaseCallback) {
MOZ_RELEASE_ASSERT(!PassThroughEvents());
if (IsMainThread()) {
@ -459,8 +540,6 @@ void Thread::WaitNoIdle() {
/* static */
void Thread::Wait() {
Thread* thread = Current();
MOZ_ASSERT(!thread->mIdle);
MOZ_ASSERT(thread->IsRecordedThread() && !thread->PassThroughEvents());
if (thread->IsMainThread()) {
WaitNoIdle();
@ -475,7 +554,7 @@ void Thread::Wait() {
thread->SetPassThrough(true);
int stackSeparator = 0;
if (!SaveThreadState(thread->Id(), &stackSeparator)) {
// We just restored a checkpoint, notify the main thread since it is waiting
// We just installed a stack, notify the main thread since it is waiting
// for all threads to restore their stacks.
Notify(MainThreadId);
}
@ -487,15 +566,15 @@ void Thread::Wait() {
}
do {
// Release or reacquire owned locks if the main thread asked us to.
if (thread->mOwnedLockState != OwnedLockState::None) {
thread->ReleaseOrAcquireOwnedLocks(thread->mOwnedLockState);
thread->mOwnedLockState = OwnedLockState::None;
Notify(MainThreadId);
}
// Do the actual waiting for another thread to notify this one.
WaitNoIdle();
// Rewind this thread if the main thread told us to do so. The main
// thread is responsible for rewinding its own stack.
if (ShouldRestoreThreadStack(thread->Id())) {
RestoreThreadStack(thread->Id());
Unreachable();
}
} while (thread->mShouldIdle);
thread->mIdle = false;
@ -512,11 +591,9 @@ void Thread::WaitForever() {
/* static */
void Thread::WaitForeverNoIdle() {
FileHandle writeFd, readFd;
DirectCreatePipe(&writeFd, &readFd);
while (true) {
uint8_t data;
DirectRead(readFd, &data, 1);
DirectRead(gWaitForeverFd, &data, 1);
}
}
@ -529,7 +606,7 @@ void Thread::Notify(size_t aId) {
/* static */
size_t Thread::TotalEventProgress() {
size_t result = 0;
for (size_t id = MainThreadId; id <= MaxRecordedThreadId; id++) {
for (size_t id = MainThreadId; id <= MaxThreadId; id++) {
Thread* thread = GetById(id);
// Accessing the stream position here is racy. The returned value is used to
@ -8,12 +8,11 @@
#define mozilla_recordreplay_Thread_h
#include "mozilla/Atomics.h"
#include "File.h"
#include "Recording.h"
#include "Lock.h"
#include "Monitor.h"
#include <pthread.h>
#include <setjmp.h>
namespace mozilla {
namespace recordreplay {
@ -39,9 +38,9 @@ namespace recordreplay {
// allows the process to rewind itself without needing to spawn or destroy
// any threads.
//
// 2. Some additional number of threads are spawned for use by the IPC and
// memory snapshot mechanisms. These have associated Thread
// structures but are not recorded and always pass through thread events.
// 2. Some additional number of threads are spawned for use by IPC. These have
// associated Thread structures but are not recorded and always pass through
// thread events.
//
// 3. All recorded threads must be able to enter a particular blocking
// state, under Thread::Wait, when requested by the main thread calling
@ -49,36 +48,37 @@ namespace recordreplay {
// thread attempts to take a recorded lock and blocks in Lock::Wait.
// For other threads (any thread which has diverged from the recording,
// or JS helper threads even when no recording divergence has occurred),
// NotifyUnrecordedWait and MaybeWaitForSnapshot are used to enter
// this state when the thread performs a blocking operation.
// NotifyUnrecordedWait and MaybeWaitForFork are used to enter this state
// when the thread performs a blocking operation.
//
// 4. Once all recorded threads are idle, the main thread is able to record
// memory snapshots and thread stacks for later rewinding. Additional
// threads created for #2 above do not idle and do not have their state
// included in snapshots, but they are designed to avoid interfering with
// the main thread while it is taking or restoring a checkpoint.
// 4. Once all recorded threads are idle, the main thread is able to fork.
// Additional threads created for #2 above do not idle, but they are designed
// to avoid interfering with the main thread while it forks.
// The ID used by the process main thread.
static const size_t MainThreadId = 1;
// The maximum ID useable by recorded threads.
static const size_t MaxRecordedThreadId = 70;
static const size_t MaxThreadId = 70;
// The maximum number of threads which are not recorded but need a Thread so
// that they can participate in e.g. Wait/Notify calls.
static const size_t MaxNumNonRecordedThreads = 12;
static const size_t MaxThreadId =
MaxRecordedThreadId + MaxNumNonRecordedThreads;
typedef pthread_t NativeThreadId;
// Information about the execution state of a thread.
// Information about the execution state of a recorded thread.
class Thread {
public:
// Signature for the start function of a thread.
typedef void (*Callback)(void*);
// Actions a thread can take with its owned locks.
enum OwnedLockState {
// No action by the thread is needed.
None,
// The thread must release all of its owned locks.
NeedRelease,
// The thread must acquire all of its owned locks.
NeedAcquire,
};
private:
// Monitor used to protect various thread information (see Thread.h) and to
// wait on or signal progress for a thread.
@ -116,6 +116,12 @@ class Thread {
// ID for this thread used by the system.
NativeThreadId mNativeId;
// On macOS, any thread ID given by mach_thread_self. This originates from the
// recording and does not correspond with a system resource when replaying.
// It is stored here so that we can provide a consistent ID after the process
// forks and we diverge from the recording.
uintptr_t mMachId;
// Stream with events for the thread. This is only used on the thread itself.
Stream* mEvents;
@ -135,6 +141,10 @@ class Thread {
// Whether the thread is waiting on idlefd.
Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mIdle;
// While the thread is idling, whether to release or acquire its owned locks.
Atomic<OwnedLockState, SequentiallyConsistent, Behavior::DontPreserve>
mOwnedLockState;
// Any callback which should be invoked so the thread can make progress,
// and whether the callback has been invoked yet while the main thread is
// waiting for threads to become idle. Protected by the thread monitor.
@ -144,7 +154,31 @@ class Thread {
// Identifier of any atomic which this thread currently holds.
Maybe<size_t> mAtomicLockId;
// While replaying, recorded locks which this thread owns.
InfallibleVector<NativeLock*> mOwnedLocks;
// Thread local storage, used for non-main threads when replaying.
// This emulates pthread TLS entries. By associating these TLS entries with
// the Thread itself, they will be preserved if we fork and then respawn all
// threads.
struct StorageEntry {
uintptr_t mKey;
void* mData;
StorageEntry* mNext;
};
StorageEntry* mStorageEntries;
uint8_t* mStorageCursor;
uint8_t* mStorageLimit;
uint8_t* AllocateStorage(size_t aSize);
public:
// These are used by certain redirections to convey information from the
// SaveOutput hook to the MiddlemanCall hook.
uintptr_t mRedirectionValue;
InfallibleVector<char> mRedirectionData;
///////////////////////////////////////////////////////////////////////////////
// Public Routines
///////////////////////////////////////////////////////////////////////////////
@ -157,10 +191,6 @@ class Thread {
size_t StackSize() { return mStackSize; }
inline bool IsMainThread() const { return mId == MainThreadId; }
inline bool IsRecordedThread() const { return mId <= MaxRecordedThreadId; }
inline bool IsNonMainRecordedThread() const {
return IsRecordedThread() && !IsMainThread();
}
// Access the flag for whether this thread is passing events through.
void SetPassThrough(bool aPassThrough) {
@ -205,6 +235,11 @@ class Thread {
!HasDivergedFromRecording();
}
// Get the macOS mach identifier for this thread.
uintptr_t GetMachId() const {
return mMachId;
}
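// Illustrative sketch (assumption, not part of this patch): a redirection for
// mach_thread_self() might hand out this recorded identifier so that forked
// replaying processes keep reporting a stable value, e.g.
//
//   uintptr_t CurrentMachThreadIdSketch() {
//     Thread* thread = Thread::Current();
//     return thread ? thread->GetMachId() : 0;
//   }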
// The actual start routine at the root of all recorded threads, and of all
// threads when replaying.
static void ThreadMain(void* aArgument);
@ -225,19 +260,20 @@ class Thread {
// Lookup a Thread by various methods.
static Thread* GetById(size_t aId);
static Thread* GetByNativeId(NativeThreadId aNativeId);
static Thread* GetByStackPointer(void* aSp);
// Spawn all non-main recorded threads used for recording/replaying.
static void SpawnAllThreads();
// After forking, the new process will only have a main thread. Respawn all
// recorded non-main threads and restore them to their state in the original
// process before the fork.
static void RespawnAllThreadsAfterFork();
// Spawn the specified thread.
static void SpawnThread(Thread* aThread);
static NativeThreadId SpawnThread(Thread* aThread);
// Spawn a non-recorded thread with the specified start routine/argument.
static Thread* SpawnNonRecordedThread(Callback aStart, void* aArgument);
// Wait until a thread has initialized its stack and other state.
static void WaitUntilInitialized(Thread* aThread);
static void SpawnNonRecordedThread(Callback aStart, void* aArgument);
// Start an existing thread, for use when the process has called a thread
// creation system API when events were not passed through. The return value
@ -251,6 +287,18 @@ class Thread {
// Give access to the atomic lock which the thread owns.
Maybe<size_t>& AtomicLockId() { return mAtomicLockId; }
// Mark changes in the recorded locks which this thread owns.
void AddOwnedLock(NativeLock* aLock);
void RemoveOwnedLock(NativeLock* aLock);
// Release or acquire all locks owned by this thread. This does not affect
// the set of owned locks.
void ReleaseOrAcquireOwnedLocks(OwnedLockState aState);
// Get a pointer to the internal storage for this thread for aKey, creating it
// if necessary.
void** GetOrCreateStorage(uintptr_t aKey);
///////////////////////////////////////////////////////////////////////////////
// Thread Coordination
///////////////////////////////////////////////////////////////////////////////
@ -285,25 +333,26 @@ class Thread {
// main thread is already waiting for other threads to become idle.
//
// The callback should poke the thread so that it is no longer blocked on the
// resource. The thread must call MaybeWaitForSnapshot before blocking again.
// resource. The thread must call MaybeWaitForFork before blocking again.
//
// MaybeWaitForSnapshot takes a callback to release any resources before the
// MaybeWaitForFork takes a callback to release any resources before the
// thread begins idling. The return value is whether this callback was
// invoked.
void NotifyUnrecordedWait(const std::function<void()>& aNotifyCallback);
bool MaybeWaitForSnapshot(const std::function<void()>& aReleaseCallback);
bool MaybeWaitForFork(const std::function<void()>& aReleaseCallback);
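// Illustrative sketch (assumption, not part of this patch) of how an
// unrecorded worker blocked on a monitor might use these two calls;
// WorkIsReadySketch() and |aMonitor| are hypothetical.
//
//   Thread* thread = Thread::Current();
//   thread->NotifyUnrecordedWait([&]() { aMonitor.NotifyAll(); });
//   while (!WorkIsReadySketch()) {
//     if (thread->MaybeWaitForFork([&]() { aMonitor.Unlock(); })) {
//       aMonitor.Lock();  // The release callback ran, so retake the monitor.
//     }
//     aMonitor.Wait();
//   }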
// Wait for all other threads to enter the idle state necessary for saving
// or restoring a checkpoint. This may only be called on the main thread.
static void WaitForIdleThreads();
// When all other threads are in an idle state, wait for them to either
// release or reacquire all locks they own, and then reenter the idle state.
static void OperateOnIdleThreadLocks(OwnedLockState aState);
// After WaitForIdleThreads(), the main thread will call this to allow
// other threads to resume execution.
static void ResumeIdleThreads();
// Allow a single thread to resume execution.
static void ResumeSingleIdleThread(size_t aId);
// Return whether this thread will remain in the idle state entered after
// WaitForIdleThreads.
bool ShouldIdle() { return mShouldIdle; }
@ -313,28 +362,6 @@ class Thread {
static size_t TotalEventProgress();
};
// This uses a stack pointer instead of TLS to make sure events are passed
// through, for avoiding thorny reentrance issues.
class AutoEnsurePassThroughThreadEventsUseStackPointer {
Thread* mThread;
bool mPassedThrough;
public:
AutoEnsurePassThroughThreadEventsUseStackPointer()
: mThread(Thread::GetByStackPointer(this)),
mPassedThrough(!mThread || mThread->PassThroughEvents()) {
if (!mPassedThrough) {
mThread->SetPassThrough(true);
}
}
~AutoEnsurePassThroughThreadEventsUseStackPointer() {
if (!mPassedThrough) {
mThread->SetPassThrough(false);
}
}
};
// Mark a region of code where a thread's event stream can be accessed.
// This class has several properties:
//
@ -358,7 +385,7 @@ class MOZ_RAII RecordingEventSection {
}
if (IsRecording()) {
MOZ_RELEASE_ASSERT(!aThread->Events().mInRecordingEventSection);
aThread->Events().mFile->mStreamLock.ReadLock();
aThread->Events().mRecording->mStreamLock.ReadLock();
aThread->Events().mInRecordingEventSection = true;
} else {
while (!aThread->MaybeDivergeFromRecording() &&
@ -373,7 +400,7 @@ class MOZ_RAII RecordingEventSection {
return;
}
if (IsRecording()) {
mThread->Events().mFile->mStreamLock.ReadUnlock();
mThread->Events().mRecording->mStreamLock.ReadUnlock();
mThread->Events().mInRecordingEventSection = false;
}
}
@ -6,8 +6,6 @@
#include "ThreadSnapshot.h"
#include "MemorySnapshot.h"
#include "SpinLock.h"
#include "Thread.h"
namespace mozilla {
@ -18,22 +16,16 @@ namespace recordreplay {
#define THREAD_STACK_TOP_SIZE 2048
// Information about a thread's state, for use in saving or restoring
// snapshots. The contents of this structure are in preserved memory.
// Information about a thread's state, for use in saving or restoring its stack
// and register state. This is similar to setjmp/longjmp, except that stack
// contents are restored after jumping. We don't use setjmp/longjmp to avoid
// problems when the saving thread is different from the restoring thread.
struct ThreadState {
// Whether this thread should update its state when no longer idle. This is
// only used for non-main threads.
size_t /* bool */ mShouldRestore;
// Contents of callee-save registers: rbx, rbp, and r12-r15
uintptr_t mCalleeSaveRegisters[6];
// Register state, as stored by setjmp and restored by longjmp. Saved when a
// non-main thread idles or the main thread begins to save all thread states.
// When |mShouldRestore| is set, this is the state to set it to.
jmp_buf mRegisters; // jmp_buf is 148 bytes
uint32_t mPadding;
// Top of the stack, set as for |registers|. Stack pointer information is
// actually included in |registers| as well, but jmp_buf is opaque.
void* mStackPointer;
// Contents of rsp.
uintptr_t mStackPointer;
// Contents of the top of the stack, set as for |registers|. This captures
// parts of the stack that might mutate between the state being saved and the
@ -41,7 +33,7 @@ struct ThreadState {
uint8_t mStackTop[THREAD_STACK_TOP_SIZE];
size_t mStackTopBytes;
// Stack contents to copy to |stackPointer|, non-nullptr if |mShouldRestore|
// is set.
// Stack contents to copy to |mStackPointer|.
uint8_t* mStackContents;
@ -54,122 +46,117 @@ struct ThreadState {
// main thread, which immediately updates its state when restoring snapshots.
static ThreadState* gThreadState;
void InitializeThreadSnapshots(size_t aNumThreads) {
gThreadState = (ThreadState*)AllocateMemory(aNumThreads * sizeof(ThreadState),
MemoryKind::ThreadSnapshot);
jmp_buf buf;
if (setjmp(buf) == 0) {
longjmp(buf, 1);
}
ThreadYield();
void InitializeThreadSnapshots() {
size_t numThreads = MaxThreadId + 1;
gThreadState = new ThreadState[numThreads];
memset(gThreadState, 0, numThreads * sizeof(ThreadState));
}
static void ClearThreadState(ThreadState* aInfo) {
MOZ_RELEASE_ASSERT(aInfo->mShouldRestore);
DeallocateMemory(aInfo->mStackContents, aInfo->mStackBytes,
MemoryKind::ThreadSnapshot);
aInfo->mShouldRestore = false;
free(aInfo->mStackContents);
aInfo->mStackContents = nullptr;
aInfo->mStackBytes = 0;
}
extern "C" {
extern int SaveThreadStateOrReturnFromRestore(ThreadState* aInfo,
int (*aSetjmpArg)(jmp_buf),
int* aStackSeparator);
#define THREAD_STACK_POINTER_OFFSET 48
#define THREAD_STACK_TOP_OFFSET 56
#define THREAD_STACK_TOP_BYTES_OFFSET 2104
#define THREAD_STACK_CONTENTS_OFFSET 2112
#define THREAD_STACK_BYTES_OFFSET 2120
#define THREAD_REGISTERS_OFFSET 8
#define THREAD_STACK_POINTER_OFFSET 160
#define THREAD_STACK_TOP_OFFSET 168
#define THREAD_STACK_TOP_BYTES_OFFSET 2216
#define THREAD_STACK_CONTENTS_OFFSET 2224
#define THREAD_STACK_BYTES_OFFSET 2232
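// The replacement constants above (48, 56, 2104, 2112, 2120) follow directly
// from the new ThreadState layout checked by the static_assert below:
// mCalleeSaveRegisters occupies 6 * 8 = 48 bytes at offset 0, so mStackPointer
// sits at 48, mStackTop at 56, mStackTopBytes at 56 + 2048 = 2104,
// mStackContents at 2112 and mStackBytes at 2120.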
extern int SaveThreadStateOrReturnFromRestore(ThreadState* aInfo,
int* aStackSeparator);
__asm(
"_SaveThreadStateOrReturnFromRestore:"
// On Unix/x64, the first integer arg is in %rdi. Move this into a
// callee save register so that setjmp/longjmp will save/restore it even
// though the rest of the stack is incoherent after the longjmp.
"push %rbx;"
"movq %rdi, %rbx;"
// Update |aInfo->mStackPointer|. Everything above this on the stack will be
// restored after getting here from longjmp.
"movq %rsp, " ExpandAndQuote(THREAD_STACK_POINTER_OFFSET) "(%rbx);"
// restored later.
"movq %rsp, " ExpandAndQuote(THREAD_STACK_POINTER_OFFSET) "(%rdi);"
// Compute the number of bytes to store on the stack top.
"subq %rsp, %rdx;" // rdx is the third arg reg
"subq %rsp, %rsi;" // rsi is the second arg reg
// Bounds check against the size of the stack top buffer.
"cmpl $" ExpandAndQuote(THREAD_STACK_TOP_SIZE) ", %edx;"
"cmpl $" ExpandAndQuote(THREAD_STACK_TOP_SIZE) ", %esi;"
"jg SaveThreadStateOrReturnFromRestore_crash;"
// Store the number of bytes written to the stack top buffer.
"movq %rdx, " ExpandAndQuote(THREAD_STACK_TOP_BYTES_OFFSET) "(%rbx);"
"movq %rsi, " ExpandAndQuote(THREAD_STACK_TOP_BYTES_OFFSET) "(%rdi);"
// Load the start of the stack top buffer and the stack pointer.
"movq %rsp, %r8;"
"movq %rbx, %r9;"
"movq %rdi, %r9;"
"addq $" ExpandAndQuote(THREAD_STACK_TOP_OFFSET) ", %r9;"
"jmp SaveThreadStateOrReturnFromRestore_copyTopRestart;"
// Fill in the stack top buffer.
"jmp SaveThreadStateOrReturnFromRestore_copyTopRestart;"
"SaveThreadStateOrReturnFromRestore_copyTopRestart:"
"testq %rdx, %rdx;"
"testq %rsi, %rsi;"
"je SaveThreadStateOrReturnFromRestore_copyTopDone;"
"movl 0(%r8), %ecx;"
"movl %ecx, 0(%r9);"
"addq $4, %r8;"
"addq $4, %r9;"
"subq $4, %rdx;"
"subq $4, %rsi;"
"jmp SaveThreadStateOrReturnFromRestore_copyTopRestart;"
"SaveThreadStateOrReturnFromRestore_copyTopDone:"
// Call setjmp, passing |aInfo->mRegisters|.
"addq $" ExpandAndQuote(THREAD_REGISTERS_OFFSET) ", %rdi;"
"callq *%rsi;" // rsi is the second arg reg
// Save callee save registers.
"movq %rbx, 0(%rdi);"
"movq %rbp, 8(%rdi);"
"movq %r12, 16(%rdi);"
"movq %r13, 24(%rdi);"
"movq %r14, 32(%rdi);"
"movq %r15, 40(%rdi);"
// If setjmp returned zero, we just saved the state and are done.
"testl %eax, %eax;"
"je SaveThreadStateOrReturnFromRestore_done;"
// Return zero when saving.
"movq $0, %rax;"
"ret;"
// Otherwise we just returned from longjmp, and need to restore the stack
// contents before anything else can be performed. Use caller save registers
// exclusively for this, don't touch the stack at all.
"SaveThreadStateOrReturnFromRestore_crash:"
"movq $0, %rbx;"
"movq 0(%rbx), %rbx;"
);
extern void RestoreThreadState(ThreadState* aInfo);
__asm(
"_RestoreThreadState:"
// Restore callee save registers.
"movq 0(%rdi), %rbx;"
"movq 8(%rdi), %rbp;"
"movq 16(%rdi), %r12;"
"movq 24(%rdi), %r13;"
"movq 32(%rdi), %r14;"
"movq 40(%rdi), %r15;"
// Restore stack pointer.
"movq " ExpandAndQuote(THREAD_STACK_POINTER_OFFSET) "(%rdi), %rsp;"
// Load |mStackPointer|, |mStackContents|, and |mStackBytes| from |aInfo|.
"movq " ExpandAndQuote(THREAD_STACK_POINTER_OFFSET) "(%rbx), %rcx;"
"movq " ExpandAndQuote(THREAD_STACK_CONTENTS_OFFSET) "(%rbx), %r8;"
"movq " ExpandAndQuote(THREAD_STACK_BYTES_OFFSET) "(%rbx), %r9;"
// The stack pointer we loaded should be identical to the stack pointer we have.
"cmpq %rsp, %rcx;"
"jne SaveThreadStateOrReturnFromRestore_crash;"
"jmp SaveThreadStateOrReturnFromRestore_copyAfterRestart;"
"movq %rsp, %rcx;"
"movq " ExpandAndQuote(THREAD_STACK_CONTENTS_OFFSET) "(%rdi), %r8;"
"movq " ExpandAndQuote(THREAD_STACK_BYTES_OFFSET) "(%rdi), %r9;"
// Fill in the contents of the entire stack.
"SaveThreadStateOrReturnFromRestore_copyAfterRestart:"
"jmp RestoreThreadState_copyAfterRestart;"
"RestoreThreadState_copyAfterRestart:"
"testq %r9, %r9;"
"je SaveThreadStateOrReturnFromRestore_done;"
"je RestoreThreadState_done;"
"movl 0(%r8), %edx;"
"movl %edx, 0(%rcx);"
"addq $4, %rcx;"
"addq $4, %r8;"
"subq $4, %r9;"
"jmp SaveThreadStateOrReturnFromRestore_copyAfterRestart;"
"jmp RestoreThreadState_copyAfterRestart;"
"RestoreThreadState_done:"
"SaveThreadStateOrReturnFromRestore_crash:"
"movq $0, %rbx;"
"movq 0(%rbx), %rbx;"
"SaveThreadStateOrReturnFromRestore_done:"
"pop %rbx;"
// Return non-zero when restoring.
"movq $1, %rax;"
"ret;"
);
@ -177,8 +164,7 @@ __asm(
bool SaveThreadState(size_t aId, int* aStackSeparator) {
static_assert(
offsetof(ThreadState, mRegisters) == THREAD_REGISTERS_OFFSET &&
offsetof(ThreadState, mStackPointer) == THREAD_STACK_POINTER_OFFSET &&
offsetof(ThreadState, mStackPointer) == THREAD_STACK_POINTER_OFFSET &&
offsetof(ThreadState, mStackTop) == THREAD_STACK_TOP_OFFSET &&
offsetof(ThreadState, mStackTopBytes) ==
THREAD_STACK_TOP_BYTES_OFFSET &&
@ -187,28 +173,21 @@ bool SaveThreadState(size_t aId, int* aStackSeparator) {
offsetof(ThreadState, mStackBytes) == THREAD_STACK_BYTES_OFFSET,
"Incorrect ThreadState offsets");
MOZ_RELEASE_ASSERT(aId <= MaxThreadId);
ThreadState* info = &gThreadState[aId];
MOZ_RELEASE_ASSERT(!info->mShouldRestore);
bool res =
SaveThreadStateOrReturnFromRestore(info, setjmp, aStackSeparator) == 0;
SaveThreadStateOrReturnFromRestore(info, aStackSeparator) == 0;
if (!res) {
ClearThreadState(info);
}
return res;
}
void RestoreThreadStack(size_t aId) {
ThreadState* info = &gThreadState[aId];
longjmp(info->mRegisters, 1);
MOZ_CRASH(); // longjmp does not return.
}
static void SaveThreadStack(SavedThreadStack& aStack, size_t aId) {
void SaveThreadStack(size_t aId) {
Thread* thread = Thread::GetById(aId);
MOZ_RELEASE_ASSERT(aId <= MaxThreadId);
ThreadState& info = gThreadState[aId];
aStack.mStackPointer = info.mStackPointer;
MemoryMove(aStack.mRegisters, info.mRegisters, sizeof(jmp_buf));
uint8_t* stackPointer = (uint8_t*)info.mStackPointer;
uint8_t* stackTop = thread->StackBase() + thread->StackSize();
@ -217,89 +196,27 @@ static void SaveThreadStack(SavedThreadStack& aStack, size_t aId) {
MOZ_RELEASE_ASSERT(stackBytes >= info.mStackTopBytes);
aStack.mStack =
(uint8_t*)AllocateMemory(stackBytes, MemoryKind::ThreadSnapshot);
aStack.mStackBytes = stackBytes;
// Release any existing stack contents from a previous fork.
free(info.mStackContents);
MemoryMove(aStack.mStack, info.mStackTop, info.mStackTopBytes);
MemoryMove(aStack.mStack + info.mStackTopBytes,
stackPointer + info.mStackTopBytes,
stackBytes - info.mStackTopBytes);
info.mStackContents = (uint8_t*) malloc(stackBytes);
info.mStackBytes = stackBytes;
memcpy(info.mStackContents, info.mStackTop, info.mStackTopBytes);
memcpy(info.mStackContents + info.mStackTopBytes,
stackPointer + info.mStackTopBytes,
stackBytes - info.mStackTopBytes);
}
static void RestoreStackForLoadingByThread(const SavedThreadStack& aStack,
size_t aId) {
ThreadState& info = gThreadState[aId];
MOZ_RELEASE_ASSERT(!info.mShouldRestore);
void RestoreThreadStack(size_t aId) {
MOZ_RELEASE_ASSERT(aId <= MaxThreadId);
info.mStackPointer = aStack.mStackPointer;
MemoryMove(info.mRegisters, aStack.mRegisters, sizeof(jmp_buf));
ThreadState* info = &gThreadState[aId];
MOZ_RELEASE_ASSERT(info->mStackContents);
info.mStackBytes = aStack.mStackBytes;
uint8_t* stackContents =
(uint8_t*)AllocateMemory(info.mStackBytes, MemoryKind::ThreadSnapshot);
MemoryMove(stackContents, aStack.mStack, aStack.mStackBytes);
info.mStackContents = stackContents;
info.mShouldRestore = true;
}
bool ShouldRestoreThreadStack(size_t aId) {
return gThreadState[aId].mShouldRestore;
}
bool SaveAllThreads(AllSavedThreadStacks& aSaved) {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
AutoPassThroughThreadEvents pt; // setjmp may perform system calls.
AutoDisallowMemoryChanges disallow;
int stackSeparator = 0;
if (!SaveThreadState(MainThreadId, &stackSeparator)) {
// We just restored this state from a later point of execution.
return false;
}
for (size_t i = MainThreadId; i <= MaxRecordedThreadId; i++) {
SaveThreadStack(aSaved.mStacks[i - 1], i);
}
return true;
}
void RestoreAllThreads(const AllSavedThreadStacks& aSaved) {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
// These will be matched by the Auto* classes in SaveAllThreads().
BeginPassThroughThreadEvents();
SetMemoryChangesAllowed(false);
for (size_t i = MainThreadId; i <= MaxRecordedThreadId; i++) {
RestoreStackForLoadingByThread(aSaved.mStacks[i - 1], i);
}
// Restore this stack to its state when we saved it in SaveAllThreads(), and
// continue executing from there.
RestoreThreadStack(MainThreadId);
RestoreThreadState(info);
Unreachable();
}
void WaitForIdleThreadsToRestoreTheirStacks() {
// Wait for all other threads to restore their stack before resuming
// execution.
while (true) {
bool done = true;
for (size_t i = MainThreadId + 1; i <= MaxRecordedThreadId; i++) {
if (ShouldRestoreThreadStack(i)) {
Thread::Notify(i);
done = false;
}
}
if (done) {
break;
}
Thread::WaitNoIdle();
}
}
} // namespace recordreplay
} // namespace mozilla
@ -7,101 +7,26 @@
#ifndef mozilla_recordreplay_ThreadSnapshot_h
#define mozilla_recordreplay_ThreadSnapshot_h
#include "File.h"
#include "ProcessRewind.h"
#include "Thread.h"
namespace mozilla {
namespace recordreplay {
// Thread Snapshots Overview.
//
// The functions below are used when a thread saves or restores its stack and
// register state. The steps taken in saving and restoring a thread snapshot are
// as follows:
//
// 1. Before idling (non-main threads) or before creating a snapshot (main
// thread), the thread calls SaveThreadState. This saves the register state
// for the thread as well as a portion of the top of the stack, and after
// saving the state it returns true.
//
// 2. Once all other threads are idle, the main thread saves the remainder of
// all thread stacks. (The portion saved earlier gives threads leeway to
// perform operations after saving their stack, mainly for entering an idle
// state.)
//
// 3. The thread stacks are now stored on the heap. Later on, the main thread
// may ensure that all threads are idle and then call, for all threads,
// RestoreStackForLoadingByThread. This prepares the stacks for restoring by
// the associated threads.
//
// 4. While still in their idle state, threads call ShouldRestoreThreadStack to
// see if there is stack information for them to restore.
//
// 5. If ShouldRestoreThreadStack returns true, RestoreThreadStack is then
// called to restore the stack and register state to the point where
// SaveThreadState was originally called.
//
// 6. RestoreThreadStack does not return. Instead, control transfers to the
// call to SaveThreadState, which returns false after being restored to.
// aStackSeparator is a pointer into the stack. Values shallower than this in
// the stack will be preserved as they are at the time of the SaveThreadState
// call, whereas deeper values will be preserved as they are at the point where
// the main thread saves the remainder of the stack.
bool SaveThreadState(size_t aId, int* aStackSeparator);
// Information saved about the state of a thread.
struct SavedThreadStack {
// Saved stack pointer.
void* mStackPointer;
// Remember the entire stack contents for a thread, after forking.
void SaveThreadStack(size_t aId);
// Saved stack contents, starting at |mStackPointer|.
uint8_t* mStack;
size_t mStackBytes;
// Saved register state.
jmp_buf mRegisters;
SavedThreadStack() { PodZero(this); }
void ReleaseContents() {
if (mStackBytes) {
DeallocateMemory(mStack, mStackBytes, MemoryKind::ThreadSnapshot);
}
}
};
struct AllSavedThreadStacks {
SavedThreadStack mStacks[MaxRecordedThreadId];
void ReleaseContents() {
for (SavedThreadStack& stack : mStacks) {
stack.ReleaseContents();
}
}
};
// When all other threads are idle, the main thread may call this to save its
// own stack and the stacks of all other threads. The return value is true if
// the stacks were just saved, or false if they were just restored due to a
// rewind from a later point of execution.
bool SaveAllThreads(AllSavedThreadStacks& aSaved);
// Restore a set of saved stacks and rewind state to that point.
// This function does not return.
void RestoreAllThreads(const AllSavedThreadStacks& aSaved);
// After rewinding to an earlier point, the main thread will call this to
// ensure that each thread has woken up and restored its own stack contents.
// The main thread does not itself write to the stacks of other threads.
void WaitForIdleThreadsToRestoreTheirStacks();
bool ShouldRestoreThreadStack(size_t aId);
// Restore the saved stack contents after a fork.
void RestoreThreadStack(size_t aId);
// Initialize state for taking thread snapshots.
void InitializeThreadSnapshots(size_t aNumThreads);
void InitializeThreadSnapshots();
} // namespace recordreplay
} // namespace mozilla
@ -34,18 +34,20 @@ static void GetSocketAddress(struct sockaddr_un* addr,
namespace parent {
void OpenChannel(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
void OpenChannel(base::ProcessId aProcessId, uint32_t aChannelId,
ipc::FileDescriptor* aConnection) {
MOZ_RELEASE_ASSERT(IsMiddleman() || XRE_IsParentProcess());
int connectionFd = socket(AF_UNIX, SOCK_STREAM, 0);
MOZ_RELEASE_ASSERT(connectionFd > 0);
struct sockaddr_un addr;
GetSocketAddress(&addr, aMiddlemanPid, aChannelId);
GetSocketAddress(&addr, aProcessId, aChannelId);
DirectDeleteFile(addr.sun_path);
int rv = bind(connectionFd, (sockaddr*)&addr, SUN_LEN(&addr));
MOZ_RELEASE_ASSERT(rv >= 0);
if (rv < 0) {
Print("Error: bind() failed [errno %d], crashing...\n", errno);
MOZ_CRASH("OpenChannel");
}
*aConnection = ipc::FileDescriptor(connectionFd);
close(connectionFd);
@ -53,63 +55,50 @@ void OpenChannel(base::ProcessId aMiddlemanPid, uint32_t aChannelId,
} // namespace parent
static void InitializeSimulatedDelayState();
struct HelloMessage {
int32_t mMagic;
};
Channel::Channel(size_t aId, bool aMiddlemanRecording,
const MessageHandler& aHandler)
Channel::Channel(size_t aId, Kind aKind, const MessageHandler& aHandler,
base::ProcessId aParentPid)
: mId(aId),
mKind(aKind),
mHandler(aHandler),
mInitialized(false),
mConnectionFd(0),
mFd(0),
mMessageBuffer(nullptr),
mMessageBytes(0),
mSimulateDelays(false) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
if (IsRecordingOrReplaying()) {
MOZ_RELEASE_ASSERT(AreThreadEventsPassedThrough());
mFd = socket(AF_UNIX, SOCK_STREAM, 0);
MOZ_RELEASE_ASSERT(mFd > 0);
struct sockaddr_un addr;
GetSocketAddress(&addr, child::MiddlemanProcessId(), mId);
int rv = HANDLE_EINTR(connect(mFd, (sockaddr*)&addr, SUN_LEN(&addr)));
MOZ_RELEASE_ASSERT(rv >= 0);
DirectDeleteFile(addr.sun_path);
} else {
MOZ_RELEASE_ASSERT(IsMiddleman());
mMessageBytes(0) {
MOZ_RELEASE_ASSERT(!IsRecordingOrReplaying() || AreThreadEventsPassedThrough());
if (IsParent()) {
ipc::FileDescriptor connection;
if (aMiddlemanRecording) {
// When starting the recording child process we have not done enough
// initialization to ask for a channel from the parent, but have also not
// started the sandbox so we can do it ourselves.
parent::OpenChannel(base::GetCurrentProcId(), mId, &connection);
} else {
if (aKind == Kind::MiddlemanReplay) {
// The middleman is sandboxed at this point and the parent must open
// the channel on our behalf.
dom::ContentChild::GetSingleton()->SendOpenRecordReplayChannel(
mId, &connection);
aId, &connection);
MOZ_RELEASE_ASSERT(connection.IsValid());
} else {
parent::OpenChannel(base::GetCurrentProcId(), aId, &connection);
}
mConnectionFd = connection.ClonePlatformHandle().release();
int rv = listen(mConnectionFd, 1);
MOZ_RELEASE_ASSERT(rv >= 0);
} else {
MOZ_RELEASE_ASSERT(aParentPid);
mFd = socket(AF_UNIX, SOCK_STREAM, 0);
MOZ_RELEASE_ASSERT(mFd > 0);
struct sockaddr_un addr;
GetSocketAddress(&addr, aParentPid, aId);
int rv = HANDLE_EINTR(connect(mFd, (sockaddr*)&addr, SUN_LEN(&addr)));
MOZ_RELEASE_ASSERT(rv >= 0);
DirectDeleteFile(addr.sun_path);
}
// Simulate message delays in channels used to communicate with a replaying
// process.
mSimulateDelays = IsMiddleman() ? !aMiddlemanRecording : IsReplaying();
InitializeSimulatedDelayState();
Thread::SpawnNonRecordedThread(ThreadMain, this);
}
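// Illustrative sketch (assumption, not part of this patch): how a middleman
// might open a channel to a replaying child. The handler body is hypothetical
// and runs on the channel's message thread.
static Channel* OpenReplayChannelSketch(size_t aChildId) {
  return new Channel(aChildId, Channel::Kind::MiddlemanReplay,
                     [](Message::UniquePtr aMsg) {
                       PrintSpew("Received %s\n", aMsg->TypeString());
                     });
}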
@ -119,15 +108,7 @@ void Channel::ThreadMain(void* aChannelArg) {
static const int32_t MagicValue = 0x914522b9;
if (IsRecordingOrReplaying()) {
HelloMessage msg;
int rv = HANDLE_EINTR(recv(channel->mFd, &msg, sizeof(msg), MSG_WAITALL));
MOZ_RELEASE_ASSERT(rv == sizeof(msg));
MOZ_RELEASE_ASSERT(msg.mMagic == MagicValue);
} else {
MOZ_RELEASE_ASSERT(IsMiddleman());
if (channel->IsParent()) {
channel->mFd = HANDLE_EINTR(accept(channel->mConnectionFd, nullptr, 0));
MOZ_RELEASE_ASSERT(channel->mFd > 0);
@ -136,14 +117,24 @@ void Channel::ThreadMain(void* aChannelArg) {
int rv = HANDLE_EINTR(send(channel->mFd, &msg, sizeof(msg), 0));
MOZ_RELEASE_ASSERT(rv == sizeof(msg));
}
} else {
HelloMessage msg;
channel->mStartTime = channel->mAvailableTime = TimeStamp::Now();
int rv = HANDLE_EINTR(recv(channel->mFd, &msg, sizeof(msg), MSG_WAITALL));
MOZ_RELEASE_ASSERT(rv == sizeof(msg));
MOZ_RELEASE_ASSERT(msg.mMagic == MagicValue);
}
{
MonitorAutoLock lock(channel->mMonitor);
channel->mInitialized = true;
channel->mMonitor.Notify();
auto& pending = channel->mPendingData;
if (!pending.empty()) {
channel->SendRaw(pending.begin(), pending.length());
pending.clear();
}
}
while (true) {
@ -155,103 +146,24 @@ void Channel::ThreadMain(void* aChannelArg) {
}
}
// Simulated one way latency between middleman and replaying children, in ms.
static size_t gSimulatedLatency;
// Simulated bandwidth for data transferred between middleman and replaying
// children, in bytes/ms.
static size_t gSimulatedBandwidth;
static size_t LoadEnvValue(const char* aEnv) {
const char* value = getenv(aEnv);
if (value && value[0]) {
int n = atoi(value);
return n >= 0 ? n : 0;
}
return 0;
}
static void InitializeSimulatedDelayState() {
// In preparation for shifting computing resources into the cloud when
// debugging a recorded execution (see bug 1547081), we need to be able to
// test expected performance when there is a significant distance between the
// user's machine (running the UI, middleman, and recording process) and
// machines in the cloud (running replaying processes). To assess this
// expected performance, the environment variables below can be used to
// specify the one-way latency and bandwidth to simulate for connections
// between the middleman and replaying processes.
//
// This simulation is approximate: the bandwidth tracked is per connection
// instead of the total across all connections, and network restrictions are
// not yet simulated when transferring graphics data.
//
// If there are multiple channels then we will do this initialization multiple
// times, so this needs to be idempotent.
gSimulatedLatency = LoadEnvValue("MOZ_RECORD_REPLAY_SIMULATED_LATENCY");
gSimulatedBandwidth = LoadEnvValue("MOZ_RECORD_REPLAY_SIMULATED_BANDWIDTH");
}
static bool MessageSubjectToSimulatedDelay(MessageType aType) {
switch (aType) {
// Middleman call messages are not subject to delays. When replaying
// children are in the cloud they will use a local process to perform
// middleman calls.
case MessageType::MiddlemanCallResponse:
case MessageType::MiddlemanCallRequest:
case MessageType::ResetMiddlemanCalls:
// Don't call system functions when we're in the process of crashing.
case MessageType::BeginFatalError:
case MessageType::FatalError:
return false;
default:
return true;
}
}
void Channel::SendMessage(Message&& aMsg) {
// Block until the channel is initialized.
if (!mInitialized) {
MonitorAutoLock lock(mMonitor);
while (!mInitialized) {
mMonitor.Wait();
}
}
PrintMessage("SendMsg", aMsg);
SendMessageData((const char*)&aMsg, aMsg.mSize);
}
if (gSimulatedLatency && gSimulatedBandwidth && mSimulateDelays &&
MessageSubjectToSimulatedDelay(aMsg.mType)) {
AutoEnsurePassThroughThreadEvents pt;
void Channel::SendMessageData(const char* aData, size_t aSize) {
MonitorAutoLock lock(mMonitor);
// Find the time this message will start sending.
TimeStamp sendTime = TimeStamp::Now();
if (sendTime < mAvailableTime) {
sendTime = mAvailableTime;
}
// Find the time spent sending the message over the channel.
size_t sendDurationMs = aMsg.mSize / gSimulatedBandwidth;
mAvailableTime = sendTime + TimeDuration::FromMilliseconds(sendDurationMs);
// The receive time of the message is the time the message finishes sending
// plus the connection latency.
TimeStamp receiveTime =
mAvailableTime + TimeDuration::FromMilliseconds(gSimulatedLatency);
aMsg.mReceiveTime = (receiveTime - mStartTime).ToMilliseconds();
if (mInitialized) {
SendRaw(aData, aSize);
} else {
mPendingData.append(aData, aSize);
}
}
// Send messages atomically, except when crashing.
Maybe<MonitorAutoLock> lock;
if (aMsg.mType != MessageType::BeginFatalError &&
aMsg.mType != MessageType::FatalError) {
lock.emplace(mMonitor);
}
const char* ptr = (const char*)&aMsg;
size_t nbytes = aMsg.mSize;
while (nbytes) {
int rv = HANDLE_EINTR(send(mFd, ptr, nbytes, 0));
void Channel::SendRaw(const char* aData, size_t aSize) {
while (aSize) {
int rv = HANDLE_EINTR(send(mFd, aData, aSize, 0));
if (rv < 0) {
// If the other side of the channel has crashed, don't send the message.
// Avoid crashing in this process too, so that we don't generate another
@ -259,22 +171,20 @@ void Channel::SendMessage(Message&& aMsg) {
MOZ_RELEASE_ASSERT(errno == EPIPE);
return;
}
ptr += rv;
nbytes -= rv;
aData += rv;
aSize -= rv;
}
}
Message::UniquePtr Channel::WaitForMessage() {
if (!mMessageBuffer) {
mMessageBuffer = (MessageBuffer*)AllocateMemory(sizeof(MessageBuffer),
MemoryKind::Generic);
mMessageBuffer->appendN(0, PageSize);
if (mMessageBuffer.empty()) {
mMessageBuffer.appendN(0, PageSize);
}
size_t messageSize = 0;
while (true) {
if (mMessageBytes >= sizeof(Message)) {
Message* msg = (Message*)mMessageBuffer->begin();
Message* msg = (Message*)mMessageBuffer.begin();
messageSize = msg->mSize;
MOZ_RELEASE_ASSERT(messageSize >= sizeof(Message));
if (mMessageBytes >= messageSize) {
@ -283,52 +193,61 @@ Message::UniquePtr Channel::WaitForMessage() {
}
// Make sure the buffer is large enough for the entire incoming message.
if (messageSize > mMessageBuffer->length()) {
mMessageBuffer->appendN(0, messageSize - mMessageBuffer->length());
if (messageSize > mMessageBuffer.length()) {
mMessageBuffer.appendN(0, messageSize - mMessageBuffer.length());
}
ssize_t nbytes =
HANDLE_EINTR(recv(mFd, &mMessageBuffer->begin()[mMessageBytes],
mMessageBuffer->length() - mMessageBytes, 0));
if (nbytes < 0) {
MOZ_RELEASE_ASSERT(errno == EAGAIN);
HANDLE_EINTR(recv(mFd, &mMessageBuffer.begin()[mMessageBytes],
mMessageBuffer.length() - mMessageBytes, 0));
if (nbytes < 0 && errno == EAGAIN) {
continue;
} else if (nbytes == 0) {
// The other side of the channel has shut down.
if (IsMiddleman()) {
return nullptr;
}
PrintSpew("Channel disconnected, exiting...\n");
_exit(0);
}
if (nbytes == 0 || (nbytes < 0 && errno == ECONNRESET)) {
// The other side of the channel has shut down.
if (ExitProcessOnDisconnect()) {
PrintSpew("Channel disconnected, exiting...\n");
_exit(0);
} else {
// Returning null will shut down the channel.
PrintSpew("Channel disconnected, shutting down thread.\n");
return nullptr;
}
}
MOZ_RELEASE_ASSERT(nbytes > 0);
mMessageBytes += nbytes;
}
Message::UniquePtr res = ((Message*)mMessageBuffer->begin())->Clone();
Message::UniquePtr res = ((Message*)mMessageBuffer.begin())->Clone();
// Remove the message we just received from the incoming buffer.
size_t remaining = mMessageBytes - messageSize;
if (remaining) {
memmove(mMessageBuffer->begin(), &mMessageBuffer->begin()[messageSize],
memmove(mMessageBuffer.begin(), &mMessageBuffer[messageSize],
remaining);
}
mMessageBytes = remaining;
// If there is a simulated delay on the message, wait until it completes.
if (res->mReceiveTime) {
TimeStamp receiveTime =
mStartTime + TimeDuration::FromMilliseconds(res->mReceiveTime);
while (receiveTime > TimeStamp::Now()) {
MonitorAutoLock lock(mMonitor);
mMonitor.WaitUntil(receiveTime);
}
}
PrintMessage("RecvMsg", *res);
return res;
}
void Channel::ExitIfNotInitializedBefore(const TimeStamp& aDeadline) {
MOZ_RELEASE_ASSERT(IsParent());
MonitorAutoLock lock(mMonitor);
while (!mInitialized) {
if (TimeStamp::Now() >= aDeadline) {
PrintSpew("Timed out waiting for channel initialization, exiting...\n");
_exit(0);
}
mMonitor.WaitUntil(aDeadline);
}
}
void Channel::PrintMessage(const char* aPrefix, const Message& aMsg) {
if (!SpewEnabled()) {
return;
@ -349,13 +268,19 @@ void Channel::PrintMessage(const char* aPrefix, const Message& aMsg) {
nsDependentSubstring(nmsg.Buffer(), nmsg.BufferSize()));
break;
}
case MessageType::RecordingData: {
const auto& nmsg = static_cast<const RecordingDataMessage&>(aMsg);
data = nsPrintfCString("Start %llu Size %lu", nmsg.mTag,
nmsg.BinaryDataSize());
break;
}
default:
break;
}
const char* kind =
IsMiddleman() ? "Middleman" : (IsRecording() ? "Recording" : "Replaying");
PrintSpew("%s%s:%d %s %s\n", kind, aPrefix, (int)mId, aMsg.TypeString(),
data.get());
PrintSpew("%s%s:%lu:%lu %s %s\n", kind, aPrefix, mId, aMsg.mForkId,
aMsg.TypeString(), data.get());
}
} // namespace recordreplay
@ -13,9 +13,7 @@
#include "mozilla/Maybe.h"
#include "mozilla/UniquePtr.h"
#include "File.h"
#include "JSControl.h"
#include "MiddlemanCall.h"
#include "ExternalCall.h"
#include "Monitor.h"
namespace mozilla {
@ -59,10 +57,12 @@ namespace recordreplay {
/* rewinding. */ \
_Macro(Ping) \
\
/* Sent to recording processes when exiting, or to force a hanged replaying */ \
/* process to crash. */ \
/* Sent to child processes which should exit normally. */ \
_Macro(Terminate) \
\
/* Force a hung replaying process to crash and produce a dump. */ \
_Macro(Crash) \
\
/* Poke a child that is recording to create an artificial checkpoint, rather than */ \
/* (potentially) idling indefinitely. This has no effect on a replaying process. */ \
_Macro(CreateCheckpoint) \
@ -70,8 +70,9 @@ namespace recordreplay {
/* Unpause the child and perform a debugger-defined operation. */ \
_Macro(ManifestStart) \
\
/* Respond to a MiddlemanCallRequest message. */ \
_Macro(MiddlemanCallResponse) \
/* Respond to an ExternalCallRequest message. This is also sent between separate */ \
/* replaying processes to fill the external call cache in root replaying processes. */ \
_Macro(ExternalCallResponse) \
\
/* Messages sent from the child process to the middleman. */ \
\
@ -81,24 +82,27 @@ namespace recordreplay {
/* Respond to a ping message */ \
_Macro(PingResponse) \
\
_Macro(UnhandledDivergence) \
\
/* A critical error occurred and execution cannot continue. The child will */ \
/* stop executing after sending this message and will wait to be terminated. */ \
/* A minidump for the child has been generated. */ \
_Macro(FatalError) \
\
/* Sent when a fatal error has occurred, but before the minidump has been */ \
/* generated. */ \
_Macro(BeginFatalError) \
\
/* The child's graphics were repainted. */ \
/* The child's graphics were repainted into the graphics shmem. */ \
_Macro(Paint) \
\
/* Call a system function from the middleman process which the child has */ \
/* encountered after diverging from the recording. */ \
_Macro(MiddlemanCallRequest) \
/* The child's graphics were repainted and have been encoded as an image. */ \
_Macro(PaintEncoded) \
\
/* Reset all information generated by previous MiddlemanCallRequest messages. */ \
_Macro(ResetMiddlemanCalls)
/* Get the result of performing an external call. */ \
_Macro(ExternalCallRequest) \
\
/* Messages sent in both directions. */ \
\
/* Send recording data from a recording process to the middleman, or from the */ \
/* middleman to a replaying process. */ \
_Macro(RecordingData)
enum class MessageType {
#define DefineEnum(Kind) Kind,
@ -109,16 +113,15 @@ enum class MessageType {
struct Message {
MessageType mType;
// When simulating message delays, the time this message should be received,
// relative to when the channel was opened.
uint32_t mReceiveTime;
// Total message size, including the header.
uint32_t mSize;
// Any associated forked process ID for this message.
uint32_t mForkId;
protected:
Message(MessageType aType, uint32_t aSize)
: mType(aType), mReceiveTime(0), mSize(aSize) {
Message(MessageType aType, uint32_t aSize, uint32_t aForkId)
: mType(aType), mSize(aSize), mForkId(aForkId) {
MOZ_RELEASE_ASSERT(mSize >= sizeof(*this));
}
@ -151,9 +154,12 @@ struct Message {
bool CanBeSentWhileUnpaused() const {
return mType == MessageType::CreateCheckpoint ||
mType == MessageType::SetDebuggerRunsInMiddleman ||
mType == MessageType::MiddlemanCallResponse ||
mType == MessageType::Ping || mType == MessageType::Terminate ||
mType == MessageType::Introduction;
mType == MessageType::ExternalCallResponse ||
mType == MessageType::Ping ||
mType == MessageType::Terminate ||
mType == MessageType::Crash ||
mType == MessageType::Introduction ||
mType == MessageType::RecordingData;
}
protected:
@ -186,7 +192,7 @@ struct IntroductionMessage : public Message {
IntroductionMessage(uint32_t aSize, base::ProcessId aParentPid,
uint32_t aArgc)
: Message(MessageType::Introduction, aSize),
: Message(MessageType::Introduction, aSize, 0),
mParentPid(aParentPid),
mArgc(aArgc) {}
@ -226,24 +232,28 @@ struct IntroductionMessage : public Message {
template <MessageType Type>
struct EmptyMessage : public Message {
EmptyMessage() : Message(Type, sizeof(*this)) {}
explicit EmptyMessage(uint32_t aForkId = 0)
: Message(Type, sizeof(*this), aForkId) {}
};
typedef EmptyMessage<MessageType::SetDebuggerRunsInMiddleman>
SetDebuggerRunsInMiddlemanMessage;
typedef EmptyMessage<MessageType::Terminate> TerminateMessage;
typedef EmptyMessage<MessageType::Crash> CrashMessage;
typedef EmptyMessage<MessageType::CreateCheckpoint> CreateCheckpointMessage;
template <MessageType Type>
struct JSONMessage : public Message {
explicit JSONMessage(uint32_t aSize) : Message(Type, aSize) {}
explicit JSONMessage(uint32_t aSize, uint32_t aForkId)
: Message(Type, aSize, aForkId) {}
const char16_t* Buffer() const { return Data<JSONMessage<Type>, char16_t>(); }
size_t BufferSize() const { return DataSize<JSONMessage<Type>, char16_t>(); }
static JSONMessage<Type>* New(const char16_t* aBuffer, size_t aBufferSize) {
static JSONMessage<Type>* New(uint32_t aForkId, const char16_t* aBuffer,
size_t aBufferSize) {
JSONMessage<Type>* res =
NewWithData<JSONMessage<Type>, char16_t>(aBufferSize);
NewWithData<JSONMessage<Type>, char16_t>(aBufferSize, aForkId);
MOZ_RELEASE_ASSERT(res->BufferSize() == aBufferSize);
PodCopy(res->Data<JSONMessage<Type>, char16_t>(), aBuffer, aBufferSize);
return res;
@ -254,13 +264,13 @@ typedef JSONMessage<MessageType::ManifestStart> ManifestStartMessage;
typedef JSONMessage<MessageType::ManifestFinished> ManifestFinishedMessage;
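// Illustrative sketch (assumption, not part of this patch): a child replying
// to a manifest on behalf of a particular forked process might build and send
// the message as follows. |channel|, |forkId| and the JSON buffer are
// hypothetical, and free() assumes New() allocates with malloc.
//
//   ManifestFinishedMessage* msg =
//       ManifestFinishedMessage::New(forkId, buffer, bufferLength);
//   channel->SendMessage(std::move(*msg));
//   free(msg);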
struct FatalErrorMessage : public Message {
explicit FatalErrorMessage(uint32_t aSize)
: Message(MessageType::FatalError, aSize) {}
explicit FatalErrorMessage(uint32_t aSize, uint32_t aForkId)
: Message(MessageType::FatalError, aSize, aForkId) {}
const char* Error() const { return Data<FatalErrorMessage, const char>(); }
};
typedef EmptyMessage<MessageType::BeginFatalError> BeginFatalErrorMessage;
typedef EmptyMessage<MessageType::UnhandledDivergence> UnhandledDivergenceMessage;
// The format for graphics data which will be sent to the middleman process.
// This needs to match the format expected for canvas image data, to avoid
@ -272,49 +282,58 @@ struct PaintMessage : public Message {
uint32_t mHeight;
PaintMessage(uint32_t aWidth, uint32_t aHeight)
: Message(MessageType::Paint, sizeof(*this)),
: Message(MessageType::Paint, sizeof(*this), 0),
mWidth(aWidth),
mHeight(aHeight) {}
};
template <MessageType Type>
struct BinaryMessage : public Message {
explicit BinaryMessage(uint32_t aSize) : Message(Type, aSize) {}
// Associated value whose meaning depends on the message type.
uint64_t mTag;
explicit BinaryMessage(uint32_t aSize, uint32_t aForkId, uint64_t aTag)
: Message(Type, aSize, aForkId), mTag(aTag) {}
const char* BinaryData() const { return Data<BinaryMessage<Type>, char>(); }
size_t BinaryDataSize() const {
return DataSize<BinaryMessage<Type>, char>();
}
static BinaryMessage<Type>* New(const char* aData, size_t aDataSize) {
static BinaryMessage<Type>* New(uint32_t aForkId, uint64_t aTag,
const char* aData, size_t aDataSize) {
BinaryMessage<Type>* res =
NewWithData<BinaryMessage<Type>, char>(aDataSize);
NewWithData<BinaryMessage<Type>, char>(aDataSize, aForkId, aTag);
MOZ_RELEASE_ASSERT(res->BinaryDataSize() == aDataSize);
PodCopy(res->Data<BinaryMessage<Type>, char>(), aData, aDataSize);
return res;
}
};
typedef BinaryMessage<MessageType::MiddlemanCallRequest>
MiddlemanCallRequestMessage;
typedef BinaryMessage<MessageType::MiddlemanCallResponse>
MiddlemanCallResponseMessage;
typedef EmptyMessage<MessageType::ResetMiddlemanCalls>
ResetMiddlemanCallsMessage;
typedef BinaryMessage<MessageType::PaintEncoded> PaintEncodedMessage;
// The tag is the ID of the external call being performed.
typedef BinaryMessage<MessageType::ExternalCallRequest>
ExternalCallRequestMessage;
typedef BinaryMessage<MessageType::ExternalCallResponse>
ExternalCallResponseMessage;
// The tag is the start offset of the recording data needed.
typedef BinaryMessage<MessageType::RecordingData> RecordingDataMessage;
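// Illustrative sketch (assumption, not part of this patch): recording bytes
// starting at offset aStart, destined for fork id 0 (assumed here to denote
// the root replaying process), could be wrapped like this; the argument names
// are hypothetical.
static inline RecordingDataMessage* NewRecordingDataMessageSketch(
    uint64_t aStart, const char* aData, size_t aSize) {
  return RecordingDataMessage::New(/* aForkId */ 0, /* aTag */ aStart, aData,
                                   aSize);
}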
struct PingMessage : public Message {
uint32_t mId;
explicit PingMessage(uint32_t aId)
: Message(MessageType::Ping, sizeof(*this)), mId(aId) {}
explicit PingMessage(uint32_t aForkId, uint32_t aId)
: Message(MessageType::Ping, sizeof(*this), aForkId), mId(aId) {}
};
struct PingResponseMessage : public Message {
uint32_t mId;
uint64_t mProgress;
PingResponseMessage(uint32_t aId, uint64_t aProgress)
: Message(MessageType::PingResponse, sizeof(*this)),
PingResponseMessage(uint32_t aForkId, uint32_t aId, uint64_t aProgress)
: Message(MessageType::PingResponse, sizeof(*this), aForkId),
mId(aId),
mProgress(aProgress) {}
};
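The Ping/PingResponse pair above carries an id and an execution progress counter per fork. The stalled-progress policy that the removed ChildProcessInfo::IsHanged implemented later in this diff (a child is considered hung when the last MaxStalledPings pings either went unanswered or all reported the same progress) now lives in the JS control logic; the sketch below restates that heuristic as a standalone function and does not claim to reproduce the JS side exactly.

#include <cstdint>
#include <vector>

// Progress values reported by the most recent pings; 0 means "no response yet".
// Returns true when the last aMaxStalledPings samples show no forward progress.
bool LooksHung(const std::vector<uint64_t>& aPingProgress,
               size_t aMaxStalledPings) {
  if (aPingProgress.size() < aMaxStalledPings) {
    return false;  // not enough samples to judge
  }
  size_t first = aPingProgress.size() - aMaxStalledPings;
  uint64_t firstValue = aPingProgress[first];
  if (!firstValue) {
    return true;  // the child never answered any of the recent pings
  }
  for (size_t i = first; i < aPingProgress.size(); i++) {
    if (aPingProgress[i] && aPingProgress[i] != firstValue) {
      return false;  // progress advanced at some point
    }
  }
  return true;
}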
@ -325,15 +344,39 @@ class Channel {
// called on the channel's message thread.
typedef std::function<void(Message::UniquePtr)> MessageHandler;
// Different kinds of channels.
enum class Kind {
// Connect middleman to a recording process.
MiddlemanRecord,
// Connect middleman to a replaying process.
MiddlemanReplay,
// Connect recording or replaying process to the middleman.
RecordReplay,
// Connect parent managing a cloud connection to a middleman.
ParentCloud,
// Connect a root replaying process to one of its forks.
ReplayRoot,
// Connect a forked replaying process to its root replaying process.
ReplayForked,
};
private:
// ID for this channel, unique for the middleman.
size_t mId;
// Kind of this channel.
Kind mKind;
// Callback to invoke off thread on incoming messages.
MessageHandler mHandler;
// Whether the channel is initialized and ready for outgoing messages.
Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mInitialized;
bool mInitialized;
// Descriptor used to accept connections on the parent side.
int mConnectionFd;
@ -345,22 +388,13 @@ class Channel {
Monitor mMonitor;
// Buffer for message data received from the other side of the channel.
typedef InfallibleVector<char, 0, AllocPolicy<MemoryKind::Generic>>
MessageBuffer;
MessageBuffer* mMessageBuffer;
typedef InfallibleVector<char> MessageBuffer;
MessageBuffer mMessageBuffer;
// The number of bytes of data already in the message buffer.
size_t mMessageBytes;
// Whether this channel is subject to message delays during simulation.
bool mSimulateDelays;
// The time this channel was opened, for use in simulating message delays.
TimeStamp mStartTime;
// When simulating message delays, the time at which old messages will have
// finished sending and new messages may be sent.
TimeStamp mAvailableTime;
InfallibleVector<char> mPendingData;
// If spew is enabled, print a message and associated info to stderr.
void PrintMessage(const char* aPrefix, const Message& aMsg);
@ -372,16 +406,54 @@ class Channel {
// Main routine for the channel's thread.
static void ThreadMain(void* aChannel);
void SendRaw(const char* aData, size_t aSize);
// Return whether this is the parent side of a connection. This side is opened
// first and the child will connect to it afterwards.
bool IsParent() {
switch (mKind) {
case Kind::MiddlemanRecord:
case Kind::MiddlemanReplay:
case Kind::ReplayForked:
return true;
case Kind::RecordReplay:
case Kind::ParentCloud:
case Kind::ReplayRoot:
return false;
}
MOZ_CRASH("Bad kind");
}
// Return whether to exit the process when the other side of the channel
// disconnects.
bool ExitProcessOnDisconnect() {
switch (mKind) {
case Kind::RecordReplay:
case Kind::ReplayForked:
return true;
case Kind::MiddlemanRecord:
case Kind::MiddlemanReplay:
case Kind::ParentCloud:
case Kind::ReplayRoot:
return false;
}
MOZ_CRASH("Bad kind");
}
public:
// Initialize this channel, connect to the other side, and spin up a thread
// to process incoming messages by calling aHandler.
Channel(size_t aId, bool aMiddlemanRecording, const MessageHandler& aHandler);
Channel(size_t aId, Kind aKind, const MessageHandler& aHandler,
base::ProcessId aParentPid = 0);
size_t GetId() { return mId; }
// Send a message to the other side of the channel. This must be called on
// the main thread, except for fatal error messages.
// Send a message to the other side of the channel.
void SendMessage(Message&& aMsg);
// Send data which contains message(s) to the other side of the channel.
void SendMessageData(const char* aData, size_t aSize);
// Exit the process if the channel is not initialized before a deadline.
void ExitIfNotInitializedBefore(const TimeStamp& aDeadline);
};
// Command line option used to specify the middleman pid for a child process.

View file

@ -21,7 +21,6 @@
#include "mozilla/VsyncDispatcher.h"
#include "InfallibleVector.h"
#include "MemorySnapshot.h"
#include "nsPrintfCString.h"
#include "ParentInternal.h"
#include "ProcessRecordReplay.h"
@ -50,6 +49,9 @@ Monitor* gMonitor;
// The singleton channel for communicating with the middleman.
Channel* gChannel;
// Fork ID of this process.
static size_t gForkId;
static base::ProcessId gMiddlemanPid;
static base::ProcessId gParentPid;
static StaticInfallibleVector<char*> gParentArgv;
@ -63,21 +65,32 @@ static FileHandle gCheckpointReadFd;
// receipt and then processed during InitRecordingOrReplayingProcess.
static UniquePtr<IntroductionMessage, Message::FreePolicy> gIntroductionMessage;
// Data we've received which hasn't been incorporated into the recording yet.
static StaticInfallibleVector<char> gPendingRecordingData;
// When recording, whether developer tools server code runs in the middleman.
static bool gDebuggerRunsInMiddleman;
// Any response received to the last MiddlemanCallRequest message.
static UniquePtr<MiddlemanCallResponseMessage, Message::FreePolicy>
// Any response received to the last ExternalCallRequest message.
static UniquePtr<ExternalCallResponseMessage, Message::FreePolicy>
gCallResponseMessage;
// Whether some thread has sent a MiddlemanCallRequest and is waiting for
// Whether some thread has sent an ExternalCallRequest and is waiting for
// gCallResponseMessage to be filled in.
static bool gWaitingForCallResponse;
static void HandleMessageToForkedProcess(Message::UniquePtr aMsg);
// Processing routine for incoming channel messages.
static void ChannelMessageHandler(Message::UniquePtr aMsg) {
MOZ_RELEASE_ASSERT(MainThreadShouldPause() || aMsg->CanBeSentWhileUnpaused());
if (aMsg->mForkId != gForkId) {
MOZ_RELEASE_ASSERT(!gForkId);
HandleMessageToForkedProcess(std::move(aMsg));
return;
}
switch (aMsg->mType) {
case MessageType::Introduction: {
MonitorAutoLock lock(*gMonitor);
@ -114,21 +127,16 @@ static void ChannelMessageHandler(Message::UniquePtr aMsg) {
const PingMessage& nmsg = (const PingMessage&)*aMsg;
uint64_t total =
*ExecutionProgressCounter() + Thread::TotalEventProgress();
gChannel->SendMessage(PingResponseMessage(nmsg.mId, total));
gChannel->SendMessage(PingResponseMessage(gForkId, nmsg.mId, total));
break;
}
case MessageType::Terminate: {
// Terminate messages behave differently in recording vs. replaying
// processes. When sent to a recording process (which the middleman
// manages directly) they signal that a clean shutdown is needed, while
// when sent to a replaying process (which the UI process manages) they
// signal that the process should crash, since it seems to be hanged.
if (IsRecording()) {
PrintSpew("Terminate message received, exiting...\n");
_exit(0);
} else {
ReportFatalError(Nothing(), "Hung replaying process");
}
PrintSpew("Terminate message received, exiting...\n");
_exit(0);
break;
}
case MessageType::Crash: {
ReportFatalError("Hung replaying process");
break;
}
case MessageType::ManifestStart: {
@ -141,12 +149,21 @@ static void ChannelMessageHandler(Message::UniquePtr aMsg) {
});
break;
}
case MessageType::MiddlemanCallResponse: {
case MessageType::ExternalCallResponse: {
MonitorAutoLock lock(*gMonitor);
MOZ_RELEASE_ASSERT(gWaitingForCallResponse);
MOZ_RELEASE_ASSERT(!gCallResponseMessage);
gCallResponseMessage.reset(
static_cast<MiddlemanCallResponseMessage*>(aMsg.release()));
static_cast<ExternalCallResponseMessage*>(aMsg.release()));
gMonitor->NotifyAll();
break;
}
case MessageType::RecordingData: {
MonitorAutoLock lock(*gMonitor);
const RecordingDataMessage& nmsg = (const RecordingDataMessage&)*aMsg;
MOZ_RELEASE_ASSERT(
nmsg.mTag == gRecording->Size() + gPendingRecordingData.length());
gPendingRecordingData.append(nmsg.BinaryData(), nmsg.BinaryDataSize());
gMonitor->NotifyAll();
break;
}
@ -179,50 +196,10 @@ static void ListenForCheckpointThreadMain(void*) {
// Shared memory block for graphics data.
void* gGraphicsShmem;
void InitRecordingOrReplayingProcess(int* aArgc, char*** aArgv) {
if (!IsRecordingOrReplaying()) {
return;
}
Maybe<int> middlemanPid;
Maybe<int> channelID;
for (int i = 0; i < *aArgc; i++) {
if (!strcmp((*aArgv)[i], gMiddlemanPidOption)) {
MOZ_RELEASE_ASSERT(middlemanPid.isNothing() && i + 1 < *aArgc);
middlemanPid.emplace(atoi((*aArgv)[i + 1]));
}
if (!strcmp((*aArgv)[i], gChannelIDOption)) {
MOZ_RELEASE_ASSERT(channelID.isNothing() && i + 1 < *aArgc);
channelID.emplace(atoi((*aArgv)[i + 1]));
}
}
MOZ_RELEASE_ASSERT(middlemanPid.isSome());
MOZ_RELEASE_ASSERT(channelID.isSome());
gMiddlemanPid = middlemanPid.ref();
Maybe<AutoPassThroughThreadEvents> pt;
pt.emplace();
gMonitor = new Monitor();
gChannel = new Channel(channelID.ref(), /* aMiddlemanRecording = */ false,
ChannelMessageHandler);
pt.reset();
// N.B. We can't spawn recorded threads when replaying if there was an
// initialization failure.
if (!gInitializationFailureMessage) {
DirectCreatePipe(&gCheckpointWriteFd, &gCheckpointReadFd);
Thread::StartThread(ListenForCheckpointThreadMain, nullptr, false);
}
pt.emplace();
static void WaitForGraphicsShmem() {
// Setup a mach port to receive the graphics shmem handle over.
ReceivePort receivePort(
nsPrintfCString("WebReplay.%d.%d", gMiddlemanPid, (int)channelID.ref())
.get());
nsPrintfCString portString("WebReplay.%d.%lu", gMiddlemanPid, GetId());
ReceivePort receivePort(portString.get());
MachSendMessage handshakeMessage(parent::GraphicsHandshakeMessageId);
handshakeMessage.AddDescriptor(
@ -248,40 +225,81 @@ void InitRecordingOrReplayingProcess(int* aArgc, char*** aArgv) {
MOZ_RELEASE_ASSERT(kr == KERN_SUCCESS);
gGraphicsShmem = (void*)address;
}
// The graphics shared memory contents are excluded from snapshots. We do not
// want checkpoint restores in this child to interfere with drawing being
// performed by another child.
AddInitialUntrackedMemoryRegion((uint8_t*)gGraphicsShmem,
parent::GraphicsMemorySize);
static void InitializeForkListener();
pt.reset();
void SetupRecordReplayChannel(int aArgc, char* aArgv[]) {
MOZ_RELEASE_ASSERT(IsRecordingOrReplaying() &&
AreThreadEventsPassedThrough());
Maybe<int> channelID;
for (int i = 0; i < aArgc; i++) {
if (!strcmp(aArgv[i], gMiddlemanPidOption)) {
MOZ_RELEASE_ASSERT(!gMiddlemanPid && i + 1 < aArgc);
gMiddlemanPid = atoi(aArgv[i + 1]);
}
if (!strcmp(aArgv[i], gChannelIDOption)) {
MOZ_RELEASE_ASSERT(channelID.isNothing() && i + 1 < aArgc);
channelID.emplace(atoi(aArgv[i + 1]));
}
}
MOZ_RELEASE_ASSERT(channelID.isSome());
gMonitor = new Monitor();
gChannel = new Channel(channelID.ref(), Channel::Kind::RecordReplay,
ChannelMessageHandler, gMiddlemanPid);
// If we failed to initialize then report it to the user.
if (gInitializationFailureMessage) {
ReportFatalError(Nothing(), "%s", gInitializationFailureMessage);
ReportFatalError("%s", gInitializationFailureMessage);
Unreachable();
}
// Wait for the parent to send us the introduction message.
{
MonitorAutoLock lock(*gMonitor);
while (!gIntroductionMessage) {
MonitorAutoLock lock(*gMonitor);
while (!gIntroductionMessage) {
gMonitor->Wait();
}
// If we're replaying, we also need to wait for some recording data.
if (IsReplaying()) {
while (gPendingRecordingData.empty()) {
gMonitor->Wait();
}
}
}
void InitRecordingOrReplayingProcess(int* aArgc, char*** aArgv) {
if (!IsRecordingOrReplaying()) {
return;
}
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
{
AutoPassThroughThreadEvents pt;
if (IsRecording()) {
WaitForGraphicsShmem();
} else {
InitializeForkListener();
}
}
DirectCreatePipe(&gCheckpointWriteFd, &gCheckpointReadFd);
Thread::StartThread(ListenForCheckpointThreadMain, nullptr, false);
// Process the introduction message to fill in arguments.
MOZ_RELEASE_ASSERT(gParentArgv.empty());
gParentPid = gIntroductionMessage->mParentPid;
// Record/replay the introduction message itself so we get consistent args
// between recording and replaying.
{
IntroductionMessage* msg =
IntroductionMessage::RecordReplay(*gIntroductionMessage);
gParentPid = gIntroductionMessage->mParentPid;
const char* pos = msg->ArgvString();
for (size_t i = 0; i < msg->mArgc; i++) {
gParentArgv.append(strdup(pos));
@ -311,45 +329,197 @@ bool DebuggerRunsInMiddleman() {
return RecordReplayValue(gDebuggerRunsInMiddleman);
}
void ReportFatalError(const Maybe<MinidumpInfo>& aMinidump, const char* aFormat,
...) {
// Notify the middleman that we are crashing and are going to try to write a
// minidump.
gChannel->SendMessage(BeginFatalErrorMessage());
static void HandleMessageFromForkedProcess(Message::UniquePtr aMsg);
// Unprotect any memory which might be written while producing the minidump.
UnrecoverableSnapshotFailure();
// Messages to send to forks that don't exist yet.
static StaticInfallibleVector<Message::UniquePtr> gPendingForkMessages;
struct ForkedProcess {
base::ProcessId mPid;
size_t mForkId;
Channel* mChannel;
};
static StaticInfallibleVector<ForkedProcess> gForkedProcesses;
static FileHandle gForkWriteFd, gForkReadFd;
static char* gFatalErrorMemory;
static const size_t FatalErrorMemorySize = PageSize;
static void ForkListenerThread(void*) {
while (true) {
ForkedProcess process;
int nbytes = read(gForkReadFd, &process, sizeof(process));
MOZ_RELEASE_ASSERT(nbytes == sizeof(process));
process.mChannel = new Channel(0, Channel::Kind::ReplayRoot,
HandleMessageFromForkedProcess,
process.mPid);
// Send any messages destined for this fork.
size_t i = 0;
while (i < gPendingForkMessages.length()) {
auto& pending = gPendingForkMessages[i];
if (pending->mForkId == process.mForkId) {
process.mChannel->SendMessage(std::move(*pending));
gPendingForkMessages.erase(&pending);
} else {
i++;
}
}
gForkedProcesses.emplaceBack(process);
}
}
static void InitializeForkListener() {
DirectCreatePipe(&gForkWriteFd, &gForkReadFd);
Thread::SpawnNonRecordedThread(ForkListenerThread, nullptr);
if (!ReplayingInCloud()) {
gFatalErrorMemory = (char*) mmap(nullptr, FatalErrorMemorySize,
PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);
MOZ_RELEASE_ASSERT(gFatalErrorMemory != MAP_FAILED);
}
}
static void SendMessageToForkedProcess(Message::UniquePtr aMsg) {
for (const ForkedProcess& process : gForkedProcesses) {
if (process.mForkId == aMsg->mForkId) {
process.mChannel->SendMessage(std::move(*aMsg));
return;
}
}
gPendingForkMessages.append(std::move(aMsg));
}
static bool MaybeHandleExternalCallResponse(const Message& aMsg) {
// Remember the results of any external calls that have been made, in case
// they show up again later.
if (aMsg.mType == MessageType::ExternalCallResponse) {
const auto& nmsg = static_cast<const ExternalCallResponseMessage&>(aMsg);
AddExternalCallOutput(nmsg.mTag, nmsg.BinaryData(), nmsg.BinaryDataSize());
return true;
}
return false;
}
static void HandleMessageToForkedProcess(Message::UniquePtr aMsg) {
MaybeHandleExternalCallResponse(*aMsg);
SendMessageToForkedProcess(std::move(aMsg));
}
static void HandleMessageFromForkedProcess(Message::UniquePtr aMsg) {
// Try to handle external calls with data in this process, instead of
// forwarding them (potentially across a network connection) to the middleman.
if (aMsg->mType == MessageType::ExternalCallRequest) {
const auto& nmsg = static_cast<const ExternalCallRequestMessage&>(*aMsg);
InfallibleVector<char> outputData;
if (HasExternalCallOutput(nmsg.mTag, &outputData)) {
Message::UniquePtr response(ExternalCallResponseMessage::New(
nmsg.mForkId, nmsg.mTag, outputData.begin(), outputData.length()));
SendMessageToForkedProcess(std::move(response));
return;
}
}
if (MaybeHandleExternalCallResponse(*aMsg)) {
// CallResponse messages from forked processes are intended for this one.
// Don't notify the middleman.
return;
}
gChannel->SendMessage(std::move(*aMsg));
}
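HandleMessageFromForkedProcess consults HasExternalCallOutput and AddExternalCallOutput (defined in ExternalCall.cpp, outside this excerpt) so the root replaying process can answer repeated ExternalCallRequests locally instead of forwarding them over a possibly remote channel again. The sketch below shows the kind of cache those helpers imply; the container and function names are assumptions, not the actual ExternalCall.cpp implementation, and the real version must be safe against access from channel threads.

#include <cstdint>
#include <unordered_map>
#include <vector>

using ExternalCallId = uint64_t;

// Cache of external call outputs, keyed by call ID.
static std::unordered_map<ExternalCallId, std::vector<char>> sCallOutputs;

void AddCallOutput(ExternalCallId aId, const char* aData, size_t aSize) {
  sCallOutputs[aId].assign(aData, aData + aSize);
}

// Returns true and fills aOutput if this call was performed before.
bool FindCallOutput(ExternalCallId aId, std::vector<char>* aOutput) {
  auto it = sCallOutputs.find(aId);
  if (it == sCallOutputs.end()) {
    return false;
  }
  *aOutput = it->second;
  return true;
}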
static const size_t ForkTimeoutSeconds = 10;
void RegisterFork(size_t aForkId) {
AutoPassThroughThreadEvents pt;
gForkId = aForkId;
gChannel = new Channel(0, Channel::Kind::ReplayForked, ChannelMessageHandler);
ForkedProcess process;
process.mPid = getpid();
process.mForkId = aForkId;
int nbytes = write(gForkWriteFd, &process, sizeof(process));
MOZ_RELEASE_ASSERT(nbytes == sizeof(process));
// If the root process is exiting while we are setting up the channel, it will
// not connect to this process and we won't be able to shut down properly.
// Set a timeout to avoid this situation.
TimeStamp deadline =
TimeStamp::Now() + TimeDuration::FromSeconds(ForkTimeoutSeconds);
gChannel->ExitIfNotInitializedBefore(deadline);
}
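RegisterFork announces a fork to the root process by writing a fixed-size ForkedProcess record into gForkWriteFd, and ForkListenerThread above reads one record per fork and opens a channel back to it. A self-contained POSIX sketch of that pipe-based registration handshake, with simplified names; the real code uses DirectCreatePipe and a non-recorded thread rather than plain pipe() and the main thread.

#include <sys/types.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

struct ForkRecord {
  pid_t mPid;
  size_t mForkId;
};

int main() {
  int fds[2];
  if (pipe(fds)) {  // fds[0] = read end, fds[1] = write end
    return 1;
  }
  pid_t child = fork();
  if (child == 0) {
    // Forked side: announce ourselves to the root, as RegisterFork does.
    ForkRecord rec = {getpid(), /* fork id */ 1};
    write(fds[1], &rec, sizeof(rec));
    _exit(0);
  }
  // Root side: the listener reads one fixed-size record per fork.
  ForkRecord rec;
  ssize_t nbytes = read(fds[0], &rec, sizeof(rec));
  if (nbytes == (ssize_t)sizeof(rec)) {
    printf("fork %zu registered with pid %d\n", rec.mForkId, (int)rec.mPid);
  }
  return 0;
}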
void ReportCrash(const MinidumpInfo& aInfo, void* aFaultingAddress) {
int pid;
pid_for_task(aInfo.mTask, &pid);
size_t forkId = 0;
if (aInfo.mTask != mach_task_self()) {
for (const ForkedProcess& fork : gForkedProcesses) {
if (fork.mPid == pid) {
forkId = fork.mForkId;
}
}
if (!forkId) {
Print("Could not find fork ID for crashing task\n");
}
}
AutoEnsurePassThroughThreadEvents pt;
#ifdef MOZ_CRASHREPORTER
MinidumpInfo info = aMinidump.isSome()
? aMinidump.ref()
: MinidumpInfo(EXC_CRASH, 1, 0, mach_thread_self());
google_breakpad::ExceptionHandler::WriteForwardedExceptionMinidump(
info.mExceptionType, info.mCode, info.mSubcode, info.mThread);
aInfo.mExceptionType, aInfo.mCode, aInfo.mSubcode, aInfo.mThread,
aInfo.mTask);
#endif
va_list ap;
va_start(ap, aFormat);
char buf[2048];
VsprintfLiteral(buf, aFormat, ap);
va_end(ap);
if (gFatalErrorMemory && gFatalErrorMemory[0]) {
SprintfLiteral(buf, "%s", gFatalErrorMemory);
memset(gFatalErrorMemory, 0, FatalErrorMemorySize);
} else {
SprintfLiteral(buf, "Fault %p", aFaultingAddress);
}
// Construct a FatalErrorMessage on the stack, to avoid touching the heap.
char msgBuf[4096];
size_t header = sizeof(FatalErrorMessage);
size_t len = std::min(strlen(buf) + 1, sizeof(msgBuf) - header);
FatalErrorMessage* msg = new (msgBuf) FatalErrorMessage(header + len);
FatalErrorMessage* msg = new (msgBuf) FatalErrorMessage(header + len, forkId);
memcpy(&msgBuf[header], buf, len);
msgBuf[sizeof(msgBuf) - 1] = 0;
// Don't take the message lock when sending this, to avoid touching the heap.
gChannel->SendMessage(std::move(*msg));
DirectPrint("***** Fatal Record/Replay Error *****\n");
DirectPrint(buf);
DirectPrint("\n");
Print("***** Fatal Record/Replay Error #%lu:%lu *****\n%s\n", GetId(), forkId,
buf);
}
void ReportFatalError(const char* aFormat, ...) {
if (!gFatalErrorMemory) {
gFatalErrorMemory = new char[4096];
}
va_list ap;
va_start(ap, aFormat);
vsnprintf(gFatalErrorMemory, FatalErrorMemorySize - 1, aFormat, ap);
va_end(ap);
Print("FatalError: %s\n", gFatalErrorMemory);
MOZ_CRASH("ReportFatalError");
}
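gFatalErrorMemory is an anonymous MAP_SHARED page mapped before any forks are created, so a crashing fork can leave an error string that the root process reads when it handles the crash: ReportFatalError formats into the page, and ReportCrash copies it into the FatalErrorMessage. A minimal sketch of that pattern using the same mmap flags; the surrounding process setup is simplified and the names are illustrative.

#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

int main() {
  // Anonymous shared mapping: survives fork() and is visible to both sides.
  const size_t kSize = 4096;
  char* shared = (char*)mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                             MAP_ANON | MAP_SHARED, -1, 0);
  if (shared == MAP_FAILED) {
    return 1;
  }
  pid_t child = fork();
  if (child == 0) {
    // The fork records why it is about to die, then exits abnormally.
    snprintf(shared, kSize, "Unhandled divergence at progress %llu",
             123456789ULL);
    _exit(1);
  }
  waitpid(child, nullptr, 0);
  // The root process reads the message the fork left behind.
  printf("child reported: %s\n", shared);
  munmap(shared, kSize);
  return 0;
}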
void ReportUnhandledDivergence() {
gChannel->SendMessage(UnhandledDivergenceMessage(gForkId));
// Block until we get a terminate message and die.
Thread::WaitForeverNoIdle();
@ -357,6 +527,29 @@ void ReportFatalError(const Maybe<MinidumpInfo>& aMinidump, const char* aFormat,
size_t GetId() { return gChannel->GetId(); }
void AddPendingRecordingData() {
Thread::WaitForIdleThreads();
InfallibleVector<Stream*> updatedStreams;
{
MonitorAutoLock lock(*gMonitor);
MOZ_RELEASE_ASSERT(!gPendingRecordingData.empty());
gRecording->NewContents((const uint8_t*)gPendingRecordingData.begin(),
gPendingRecordingData.length(), &updatedStreams);
gPendingRecordingData.clear();
}
for (Stream* stream : updatedStreams) {
if (stream->Name() == StreamName::Lock) {
Lock::LockAcquiresUpdated(stream->NameIndex());
}
}
Thread::ResumeIdleThreads();
}
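RecordingData messages carry the absolute start offset of their payload in mTag, and both the channel handler above and AddPendingRecordingData assert that chunks arrive contiguously before appending them to the recording. A small sketch of that bookkeeping; RecordingBuffer and Append are invented names standing in for gRecording and gPendingRecordingData.

#include <cassert>
#include <cstddef>
#include <vector>

// Accumulates recording bytes streamed from the middleman. Each chunk is
// tagged with the absolute offset it starts at; gaps are not allowed.
class RecordingBuffer {
  std::vector<char> mBytes;

 public:
  size_t Size() const { return mBytes.size(); }

  void Append(size_t aStart, const char* aData, size_t aSize) {
    // Mirrors the MOZ_RELEASE_ASSERT on RecordingDataMessage::mTag above:
    // each chunk must begin exactly where the data we already have ends.
    assert(aStart == mBytes.size());
    mBytes.insert(mBytes.end(), aData, aData + aSize);
  }
};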
///////////////////////////////////////////////////////////////////////////////
// Vsyncs
///////////////////////////////////////////////////////////////////////////////
@ -464,6 +657,32 @@ already_AddRefed<gfx::DrawTarget> DrawTargetForRemoteDrawing(
return drawTarget.forget();
}
bool EncodeGraphics(nsACString& aData) {
// Get an image encoder for the media type.
nsCString encoderCID("@mozilla.org/image/encoder;2?type=image/png");
nsCOMPtr<imgIEncoder> encoder = do_CreateInstance(encoderCID.get());
size_t stride = layers::ImageDataSerializer::ComputeRGBStride(gSurfaceFormat,
gPaintWidth);
nsString options;
nsresult rv = encoder->InitFromData(
(const uint8_t*)gDrawTargetBuffer, stride * gPaintHeight, gPaintWidth,
gPaintHeight, stride, imgIEncoder::INPUT_FORMAT_HOSTARGB, options);
if (NS_FAILED(rv)) {
return false;
}
uint64_t count;
rv = encoder->Available(&count);
if (NS_FAILED(rv)) {
return false;
}
rv = Base64EncodeInputStream(encoder, aData, count);
return NS_SUCCEEDED(rv);
}
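EncodeGraphics PNG-encodes the draw target buffer and then base64-encodes the encoder's output stream via Base64EncodeInputStream, and the middleman later Base64Decodes the PaintEncoded payload. For reference, a plain standalone base64 encoder over a flat buffer; this is not the Gecko API, which streams from the imgIEncoder rather than taking a vector.

#include <cstdint>
#include <string>
#include <vector>

// Encode a binary buffer as base64 text.
std::string Base64Encode(const std::vector<uint8_t>& aData) {
  static const char kAlphabet[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  std::string out;
  out.reserve(((aData.size() + 2) / 3) * 4);
  size_t i = 0;
  // Emit four output characters for every full three-byte group.
  for (; i + 3 <= aData.size(); i += 3) {
    uint32_t n = (aData[i] << 16) | (aData[i + 1] << 8) | aData[i + 2];
    out.push_back(kAlphabet[(n >> 18) & 63]);
    out.push_back(kAlphabet[(n >> 12) & 63]);
    out.push_back(kAlphabet[(n >> 6) & 63]);
    out.push_back(kAlphabet[n & 63]);
  }
  // Pad the final one- or two-byte group with '=' characters.
  size_t rest = aData.size() - i;
  if (rest == 1) {
    uint32_t n = aData[i] << 16;
    out.push_back(kAlphabet[(n >> 18) & 63]);
    out.push_back(kAlphabet[(n >> 12) & 63]);
    out += "==";
  } else if (rest == 2) {
    uint32_t n = (aData[i] << 16) | (aData[i + 1] << 8);
    out.push_back(kAlphabet[(n >> 18) & 63]);
    out.push_back(kAlphabet[(n >> 12) & 63]);
    out.push_back(kAlphabet[(n >> 6) & 63]);
    out.push_back('=');
  }
  return out;
}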
void NotifyPaintStart() {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
@ -499,8 +718,22 @@ static void PaintFromMainThread() {
MOZ_RELEASE_ASSERT(!gNumPendingPaints);
if (IsMainChild() && gDrawTargetBuffer) {
memcpy(gGraphicsShmem, gDrawTargetBuffer, gDrawTargetBufferSize);
gChannel->SendMessage(PaintMessage(gPaintWidth, gPaintHeight));
if (IsRecording()) {
memcpy(gGraphicsShmem, gDrawTargetBuffer, gDrawTargetBufferSize);
gChannel->SendMessage(PaintMessage(gPaintWidth, gPaintHeight));
} else {
AutoPassThroughThreadEvents pt;
nsAutoCString data;
if (!EncodeGraphics(data)) {
MOZ_CRASH("EncodeGraphics failed");
}
Message* msg = PaintEncodedMessage::New(gForkId, 0, data.BeginReading(),
data.Length());
gChannel->SendMessage(std::move(*msg));
free(msg);
}
}
}
@ -527,7 +760,7 @@ static bool gDidRepaint;
// Whether we are currently repainting.
static bool gRepainting;
bool Repaint(nsAString& aData) {
bool Repaint(nsACString& aData) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MOZ_RELEASE_ASSERT(HasDivergedFromRecording());
@ -562,29 +795,7 @@ bool Repaint(nsAString& aData) {
return false;
}
// Get an image encoder for the media type.
nsCString encoderCID("@mozilla.org/image/encoder;2?type=image/png");
nsCOMPtr<imgIEncoder> encoder = do_CreateInstance(encoderCID.get());
size_t stride = layers::ImageDataSerializer::ComputeRGBStride(gSurfaceFormat,
gPaintWidth);
nsString options;
nsresult rv = encoder->InitFromData(
(const uint8_t*)gDrawTargetBuffer, stride * gPaintHeight, gPaintWidth,
gPaintHeight, stride, imgIEncoder::INPUT_FORMAT_HOSTARGB, options);
if (NS_FAILED(rv)) {
return false;
}
uint64_t count;
rv = encoder->Available(&count);
if (NS_FAILED(rv)) {
return false;
}
rv = Base64EncodeInputStream(encoder, aData, count);
return NS_SUCCEEDED(rv);
return EncodeGraphics(aData);
}
bool CurrentRepaintCannotFail() {
@ -598,15 +809,16 @@ bool CurrentRepaintCannotFail() {
void ManifestFinished(const js::CharBuffer& aBuffer) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
ManifestFinishedMessage* msg =
ManifestFinishedMessage::New(aBuffer.begin(), aBuffer.length());
ManifestFinishedMessage::New(gForkId, aBuffer.begin(), aBuffer.length());
PauseMainThreadAndInvokeCallback([=]() {
gChannel->SendMessage(std::move(*msg));
free(msg);
});
}
void SendMiddlemanCallRequest(const char* aInputData, size_t aInputSize,
InfallibleVector<char>* aOutputData) {
void SendExternalCallRequest(ExternalCallId aId,
const char* aInputData, size_t aInputSize,
InfallibleVector<char>* aOutputData) {
AutoPassThroughThreadEvents pt;
MonitorAutoLock lock(*gMonitor);
@ -615,10 +827,9 @@ void SendMiddlemanCallRequest(const char* aInputData, size_t aInputSize,
}
gWaitingForCallResponse = true;
MiddlemanCallRequestMessage* msg =
MiddlemanCallRequestMessage::New(aInputData, aInputSize);
UniquePtr<ExternalCallRequestMessage> msg(ExternalCallRequestMessage::New(
gForkId, aId, aInputData, aInputSize));
gChannel->SendMessage(std::move(*msg));
free(msg);
while (!gCallResponseMessage) {
gMonitor->Wait();
@ -633,9 +844,19 @@ void SendMiddlemanCallRequest(const char* aInputData, size_t aInputSize,
gMonitor->Notify();
}
void SendResetMiddlemanCalls() {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
gChannel->SendMessage(ResetMiddlemanCallsMessage());
void SendExternalCallOutput(ExternalCallId aId,
const char* aOutputData, size_t aOutputSize) {
Message::UniquePtr msg(ExternalCallResponseMessage::New(
gForkId, aId, aOutputData, aOutputSize));
gChannel->SendMessage(std::move(*msg));
}
void SendRecordingData(size_t aStart, const uint8_t* aData, size_t aSize) {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
RecordingDataMessage* msg =
RecordingDataMessage::New(gForkId, aStart, (const char*)aData, aSize);
gChannel->SendMessage(std::move(*msg));
free(msg);
}
} // namespace child

View file

@ -10,7 +10,7 @@
#include "Channel.h"
#include "ChildIPC.h"
#include "JSControl.h"
#include "MiddlemanCall.h"
#include "ExternalCall.h"
#include "Monitor.h"
// This file has internal definitions for communication between the main
@ -20,24 +20,33 @@ namespace mozilla {
namespace recordreplay {
namespace child {
// Optional information about a crash that occurred. If not provided to
// ReportFatalError, the current thread will be treated as crashed.
void SetupRecordReplayChannel(int aArgc, char* aArgv[]);
// Information about a crash that occurred.
struct MinidumpInfo {
int mExceptionType;
int mCode;
int mSubcode;
mach_port_t mThread;
mach_port_t mTask;
MinidumpInfo(int aExceptionType, int aCode, int aSubcode, mach_port_t aThread)
MinidumpInfo(int aExceptionType, int aCode, int aSubcode, mach_port_t aThread,
mach_port_t aTask)
: mExceptionType(aExceptionType),
mCode(aCode),
mSubcode(aSubcode),
mThread(aThread) {}
mThread(aThread),
mTask(aTask) {}
};
void ReportCrash(const MinidumpInfo& aMinidumpInfo, void* aFaultingAddress);
// Generate a minidump and report a fatal error to the middleman process.
void ReportFatalError(const Maybe<MinidumpInfo>& aMinidumpInfo,
const char* aFormat, ...);
void ReportFatalError(const char* aFormat, ...);
// Report to the middleman that we had an unhandled recording divergence, and
// that execution in this process cannot continue.
void ReportUnhandledDivergence();
// Get the unique ID of this child.
size_t GetId();
@ -51,10 +60,15 @@ bool DebuggerRunsInMiddleman();
// Notify the middleman that the last manifest was finished.
void ManifestFinished(const js::CharBuffer& aResponse);
// Send messages operating on middleman calls.
void SendMiddlemanCallRequest(const char* aInputData, size_t aInputSize,
InfallibleVector<char>* aOutputData);
void SendResetMiddlemanCalls();
// Send messages operating on external calls.
void SendExternalCallRequest(ExternalCallId aId,
const char* aInputData, size_t aInputSize,
InfallibleVector<char>* aOutputData);
// Send the output from an external call to the root replaying process,
// to fill in its external call cache.
void SendExternalCallOutput(ExternalCallId aId,
const char* aOutputData, size_t aOutputSize);
// Return whether a repaint is in progress and is not allowed to trigger an
// unhandled recording divergence per preferences.
@ -62,7 +76,15 @@ bool CurrentRepaintCannotFail();
// Paint according to the current process state, then convert it to an image
// and serialize it in aData.
bool Repaint(nsAString& aData);
bool Repaint(nsACString& aData);
// Mark this process as having forked from its parent.
void RegisterFork(size_t aForkId);
// Send new recording data from a recording process to the middleman.
void SendRecordingData(size_t aStart, const uint8_t* aData, size_t aSize);
void AddPendingRecordingData();
} // namespace child
} // namespace recordreplay

View file

@ -17,76 +17,85 @@ namespace parent {
// A saved introduction message for sending to all children.
static IntroductionMessage* gIntroductionMessage;
// How many channels have been constructed so far.
static size_t gNumChannels;
// Whether children might be debugged and should not be treated as hung.
static bool gChildrenAreDebugging;
/* static */
void ChildProcessInfo::SetIntroductionMessage(IntroductionMessage* aMessage) {
gIntroductionMessage = aMessage;
}
ChildProcessInfo::ChildProcessInfo(
const Maybe<RecordingProcessData>& aRecordingProcessData)
size_t aId, const Maybe<RecordingProcessData>& aRecordingProcessData)
: mRecording(aRecordingProcessData.isSome()) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
static bool gFirst = false;
if (!gFirst) {
gFirst = true;
gChildrenAreDebugging = !!getenv("MOZ_REPLAYING_WAIT_AT_START");
}
Channel::Kind kind =
IsRecording() ? Channel::Kind::MiddlemanRecord : Channel::Kind::MiddlemanReplay;
mChannel = new Channel(aId, kind, [=](Message::UniquePtr aMsg) {
ReceiveChildMessageOnMainThread(aId, std::move(aMsg));
});
LaunchSubprocess(aRecordingProcessData);
LaunchSubprocess(aId, aRecordingProcessData);
}
ChildProcessInfo::~ChildProcessInfo() {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
if (IsRecording() && !HasCrashed()) {
SendMessage(TerminateMessage());
}
SendMessage(TerminateMessage(0));
}
void ChildProcessInfo::OnIncomingMessage(const Message& aMsg) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
switch (aMsg.mType) {
case MessageType::BeginFatalError:
mHasBegunFatalError = true;
return;
case MessageType::FatalError: {
mHasFatalError = true;
const FatalErrorMessage& nmsg =
static_cast<const FatalErrorMessage&>(aMsg);
OnCrash(nmsg.Error());
OnCrash(nmsg.mForkId, nmsg.Error());
return;
}
case MessageType::Paint:
UpdateGraphicsAfterPaint(static_cast<const PaintMessage&>(aMsg));
break;
case MessageType::ManifestFinished:
mPaused = true;
js::ForwardManifestFinished(this, aMsg);
case MessageType::PaintEncoded: {
const PaintEncodedMessage& nmsg =
static_cast<const PaintEncodedMessage&>(aMsg);
nsDependentCSubstring data(nmsg.BinaryData(), nmsg.BinaryDataSize());
nsAutoCString dataBinary;
if (NS_FAILED(Base64Decode(data, dataBinary))) {
MOZ_CRASH("Base64Decode failed");
}
UpdateGraphicsAfterRepaint(dataBinary);
break;
case MessageType::MiddlemanCallRequest: {
const MiddlemanCallRequestMessage& nmsg =
static_cast<const MiddlemanCallRequestMessage&>(aMsg);
}
case MessageType::ManifestFinished: {
const auto& nmsg = static_cast<const ManifestFinishedMessage&>(aMsg);
js::ForwardManifestFinished(this, nmsg);
break;
}
case MessageType::UnhandledDivergence: {
const auto& nmsg = static_cast<const UnhandledDivergenceMessage&>(aMsg);
js::ForwardUnhandledDivergence(this, nmsg);
break;
}
case MessageType::PingResponse: {
const auto& nmsg = static_cast<const PingResponseMessage&>(aMsg);
js::ForwardPingResponse(this, nmsg);
break;
}
case MessageType::ExternalCallRequest: {
const auto& nmsg = static_cast<const ExternalCallRequestMessage&>(aMsg);
InfallibleVector<char> outputData;
ProcessMiddlemanCall(GetId(), nmsg.BinaryData(), nmsg.BinaryDataSize(),
&outputData);
Message::UniquePtr response(MiddlemanCallResponseMessage::New(
outputData.begin(), outputData.length()));
ProcessExternalCall(nmsg.BinaryData(), nmsg.BinaryDataSize(),
&outputData);
Message::UniquePtr response(ExternalCallResponseMessage::New(
nmsg.mForkId, nmsg.mTag, outputData.begin(), outputData.length()));
SendMessage(std::move(*response));
break;
}
case MessageType::ResetMiddlemanCalls:
ResetMiddlemanCalls(GetId());
break;
case MessageType::PingResponse:
OnPingResponse(static_cast<const PingResponseMessage&>(aMsg));
case MessageType::RecordingData: {
const auto& msg = static_cast<const RecordingDataMessage&>(aMsg);
MOZ_RELEASE_ASSERT(msg.mTag == gRecordingContents.length());
gRecordingContents.append(msg.BinaryData(), msg.BinaryDataSize());
break;
}
default:
break;
}
@ -94,14 +103,6 @@ void ChildProcessInfo::OnIncomingMessage(const Message& aMsg) {
void ChildProcessInfo::SendMessage(Message&& aMsg) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MOZ_RELEASE_ASSERT(!HasCrashed());
// Update paused state.
MOZ_RELEASE_ASSERT(IsPaused() || aMsg.CanBeSentWhileUnpaused());
if (aMsg.mType == MessageType::ManifestStart) {
mPaused = false;
}
mChannel->SendMessage(std::move(aMsg));
}
@ -134,22 +135,16 @@ void GetArgumentsForChildProcess(base::ProcessId aMiddlemanPid,
}
void ChildProcessInfo::LaunchSubprocess(
size_t aChannelId,
const Maybe<RecordingProcessData>& aRecordingProcessData) {
size_t channelId = gNumChannels++;
// Create a new channel every time we launch a new subprocess, without
// deleting or tearing down the old one's state. This is pretty silly and it
// would be nice if we could do something better here, especially because
// with restarts we could create any number of channels over time.
mChannel =
new Channel(channelId, IsRecording(), [=](Message::UniquePtr aMsg) {
ReceiveChildMessageOnMainThread(std::move(aMsg));
});
MOZ_RELEASE_ASSERT(IsRecording() == aRecordingProcessData.isSome());
MOZ_RELEASE_ASSERT(gIntroductionMessage);
SendMessage(std::move(*gIntroductionMessage));
if (IsRecording()) {
std::vector<std::string> extraArgs;
GetArgumentsForChildProcess(base::GetCurrentProcId(), channelId,
GetArgumentsForChildProcess(base::GetCurrentProcId(), aChannelId,
gRecordingFilename, /* aRecording = */ true,
extraArgs);
@ -169,17 +164,17 @@ void ChildProcessInfo::LaunchSubprocess(
if (!gRecordingProcess->LaunchAndWaitForProcessHandle(extraArgs)) {
MOZ_CRASH("ChildProcessInfo::LaunchSubprocess");
}
SendGraphicsMemoryToChild();
} else {
dom::ContentChild::GetSingleton()->SendCreateReplayingProcess(channelId);
UniquePtr<Message> msg(RecordingDataMessage::New(
0, 0, gRecordingContents.begin(), gRecordingContents.length()));
SendMessage(std::move(*msg));
dom::ContentChild::GetSingleton()->SendCreateReplayingProcess(aChannelId);
}
SendGraphicsMemoryToChild();
MOZ_RELEASE_ASSERT(gIntroductionMessage);
SendMessage(std::move(*gIntroductionMessage));
}
void ChildProcessInfo::OnCrash(const char* aWhy) {
void ChildProcessInfo::OnCrash(size_t aForkId, const char* aWhy) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
// If a child process crashes or hangs then annotate the crash report.
@ -191,28 +186,16 @@ void ChildProcessInfo::OnCrash(const char* aWhy) {
// be generated.
dom::ContentChild::GetSingleton()->SendGenerateReplayCrashReport(GetId());
BusyWait();
// Continue execution if we were able to recover from the crash.
if (js::RecoverFromCrash(this)) {
// Mark this child as crashed so it can't be used again, even if it didn't
// generate a minidump.
mHasFatalError = true;
if (js::RecoverFromCrash(GetId(), aForkId)) {
return;
}
}
// If we received a FatalError message then the child generated a minidump.
// Shut down cleanly so that we don't mask the report with our own crash.
if (mHasFatalError) {
Shutdown();
}
// Indicate when we crash if the child tried to send us a fatal error message
// but had a problem either unprotecting system memory or generating the
// minidump.
MOZ_RELEASE_ASSERT(!mHasBegunFatalError);
// The child crashed without producing a minidump, produce one ourselves.
MOZ_CRASH("Unexpected child crash");
Shutdown();
}
///////////////////////////////////////////////////////////////////////////////
@ -226,13 +209,13 @@ void ChildProcessInfo::OnCrash(const char* aWhy) {
// All messages received on a channel thread which the main thread has not
// processed yet. This is protected by gMonitor.
struct PendingMessage {
ChildProcessInfo* mProcess;
size_t mChildId = 0;
Message::UniquePtr mMsg;
PendingMessage() : mProcess(nullptr) {}
PendingMessage() {}
PendingMessage& operator=(PendingMessage&& aOther) {
mProcess = aOther.mProcess;
mChildId = aOther.mChildId;
mMsg = std::move(aOther.mMsg);
return *this;
}
@ -244,141 +227,39 @@ static StaticInfallibleVector<PendingMessage> gPendingMessages;
static Message::UniquePtr ExtractChildMessage(ChildProcessInfo** aProcess) {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
for (size_t i = 0; i < gPendingMessages.length(); i++) {
PendingMessage& pending = gPendingMessages[i];
if (!*aProcess || pending.mProcess == *aProcess) {
*aProcess = pending.mProcess;
Message::UniquePtr msg = std::move(pending.mMsg);
gPendingMessages.erase(&pending);
return msg;
}
if (!gPendingMessages.length()) {
return nullptr;
}
return nullptr;
PendingMessage& pending = gPendingMessages[0];
*aProcess = GetChildProcess(pending.mChildId);
MOZ_RELEASE_ASSERT(*aProcess);
Message::UniquePtr msg = std::move(pending.mMsg);
gPendingMessages.erase(&pending);
return msg;
}
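With this patch, ExtractChildMessage simply pops the oldest pending message and resolves its destination child by ID; messages are queued from channel threads under gMonitor and drained on the main thread with a bounded wait. A sketch of that producer/consumer queue using standard primitives instead of Monitor; the names and the char[] stand-in for Message::UniquePtr are illustrative only.

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <memory>
#include <mutex>
#include <utility>

struct QueuedMessage {
  size_t mChildId = 0;
  std::unique_ptr<char[]> mMsg;  // stands in for Message::UniquePtr
};

static std::mutex sLock;
static std::condition_variable sCondVar;
static std::deque<QueuedMessage> sPending;

// Called on a channel thread, like ReceiveChildMessageOnMainThread.
void EnqueueMessage(QueuedMessage aMsg) {
  {
    std::lock_guard<std::mutex> guard(sLock);
    sPending.push_back(std::move(aMsg));
  }
  sCondVar.notify_all();
}

// Called on the main thread; waits up to the timeout for a message,
// like MaybeProcessNextMessage's bounded WaitUntil.
bool DequeueMessage(QueuedMessage* aOut, std::chrono::milliseconds aTimeout) {
  std::unique_lock<std::mutex> guard(sLock);
  if (!sCondVar.wait_for(guard, aTimeout, [] { return !sPending.empty(); })) {
    return false;  // timed out with nothing to process
  }
  *aOut = std::move(sPending.front());
  sPending.pop_front();
  return true;
}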
// Hang Detection
//
// Replaying processes will be terminated if no execution progress has been made
// for some number of seconds. This generates a crash report for diagnosis and
// allows another replaying process to be spawned in its place. We detect that
// no progress is being made by periodically sending ping messages to the
// replaying process, and comparing the progress values returned by them.
// The ping messages are sent at least PingIntervalSeconds apart, and the
// process is considered hanged if at any point the last MaxStalledPings ping
// messages either did not respond or responded with the same progress value.
//
// Dividing our accounting between different ping messages avoids treating
// processes as hanged when the computer wakes up after sleeping: no pings will
// be sent while the computer is sleeping and processes are suspended, so the
// computer will need to be awake for some time before any processes are marked
// as hanged (before which they will hopefully be able to make progress).
static const size_t PingIntervalSeconds = 2;
static const size_t MaxStalledPings = 10;
bool ChildProcessInfo::IsHanged() {
if (mPings.length() < MaxStalledPings) {
return false;
}
size_t firstIndex = mPings.length() - MaxStalledPings;
uint64_t firstValue = mPings[firstIndex].mProgress;
if (!firstValue) {
// The child hasn't responded to any of the pings.
return true;
}
for (size_t i = firstIndex; i < mPings.length(); i++) {
if (mPings[i].mProgress && mPings[i].mProgress != firstValue) {
return false;
}
}
return true;
}
void ChildProcessInfo::ResetPings(bool aMightRewind) {
mMightRewind = aMightRewind;
mPings.clear();
mLastPingTime = TimeStamp::Now();
}
static uint32_t gNumPings;
void ChildProcessInfo::MaybePing() {
if (IsRecording() || IsPaused() || gChildrenAreDebugging) {
return;
}
TimeStamp now = TimeStamp::Now();
// After sending a terminate message we don't ping the process anymore, just
// make sure it eventually crashes instead of continuing to hang. Use this as
// a fallback if we can't ping the process because it might be rewinding.
if (mSentTerminateMessage || mMightRewind) {
size_t TerminateSeconds = PingIntervalSeconds * MaxStalledPings;
if (now >= mLastPingTime + TimeDuration::FromSeconds(TerminateSeconds)) {
OnCrash(mMightRewind ? "Rewinding child process non-responsive"
: "Child process non-responsive");
}
return;
}
if (now < mLastPingTime + TimeDuration::FromSeconds(PingIntervalSeconds)) {
return;
}
if (IsHanged()) {
// Try to get the child to crash, so that we can get a minidump.
CrashReporter::AnnotateCrashReport(
CrashReporter::Annotation::RecordReplayHang, true);
SendMessage(TerminateMessage());
mSentTerminateMessage = true;
} else {
uint32_t id = ++gNumPings;
mPings.emplaceBack(id);
SendMessage(PingMessage(id));
}
mLastPingTime = now;
}
void ChildProcessInfo::OnPingResponse(const PingResponseMessage& aMsg) {
for (size_t i = 0; i < mPings.length(); i++) {
if (mPings[i].mId == aMsg.mId) {
mPings[i].mProgress = aMsg.mProgress;
break;
}
}
}
void ChildProcessInfo::WaitUntilPaused() {
/* static */
void ChildProcessInfo::MaybeProcessNextMessage() {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
while (!IsPaused()) {
MaybePing();
Maybe<MonitorAutoLock> lock;
lock.emplace(*gMonitor);
Maybe<MonitorAutoLock> lock;
lock.emplace(*gMonitor);
MaybeHandlePendingSyncMessage();
MaybeHandlePendingSyncMessage();
ChildProcessInfo* process;
Message::UniquePtr msg = ExtractChildMessage(&process);
// Search for the first message received from this process.
ChildProcessInfo* process = this;
Message::UniquePtr msg = ExtractChildMessage(&process);
if (msg) {
lock.reset();
OnIncomingMessage(*msg);
} else if (HasCrashed()) {
// If the child crashed but we recovered, we don't have to keep waiting.
break;
} else {
TimeStamp deadline =
TimeStamp::Now() + TimeDuration::FromSeconds(PingIntervalSeconds);
gMonitor->WaitUntil(deadline);
}
if (msg) {
lock.reset();
process->OnIncomingMessage(*msg);
} else {
// We wait for at most one second before returning to the caller.
TimeStamp deadline = TimeStamp::Now() + TimeDuration::FromSeconds(1);
gMonitor->WaitUntil(deadline);
}
}
@ -410,14 +291,14 @@ void ChildProcessInfo::MaybeProcessPendingMessageRunnable() {
// Execute a task that processes a message received from the child. This is
// called on a channel thread, and the function executes asynchronously on
// the main thread.
void ChildProcessInfo::ReceiveChildMessageOnMainThread(
Message::UniquePtr aMsg) {
/* static */ void ChildProcessInfo::ReceiveChildMessageOnMainThread(
size_t aChildId, Message::UniquePtr aMsg) {
MOZ_RELEASE_ASSERT(!NS_IsMainThread());
MonitorAutoLock lock(*gMonitor);
PendingMessage pending;
pending.mProcess = this;
pending.mChildId = aChildId;
pending.mMsg = std::move(aMsg);
gPendingMessages.append(std::move(pending));

View file

@ -86,6 +86,13 @@ bool IsMiddlemanWithRecordingChild() { return false; }
bool DebuggerRunsInMiddleman() { MOZ_CRASH(); }
bool UseCloudForReplayingProcesses() { return false; }
void CreateReplayingCloudProcess(base::ProcessId aProcessId,
uint32_t aChannelId) {
MOZ_CRASH();
}
} // namespace parent
} // namespace recordreplay

View file

@ -15,7 +15,6 @@
#include "js/JSON.h"
#include "js/PropertySpec.h"
#include "ChildInternal.h"
#include "MemorySnapshot.h"
#include "ParentInternal.h"
#include "nsImportModule.h"
#include "rrIControl.h"
@ -45,29 +44,39 @@ static JSObject* RequireObject(JSContext* aCx, HandleValue aValue) {
return &aValue.toObject();
}
static parent::ChildProcessInfo* GetChildById(JSContext* aCx,
const Value& aValue,
bool aAllowUnpaused = false) {
static bool RequireNumber(JSContext* aCx, HandleValue aValue, size_t* aNumber) {
if (!aValue.isNumber()) {
JS_ReportErrorASCII(aCx, "Expected child ID");
JS_ReportErrorASCII(aCx, "Expected number");
return false;
}
*aNumber = aValue.toNumber();
return true;
}
static parent::ChildProcessInfo* ToChildProcess(JSContext* aCx,
HandleValue aRootValue,
HandleValue aForkValue,
size_t* aForkId) {
size_t rootId;
if (!RequireNumber(aCx, aRootValue, &rootId) ||
!RequireNumber(aCx, aForkValue, aForkId)) {
return nullptr;
}
parent::ChildProcessInfo* child = parent::GetChildProcess(aValue.toNumber());
parent::ChildProcessInfo* child = parent::GetChildProcess(rootId);
if (!child) {
JS_ReportErrorASCII(aCx, "Bad child ID");
return nullptr;
}
if (child->HasCrashed()) {
JS_ReportErrorASCII(aCx, "Child has crashed");
return nullptr;
}
if (!aAllowUnpaused && !child->IsPaused()) {
JS_ReportErrorASCII(aCx, "Child is unpaused");
return nullptr;
}
return child;
}
static parent::ChildProcessInfo* ToChildProcess(JSContext* aCx,
HandleValue aRootValue) {
RootedValue forkValue(aCx, Int32Value(0));
size_t forkId;
return ToChildProcess(aCx, aRootValue, forkValue, &forkId);
}
///////////////////////////////////////////////////////////////////////////////
// Middleman Control
///////////////////////////////////////////////////////////////////////////////
@ -96,21 +105,45 @@ void SetupMiddlemanControl(const Maybe<size_t>& aRecordingChildId) {
}
}
void ForwardManifestFinished(parent::ChildProcessInfo* aChild,
const Message& aMsg) {
static void ForwardManifestFinished(parent::ChildProcessInfo* aChild,
size_t aForkId, const char16_t* aBuffer,
size_t aBufferSize) {
MOZ_RELEASE_ASSERT(gControl);
const auto& nmsg = static_cast<const ManifestFinishedMessage&>(aMsg);
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
RootedValue value(cx);
if (nmsg.BufferSize() &&
!JS_ParseJSON(cx, nmsg.Buffer(), nmsg.BufferSize(), &value)) {
if (aBufferSize && !JS_ParseJSON(cx, aBuffer, aBufferSize, &value)) {
MOZ_CRASH("ForwardManifestFinished");
}
if (NS_FAILED(gControl->ManifestFinished(aChild->GetId(), value))) {
if (NS_FAILED(gControl->ManifestFinished(aChild->GetId(), aForkId, value))) {
MOZ_CRASH("ForwardManifestFinished");
}
}
void ForwardManifestFinished(parent::ChildProcessInfo* aChild,
const ManifestFinishedMessage& aMsg) {
ForwardManifestFinished(aChild, aMsg.mForkId, aMsg.Buffer(),
aMsg.BufferSize());
}
void ForwardUnhandledDivergence(parent::ChildProcessInfo* aChild,
const UnhandledDivergenceMessage& aMsg) {
char16_t buf[] = u"{\"unhandledDivergence\":true}";
ForwardManifestFinished(aChild, aMsg.mForkId, buf, ArrayLength(buf) - 1);
}
void ForwardPingResponse(parent::ChildProcessInfo* aChild,
const PingResponseMessage& aMsg) {
MOZ_RELEASE_ASSERT(gControl);
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
if (NS_FAILED(gControl->PingResponse(aChild->GetId(), aMsg.mForkId, aMsg.mId,
aMsg.mProgress))) {
MOZ_CRASH("ForwardManifestFinished");
}
}
@ -137,17 +170,13 @@ void AfterSaveRecording() {
}
}
bool RecoverFromCrash(parent::ChildProcessInfo* aChild) {
bool RecoverFromCrash(size_t aRootId, size_t aForkId) {
MOZ_RELEASE_ASSERT(gControl);
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
if (NS_FAILED(gControl->ChildCrashed(aChild->GetId()))) {
return false;
}
return true;
return !NS_FAILED(gControl->ChildCrashed(aRootId, aForkId));
}
///////////////////////////////////////////////////////////////////////////////
@ -210,15 +239,21 @@ static bool Middleman_SpawnReplayingChild(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
size_t id = parent::SpawnReplayingChild();
args.rval().setInt32(id);
if (!args.get(0).isNumber()) {
JS_ReportErrorASCII(aCx, "Expected numeric argument");
return false;
}
size_t id = args.get(0).toNumber();
parent::SpawnReplayingChild(id);
args.rval().setUndefined();
return true;
}
static bool Middleman_SendManifest(JSContext* aCx, unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
RootedObject manifestObject(aCx, RequireObject(aCx, args.get(1)));
RootedObject manifestObject(aCx, RequireObject(aCx, args.get(2)));
if (!manifestObject) {
return false;
}
@ -229,34 +264,40 @@ static bool Middleman_SendManifest(JSContext* aCx, unsigned aArgc, Value* aVp) {
return false;
}
parent::ChildProcessInfo* child = GetChildById(aCx, args.get(0));
size_t forkId;
parent::ChildProcessInfo* child = ToChildProcess(aCx, args.get(0),
args.get(1), &forkId);
if (!child) {
return false;
}
bool mightRewind = ToBoolean(args.get(2));
ManifestStartMessage* msg = ManifestStartMessage::New(
manifestBuffer.begin(), manifestBuffer.length());
forkId, manifestBuffer.begin(), manifestBuffer.length());
child->SendMessage(std::move(*msg));
free(msg);
child->ResetPings(mightRewind);
args.rval().setUndefined();
return true;
}
static bool Middleman_MaybePing(JSContext* aCx, unsigned aArgc, Value* aVp) {
static bool Middleman_Ping(JSContext* aCx, unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
parent::ChildProcessInfo* child =
GetChildById(aCx, args.get(0), /* aAllowUnpaused */ true);
size_t forkId;
parent::ChildProcessInfo* child = ToChildProcess(aCx, args.get(0),
args.get(1), &forkId);
if (!child) {
return false;
}
child->MaybePing();
size_t pingId;
if (!RequireNumber(aCx, args.get(2), &pingId)) {
return false;
}
child->SendMessage(PingMessage(forkId, pingId));
args.rval().setUndefined();
return true;
}
@ -327,30 +368,34 @@ static bool Middleman_InRepaintStressMode(JSContext* aCx, unsigned aArgc,
return true;
}
// Recording children can idle indefinitely while waiting for input, without
// creating a checkpoint. If this might be a problem, this method induces the
// child to create a new checkpoint and pause.
static void MaybeCreateCheckpointInChild(parent::ChildProcessInfo* aChild) {
if (aChild->IsRecording() && !aChild->IsPaused()) {
aChild->SendMessage(CreateCheckpointMessage());
}
}
static bool Middleman_WaitUntilPaused(JSContext* aCx, unsigned aArgc,
Value* aVp) {
static bool Middleman_CreateCheckpointInRecording(JSContext* aCx,
unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
parent::ChildProcessInfo* child = GetChildById(aCx, args.get(0),
/* aAllowUnpaused = */ true);
parent::ChildProcessInfo* child = ToChildProcess(aCx, args.get(0));
if (!child) {
return false;
}
if (ToBoolean(args.get(1))) {
MaybeCreateCheckpointInChild(child);
if (!child->IsRecording()) {
JS_ReportErrorASCII(aCx, "Need recording child");
return false;
}
child->WaitUntilPaused();
// Recording children can idle indefinitely while waiting for input, without
// creating a checkpoint. If this might be a problem, this method induces the
// child to create a new checkpoint and pause.
child->SendMessage(CreateCheckpointMessage());
args.rval().setUndefined();
return true;
}
static bool Middleman_MaybeProcessNextMessage(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
parent::ChildProcessInfo::MaybeProcessNextMessage();
args.rval().setUndefined();
return true;
@ -377,6 +422,78 @@ static bool Middleman_Atomize(JSContext* aCx, unsigned aArgc, Value* aVp) {
return true;
}
static bool Middleman_Terminate(JSContext* aCx, unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
size_t forkId;
parent::ChildProcessInfo* child = ToChildProcess(aCx, args.get(0),
args.get(1), &forkId);
if (!child) {
return false;
}
child->SendMessage(TerminateMessage(forkId));
args.rval().setUndefined();
return true;
}
static bool Middleman_CrashHangedChild(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
size_t forkId;
parent::ChildProcessInfo* child = ToChildProcess(aCx, args.get(0),
args.get(1), &forkId);
if (!child) {
return false;
}
// Try to get the child to crash, so that we can get a minidump.
CrashReporter::AnnotateCrashReport(
CrashReporter::Annotation::RecordReplayHang, true);
child->SendMessage(CrashMessage(forkId));
args.rval().setUndefined();
return true;
}
static bool Middleman_RecordingLength(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
args.rval().setNumber((double)parent::gRecordingContents.length());
return true;
}
static bool Middleman_UpdateRecording(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
size_t forkId;
parent::ChildProcessInfo* child = ToChildProcess(aCx, args.get(0),
args.get(1), &forkId);
if (!child) {
return false;
}
if (!args.get(2).isNumber()) {
JS_ReportErrorASCII(aCx, "Expected numeric argument");
return false;
}
size_t start = args.get(2).toNumber();
if (start < parent::gRecordingContents.length()) {
UniquePtr<Message> msg(RecordingDataMessage::New(
forkId, start, parent::gRecordingContents.begin() + start,
parent::gRecordingContents.length() - start));
child->SendMessage(std::move(*msg));
}
args.rval().setUndefined();
return true;
}
///////////////////////////////////////////////////////////////////////////////
// Devtools Sandbox
///////////////////////////////////////////////////////////////////////////////
@ -644,6 +761,23 @@ static bool FetchContent(JSContext* aCx, HandleString aURL,
// Recording/Replaying Methods
///////////////////////////////////////////////////////////////////////////////
static bool RecordReplay_Fork(JSContext* aCx, unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
if (!args.get(0).isNumber()) {
JS_ReportErrorASCII(aCx, "Expected numeric argument");
return false;
}
size_t id = args.get(0).toNumber();
if (!ForkProcess()) {
child::RegisterFork(id);
}
args.rval().setUndefined();
return true;
}
static bool RecordReplay_ChildId(JSContext* aCx, unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
@ -658,13 +792,6 @@ static bool RecordReplay_AreThreadEventsDisallowed(JSContext* aCx,
return true;
}
static bool RecordReplay_NewSnapshot(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
args.rval().setBoolean(NewSnapshot());
return true;
}
static bool RecordReplay_DivergeFromRecording(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
@ -758,27 +885,6 @@ static bool RecordReplay_ResumeExecution(JSContext* aCx, unsigned aArgc,
return true;
}
static bool RecordReplay_RestoreSnapshot(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
if (!args.get(0).isNumber()) {
JS_ReportErrorASCII(aCx, "Bad checkpoint ID");
return false;
}
size_t numSnapshots = args.get(0).toNumber();
if (numSnapshots >= NumSnapshots()) {
JS_ReportErrorASCII(aCx, "Haven't saved enough checkpoints");
return false;
}
RestoreSnapshotAndResume(numSnapshots);
JS_ReportErrorASCII(aCx, "Unreachable!");
return false;
}
// The total amount of time this process has spent idling.
static double gIdleTimeTotal;
@ -821,6 +927,16 @@ static bool RecordReplay_FlushRecording(JSContext* aCx, unsigned aArgc,
return true;
}
static bool RecordReplay_FlushExternalCalls(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
FlushExternalCalls();
args.rval().setUndefined();
return true;
}
static bool RecordReplay_SetMainChild(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
@ -857,13 +973,13 @@ static bool RecordReplay_GetContent(JSContext* aCx, unsigned aArgc,
static bool RecordReplay_Repaint(JSContext* aCx, unsigned aArgc, Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
nsString data;
nsCString data;
if (!child::Repaint(data)) {
args.rval().setNull();
return true;
}
JSString* str = JS_NewUCStringCopyN(aCx, data.BeginReading(), data.Length());
JSString* str = JS_NewStringCopyN(aCx, data.BeginReading(), data.Length());
if (!str) {
return false;
}
@ -872,26 +988,6 @@ static bool RecordReplay_Repaint(JSContext* aCx, unsigned aArgc, Value* aVp) {
return true;
}
static bool RecordReplay_MemoryUsage(JSContext* aCx, unsigned aArgc,
Value* aVp) {
CallArgs args = CallArgsFromVp(aArgc, aVp);
if (!args.get(0).isNumber()) {
JS_ReportErrorASCII(aCx, "Bad memory kind");
return false;
}
size_t kind = args.get(0).toNumber();
if (kind >= (size_t)MemoryKind::Count) {
JS_ReportErrorASCII(aCx, "Memory kind out of range");
return false;
}
args.rval().setDouble(GetMemoryUsage((MemoryKind)kind));
return true;
}
static bool RecordReplay_Dump(JSContext* aCx, unsigned aArgc, Value* aVp) {
// This method is an alternative to dump() that can be used in places where
// thread events are disallowed.
@ -924,8 +1020,6 @@ enum ChangeFrameKind {
};
struct ScriptHitInfo {
typedef AllocPolicy<MemoryKind::ScriptHits> AllocPolicy;
// Information about a location where a script offset has been hit.
struct ScriptHit {
uint32_t mFrameIndex : 16;
@ -940,12 +1034,7 @@ struct ScriptHitInfo {
static_assert(sizeof(ScriptHit) == 8, "Unexpected size");
struct ScriptHitChunk {
ScriptHit mHits[7];
ScriptHitChunk* mPrevious;
};
ScriptHitChunk* mFreeChunk;
typedef InfallibleVector<ScriptHit> ScriptHitVector;
struct ScriptHitKey {
uint32_t mScript;
@ -966,7 +1055,7 @@ struct ScriptHitInfo {
}
};
typedef HashMap<ScriptHitKey, ScriptHitChunk*, ScriptHitKey, AllocPolicy>
typedef HashMap<ScriptHitKey, ScriptHitVector*, ScriptHitKey>
ScriptHitMap;
struct AnyScriptHit {
@ -979,30 +1068,26 @@ struct ScriptHitInfo {
: mScript(aScript), mFrameIndex(aFrameIndex), mProgress(aProgress) {}
};
typedef InfallibleVector<AnyScriptHit, 128, AllocPolicy> AnyScriptHitVector;
typedef InfallibleVector<AnyScriptHit, 128> AnyScriptHitVector;
struct CheckpointInfo {
ScriptHitMap mTable;
AnyScriptHitVector mChangeFrames[NumChangeFrameKinds];
};
InfallibleVector<CheckpointInfo*, 1024, AllocPolicy> mInfo;
ScriptHitInfo() : mFreeChunk(nullptr) {}
InfallibleVector<CheckpointInfo*, 1024> mInfo;
CheckpointInfo* GetInfo(uint32_t aCheckpoint) {
while (aCheckpoint >= mInfo.length()) {
mInfo.append(nullptr);
}
if (!mInfo[aCheckpoint]) {
void* mem =
AllocateMemory(sizeof(CheckpointInfo), MemoryKind::ScriptHits);
mInfo[aCheckpoint] = new (mem) CheckpointInfo();
mInfo[aCheckpoint] = new CheckpointInfo();
}
return mInfo[aCheckpoint];
}
ScriptHitChunk* FindHits(uint32_t aCheckpoint, uint32_t aScript,
ScriptHitVector* FindHits(uint32_t aCheckpoint, uint32_t aScript,
uint32_t aOffset) {
CheckpointInfo* info = GetInfo(aCheckpoint);
@ -1017,40 +1102,12 @@ struct ScriptHitInfo {
ScriptHitKey key(aScript, aOffset);
ScriptHitMap::AddPtr p = info->mTable.lookupForAdd(key);
if (!p && !info->mTable.add(p, key, NewChunk(nullptr))) {
if (!p && !info->mTable.add(p, key, new ScriptHitVector())) {
MOZ_CRASH("ScriptHitInfo::AddHit");
}
ScriptHitChunk* chunk = p->value();
p->value() = AddHit(chunk, ScriptHit(aFrameIndex, aProgress));
}
ScriptHitChunk* AddHit(ScriptHitChunk* aChunk, const ScriptHit& aHit) {
for (int i = ArrayLength(aChunk->mHits) - 1; i >= 0; i--) {
if (!aChunk->mHits[i].mProgress) {
aChunk->mHits[i] = aHit;
return aChunk;
}
}
ScriptHitChunk* newChunk = NewChunk(aChunk);
newChunk->mHits[ArrayLength(newChunk->mHits) - 1] = aHit;
return newChunk;
}
ScriptHitChunk* NewChunk(ScriptHitChunk* aPrevious) {
if (!mFreeChunk) {
void* mem = AllocateMemory(PageSize, MemoryKind::ScriptHits);
ScriptHitChunk* chunks = reinterpret_cast<ScriptHitChunk*>(mem);
size_t numChunks = PageSize / sizeof(ScriptHitChunk);
for (size_t i = 0; i < numChunks - 1; i++) {
chunks[i].mPrevious = &chunks[i + 1];
}
mFreeChunk = chunks;
}
ScriptHitChunk* result = mFreeChunk;
mFreeChunk = mFreeChunk->mPrevious;
result->mPrevious = aPrevious;
return result;
ScriptHitVector* hits = p->value();
hits->append(ScriptHit(aFrameIndex, aProgress));
}
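
The ScriptHitInfo rewrite above drops the hand-rolled ScriptHitChunk free list (page-sized chunks threaded through mPrevious) in favor of one heap-allocated, growable vector of hits per (script, offset) key. Below is a minimal sketch of the resulting layout, using standard containers in place of the tree's HashMap and InfallibleVector; every name is illustrative and the packed bitfields of the real ScriptHit are omitted.

#include <cstdint>
#include <functional>
#include <unordered_map>
#include <vector>

// Simplified model of the new per-location hit storage.
struct ScriptHit {
  uint32_t frameIndex;
  uint64_t progress;
};

struct ScriptHitKey {
  uint32_t script;
  uint32_t offset;
  bool operator==(const ScriptHitKey& aOther) const {
    return script == aOther.script && offset == aOther.offset;
  }
};

struct ScriptHitKeyHash {
  size_t operator()(const ScriptHitKey& aKey) const {
    return std::hash<uint64_t>()((uint64_t(aKey.script) << 32) | aKey.offset);
  }
};

class CheckpointHits {
  std::unordered_map<ScriptHitKey, std::vector<ScriptHit>, ScriptHitKeyHash> mTable;

 public:
  // Appending to a growable vector replaces the old chunk free-list scheme.
  void AddHit(uint32_t aScript, uint32_t aOffset, uint32_t aFrameIndex,
              uint64_t aProgress) {
    mTable[{aScript, aOffset}].push_back({aFrameIndex, aProgress});
  }

  // Null when nothing was recorded for this (script, offset) pair.
  const std::vector<ScriptHit>* FindHits(uint32_t aScript, uint32_t aOffset) const {
    auto it = mTable.find({aScript, aOffset});
    return it == mTable.end() ? nullptr : &it->second;
  }
};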
void AddChangeFrame(uint32_t aCheckpoint, uint32_t aWhich, uint32_t aScript,
@ -1076,8 +1133,7 @@ static JSString* gBreakpointAtom;
static JSString* gExitAtom;
static void InitializeScriptHits() {
void* mem = AllocateMemory(sizeof(ScriptHitInfo), MemoryKind::ScriptHits);
gScriptHits = new (mem) ScriptHitInfo();
gScriptHits = new ScriptHitInfo();
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
@ -1255,23 +1311,20 @@ static bool RecordReplay_FindScriptHits(JSContext* aCx, unsigned aArgc,
RootedValueVector values(aCx);
ScriptHitInfo::ScriptHitChunk* chunk =
ScriptHitInfo::ScriptHitVector* hits =
gScriptHits ? gScriptHits->FindHits(checkpoint, script, offset) : nullptr;
while (chunk) {
for (const auto& hit : chunk->mHits) {
if (hit.mProgress) {
RootedObject hitObject(aCx, JS_NewObject(aCx, nullptr));
if (!hitObject ||
!JS_DefineProperty(aCx, hitObject, "progress",
(double)hit.mProgress, JSPROP_ENUMERATE) ||
!JS_DefineProperty(aCx, hitObject, "frameIndex", hit.mFrameIndex,
JSPROP_ENUMERATE) ||
!values.append(ObjectValue(*hitObject))) {
return false;
}
if (hits) {
for (const auto& hit : *hits) {
RootedObject hitObject(aCx, JS_NewObject(aCx, nullptr));
if (!hitObject ||
!JS_DefineProperty(aCx, hitObject, "progress",
(double)hit.mProgress, JSPROP_ENUMERATE) ||
!JS_DefineProperty(aCx, hitObject, "frameIndex", hit.mFrameIndex,
JSPROP_ENUMERATE) ||
!values.append(ObjectValue(*hitObject))) {
return false;
}
}
chunk = chunk->mPrevious;
}
JSObject* array = JS::NewArrayObject(aCx, values);
@ -1370,22 +1423,28 @@ static bool RecordReplay_FindChangeFrames(JSContext* aCx, unsigned aArgc,
static const JSFunctionSpec gMiddlemanMethods[] = {
JS_FN("registerReplayDebugger", Middleman_RegisterReplayDebugger, 1, 0),
JS_FN("canRewind", Middleman_CanRewind, 0, 0),
JS_FN("spawnReplayingChild", Middleman_SpawnReplayingChild, 0, 0),
JS_FN("spawnReplayingChild", Middleman_SpawnReplayingChild, 1, 0),
JS_FN("sendManifest", Middleman_SendManifest, 3, 0),
JS_FN("maybePing", Middleman_MaybePing, 1, 0),
JS_FN("ping", Middleman_Ping, 3, 0),
JS_FN("hadRepaint", Middleman_HadRepaint, 1, 0),
JS_FN("restoreMainGraphics", Middleman_RestoreMainGraphics, 0, 0),
JS_FN("clearGraphics", Middleman_ClearGraphics, 0, 0),
JS_FN("inRepaintStressMode", Middleman_InRepaintStressMode, 0, 0),
JS_FN("waitUntilPaused", Middleman_WaitUntilPaused, 1, 0),
JS_FN("createCheckpointInRecording", Middleman_CreateCheckpointInRecording,
1, 0),
JS_FN("maybeProcessNextMessage", Middleman_MaybeProcessNextMessage, 0, 0),
JS_FN("atomize", Middleman_Atomize, 1, 0),
JS_FN("terminate", Middleman_Terminate, 2, 0),
JS_FN("crashHangedChild", Middleman_CrashHangedChild, 2, 0),
JS_FN("recordingLength", Middleman_RecordingLength, 0, 0),
JS_FN("updateRecording", Middleman_UpdateRecording, 3, 0),
JS_FS_END};
static const JSFunctionSpec gRecordReplayMethods[] = {
JS_FN("fork", RecordReplay_Fork, 1, 0),
JS_FN("childId", RecordReplay_ChildId, 0, 0),
JS_FN("areThreadEventsDisallowed", RecordReplay_AreThreadEventsDisallowed,
0, 0),
JS_FN("newSnapshot", RecordReplay_NewSnapshot, 0, 0),
JS_FN("divergeFromRecording", RecordReplay_DivergeFromRecording, 0, 0),
JS_FN("progressCounter", RecordReplay_ProgressCounter, 0, 0),
JS_FN("setProgressCounter", RecordReplay_SetProgressCounter, 1, 0),
@ -1393,13 +1452,12 @@ static const JSFunctionSpec gRecordReplayMethods[] = {
RecordReplay_ShouldUpdateProgressCounter, 1, 0),
JS_FN("manifestFinished", RecordReplay_ManifestFinished, 1, 0),
JS_FN("resumeExecution", RecordReplay_ResumeExecution, 0, 0),
JS_FN("restoreSnapshot", RecordReplay_RestoreSnapshot, 1, 0),
JS_FN("currentExecutionTime", RecordReplay_CurrentExecutionTime, 0, 0),
JS_FN("flushRecording", RecordReplay_FlushRecording, 0, 0),
JS_FN("flushExternalCalls", RecordReplay_FlushExternalCalls, 0, 0),
JS_FN("setMainChild", RecordReplay_SetMainChild, 0, 0),
JS_FN("getContent", RecordReplay_GetContent, 1, 0),
JS_FN("repaint", RecordReplay_Repaint, 0, 0),
JS_FN("memoryUsage", RecordReplay_MemoryUsage, 0, 0),
JS_FN("isScanningScripts", RecordReplay_IsScanningScripts, 0, 0),
JS_FN("setScanningScripts", RecordReplay_SetScanningScripts, 1, 0),
JS_FN("getFrameDepth", RecordReplay_GetFrameDepth, 0, 0),

View file

@ -9,6 +9,7 @@
#include "jsapi.h"
#include "Channel.h"
#include "InfallibleVector.h"
#include "ProcessRewind.h"
@ -57,9 +58,13 @@ void ManifestStart(const CharBuffer& aContents);
// Setup the middleman control state.
void SetupMiddlemanControl(const Maybe<size_t>& aRecordingChildId);
// Handle an incoming message from a child process.
// Handle incoming messages from a child process.
void ForwardManifestFinished(parent::ChildProcessInfo* aChild,
const Message& aMsg);
const ManifestFinishedMessage& aMsg);
void ForwardUnhandledDivergence(parent::ChildProcessInfo* aChild,
const UnhandledDivergenceMessage& aMsg);
void ForwardPingResponse(parent::ChildProcessInfo* aChild,
const PingResponseMessage& aMsg);
// Prepare the child processes so that the recording file can be safely copied.
void BeforeSaveRecording();
@ -72,7 +77,7 @@ void AfterSaveRecording();
void HitCheckpoint(size_t aCheckpoint);
// Called when a child crashes, returning whether the crash was recovered from.
bool RecoverFromCrash(parent::ChildProcessInfo* aChild);
bool RecoverFromCrash(size_t aRootId, size_t aForkId);
// Accessors for state which can be accessed from JS.

View file

@ -159,28 +159,6 @@ static bool AlwaysForwardMessage(const IPC::Message& aMessage) {
return type == dom::PBrowser::Msg_Destroy__ID;
}
static bool gMainThreadIsWaitingForIPDLReply = false;
bool MainThreadIsWaitingForIPDLReply() {
return gMainThreadIsWaitingForIPDLReply;
}
// Helper for places where the main thread will block while waiting on a
// synchronous IPDL reply from a child process. Incoming messages from the
// child must be handled immediately.
struct MOZ_RAII AutoMarkMainThreadWaitingForIPDLReply {
AutoMarkMainThreadWaitingForIPDLReply() {
MOZ_RELEASE_ASSERT(NS_IsMainThread());
MOZ_RELEASE_ASSERT(!gMainThreadIsWaitingForIPDLReply);
ResumeBeforeWaitingForIPDLReply();
gMainThreadIsWaitingForIPDLReply = true;
}
~AutoMarkMainThreadWaitingForIPDLReply() {
gMainThreadIsWaitingForIPDLReply = false;
}
};
static void BeginShutdown() {
// If there is a channel error or anything that could result from the child
// crashing, cleanly shutdown this process so that we don't generate a
@ -301,10 +279,7 @@ class MiddlemanProtocol : public ipc::IToplevelProtocol {
"StaticMaybeSendSyncMessage", StaticMaybeSendSyncMessage, this));
if (mSide == ipc::ChildSide) {
AutoMarkMainThreadWaitingForIPDLReply blocked;
while (!mSyncMessageReply) {
MOZ_CRASH("NYI");
}
MOZ_CRASH("NYI");
} else {
MonitorAutoLock lock(*gMonitor);

View file

@ -166,6 +166,10 @@ void UpdateGraphicsAfterPaint(const PaintMessage& aMsg) {
}
void UpdateGraphicsAfterRepaint(const nsACString& aImageData) {
if (!gGraphics) {
InitGraphicsSandbox();
}
nsCOMPtr<nsIInputStream> stream;
nsresult rv = NS_NewCStringInputStream(getter_AddRefs(stream), aImageData);
MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));

View file

@ -13,11 +13,13 @@
#include "ipc/Channel.h"
#include "js/Proxy.h"
#include "mozilla/dom/ContentProcessMessageManager.h"
#include "ChildInternal.h"
#include "InfallibleVector.h"
#include "JSControl.h"
#include "Monitor.h"
#include "ProcessRecordReplay.h"
#include "ProcessRedirect.h"
#include "rrIConnection.h"
#include <algorithm>
@ -83,21 +85,9 @@ ChildProcessInfo* GetChildProcess(size_t aId) {
return nullptr;
}
size_t SpawnReplayingChild() {
ChildProcessInfo* child = new ChildProcessInfo(Nothing());
void SpawnReplayingChild(size_t aChannelId) {
ChildProcessInfo* child = new ChildProcessInfo(aChannelId, Nothing());
gReplayingChildren.append(child);
return child->GetId();
}
void ResumeBeforeWaitingForIPDLReply() {
MOZ_RELEASE_ASSERT(gActiveChild->IsRecording());
// The main thread is about to block while it waits for a sync reply from the
// recording child process. If the child is paused, resume it immediately so
// that we don't deadlock.
if (gActiveChild->IsPaused()) {
MOZ_CRASH("NYI");
}
}
///////////////////////////////////////////////////////////////////////////////
@ -160,25 +150,17 @@ bool DebuggerRunsInMiddleman() {
// Saving Recordings
///////////////////////////////////////////////////////////////////////////////
// Handle to the recording file opened at startup.
static FileHandle gRecordingFd;
StaticInfallibleVector<char> gRecordingContents;
static void SaveRecordingInternal(const ipc::FileDescriptor& aFile) {
// Make sure the recording file is up to date and ready for copying.
js::BeforeSaveRecording();
// Copy the file's contents to the new file.
DirectSeekFile(gRecordingFd, 0);
// Copy the recording's contents to the new file.
ipc::FileDescriptor::UniquePlatformHandle writefd =
aFile.ClonePlatformHandle();
char buf[4096];
while (true) {
size_t n = DirectRead(gRecordingFd, buf, sizeof(buf));
if (!n) {
break;
}
DirectWrite(writefd.get(), buf, n);
}
DirectWrite(writefd.get(), gRecordingContents.begin(),
gRecordingContents.length());
PrintSpew("Saved Recording Copy.\n");
@ -196,6 +178,144 @@ void SaveRecording(const ipc::FileDescriptor& aFile) {
}
}
///////////////////////////////////////////////////////////////////////////////
// Cloud Processes
///////////////////////////////////////////////////////////////////////////////
bool UseCloudForReplayingProcesses() {
nsAutoString cloudServer;
Preferences::GetString("devtools.recordreplay.cloudServer", cloudServer);
return cloudServer.Length() != 0;
}
static StaticRefPtr<rrIConnection> gConnection;
static StaticInfallibleVector<Channel*> gConnectionChannels;
class SendMessageToCloudRunnable : public Runnable {
public:
int32_t mConnectionId;
Message::UniquePtr mMsg;
SendMessageToCloudRunnable(int32_t aConnectionId, Message::UniquePtr aMsg)
: Runnable("SendMessageToCloudRunnable"),
mConnectionId(aConnectionId), mMsg(std::move(aMsg)) {}
NS_IMETHODIMP Run() {
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
JS::RootedObject data(cx, JS::NewArrayBuffer(cx, mMsg->mSize));
MOZ_RELEASE_ASSERT(data);
{
JS::AutoCheckCannotGC nogc;
bool isSharedMemory;
uint8_t* ptr = JS::GetArrayBufferData(data, &isSharedMemory, nogc);
MOZ_RELEASE_ASSERT(ptr);
memcpy(ptr, mMsg.get(), mMsg->mSize);
}
JS::RootedValue dataValue(cx, JS::ObjectValue(*data));
if (NS_FAILED(gConnection->SendMessage(mConnectionId, dataValue))) {
MOZ_CRASH("SendMessageToCloud");
}
return NS_OK;
}
};
static bool ConnectionCallback(JSContext* aCx, unsigned aArgc, JS::Value* aVp) {
JS::CallArgs args = CallArgsFromVp(aArgc, aVp);
if (!args.get(0).isNumber()) {
JS_ReportErrorASCII(aCx, "Expected number");
return false;
}
size_t id = args.get(0).toNumber();
if (id >= gConnectionChannels.length() || !gConnectionChannels[id]) {
JS_ReportErrorASCII(aCx, "Bad connection channel ID");
return false;
}
if (!args.get(1).isObject()) {
JS_ReportErrorASCII(aCx, "Expected object");
return false;
}
bool sentData = false;
{
JS::AutoCheckCannotGC nogc;
uint32_t length;
uint8_t* ptr;
bool isSharedMemory;
JS::GetArrayBufferLengthAndData(&args.get(1).toObject(), &length,
&isSharedMemory, &ptr);
if (ptr) {
Channel* channel = gConnectionChannels[id];
channel->SendMessageData((const char*) ptr, length);
sentData = true;
}
}
if (!sentData) {
JS_ReportErrorASCII(aCx, "Expected array buffer");
return false;
}
args.rval().setUndefined();
return true;
}
void CreateReplayingCloudProcess(base::ProcessId aProcessId,
uint32_t aChannelId) {
MOZ_RELEASE_ASSERT(XRE_IsParentProcess());
if (!gConnection) {
nsCOMPtr<rrIConnection> connection =
do_ImportModule("resource://devtools/server/actors/replay/connection.js");
gConnection = connection.forget();
ClearOnShutdown(&gConnection);
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
JSFunction* fun = JS_NewFunction(cx, ConnectionCallback, 2, 0,
"ConnectionCallback");
MOZ_RELEASE_ASSERT(fun);
JS::RootedValue callback(cx, JS::ObjectValue(*(JSObject*)fun));
if (NS_FAILED(gConnection->Initialize(callback))) {
MOZ_CRASH("CreateReplayingCloudProcess");
}
}
AutoSafeJSContext cx;
JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());
nsAutoString cloudServer;
Preferences::GetString("devtools.recordreplay.cloudServer", cloudServer);
MOZ_RELEASE_ASSERT(cloudServer.Length() != 0);
int32_t connectionId;
if (NS_FAILED(gConnection->Connect(aChannelId, cloudServer, &connectionId))) {
MOZ_CRASH("CreateReplayingCloudProcess");
}
Channel* channel = new Channel(
aChannelId, Channel::Kind::ParentCloud,
[=](Message::UniquePtr aMsg) {
RefPtr<SendMessageToCloudRunnable> runnable =
new SendMessageToCloudRunnable(connectionId, std::move(aMsg));
NS_DispatchToMainThread(runnable);
}, aProcessId);
while ((size_t)connectionId >= gConnectionChannels.length()) {
gConnectionChannels.append(nullptr);
}
gConnectionChannels[connectionId] = channel;
}
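
Each cloud connection splices a Channel onto a WebSocket owned by connection.js: outgoing Message buffers are copied into an ArrayBuffer and posted to the main thread for gConnection->SendMessage, while bytes arriving from the socket re-enter through ConnectionCallback and are pushed into the Channel registered under that connection ID. The sketch below models just that two-way routing, with plain callbacks standing in for the JSAPI and WebSocket layers; all names are illustrative.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

// Stand-in for the IPC Channel: only the entry point the bridge needs.
struct Channel {
  std::function<void(const char*, size_t)> deliver;  // hands received data to the channel's consumer
  void SendMessageData(const char* aData, size_t aSize) {
    if (deliver) {
      deliver(aData, aSize);
    }
  }
};

// Two-way bridge between channels and a remote connection layer. The real
// code routes through connection.js over a web socket; here the "socket" is
// just a callback supplied by the caller.
class CloudBridge {
  std::vector<Channel*> mChannels;                              // indexed by connection ID
  std::function<void(int, std::vector<uint8_t>)> mSendToCloud;  // outgoing path

 public:
  explicit CloudBridge(std::function<void(int, std::vector<uint8_t>)> aSend)
      : mSendToCloud(std::move(aSend)) {}

  // Register a channel and hand back the connection ID it is filed under.
  int Connect(Channel* aChannel) {
    mChannels.push_back(aChannel);
    return int(mChannels.size()) - 1;
  }

  // Outgoing: copy a serialized message and pass it to the connection layer,
  // as SendMessageToCloudRunnable does with an ArrayBuffer.
  void SendToCloud(int aId, const uint8_t* aMsg, size_t aSize) {
    mSendToCloud(aId, std::vector<uint8_t>(aMsg, aMsg + aSize));
  }

  // Incoming: route received bytes to the matching channel, mirroring
  // ConnectionCallback above.
  void OnCloudData(int aId, const uint8_t* aData, size_t aSize) {
    if (aId >= 0 && size_t(aId) < mChannels.size() && mChannels[aId]) {
      mChannels[aId]->SendMessageData(reinterpret_cast<const char*>(aData), aSize);
    }
  }
};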
///////////////////////////////////////////////////////////////////////////////
// Initialization
///////////////////////////////////////////////////////////////////////////////
@ -209,6 +329,8 @@ static base::ProcessId gParentPid;
base::ProcessId ParentProcessId() { return gParentPid; }
Monitor* gMonitor;
void InitializeMiddleman(int aArgc, char* aArgv[], base::ProcessId aParentPid,
const base::SharedMemoryHandle& aPrefsHandle,
const ipc::FileDescriptor& aPrefMapHandle) {
@ -234,7 +356,7 @@ void InitializeMiddleman(int aArgc, char* aArgv[], base::ProcessId aParentPid,
if (gProcessKind == ProcessKind::MiddlemanRecording) {
RecordingProcessData data(aPrefsHandle, aPrefMapHandle);
gRecordingChild = new ChildProcessInfo(Some(data));
gRecordingChild = new ChildProcessInfo(0, Some(data));
// Set the active child to the recording child initially, so that message
// forwarding works before the middleman control JS has been initialized.
@ -243,9 +365,21 @@ void InitializeMiddleman(int aArgc, char* aArgv[], base::ProcessId aParentPid,
InitializeForwarding();
// Open a file handle to the recording file we can use for saving recordings
// later on.
gRecordingFd = DirectOpenFile(gRecordingFilename, false);
if (gProcessKind == ProcessKind::MiddlemanReplaying) {
// Load the entire recording into memory.
FileHandle fd = DirectOpenFile(gRecordingFilename, false);
char buf[4096];
while (true) {
size_t n = DirectRead(fd, buf, sizeof(buf));
if (!n) {
break;
}
gRecordingContents.append(buf, n);
}
DirectCloseFile(fd);
}
}
} // namespace parent
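
When the middleman is replaying, the recording is now read fully into gRecordingContents at startup, so saving a copy later is a single DirectWrite of that buffer instead of a re-read through gRecordingFd. The same slurp pattern is sketched below with standard POSIX calls rather than the tree's DirectOpenFile/DirectRead wrappers; the helper name is made up.

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <vector>

// Read an entire file into memory, mirroring the loop added to
// InitializeMiddleman for the MiddlemanReplaying case.
static std::vector<char> ReadWholeFile(const char* aPath) {
  std::vector<char> contents;
  int fd = open(aPath, O_RDONLY);
  if (fd < 0) {
    perror("open");
    return contents;
  }
  char buf[4096];
  while (true) {
    ssize_t n = read(fd, buf, sizeof(buf));
    if (n <= 0) {
      break;  // end of file, or an error treated as end of input here
    }
    contents.insert(contents.end(), buf, buf + n);
  }
  close(fd);
  return contents;
}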

View file

@ -69,6 +69,15 @@ void GetArgumentsForChildProcess(base::ProcessId aMiddlemanPid,
// Return whether the middleman will be running developer tools server code.
bool DebuggerRunsInMiddleman();
// Return whether to create replaying processes on a remote machine.
bool UseCloudForReplayingProcesses();
// Create a replaying process on a remote machine. aProcessId is the pid of the
// middleman process which the replaying process will connect to, and aChannelId
// is the ID (unique for each middleman) of the resulting channel.
void CreateReplayingCloudProcess(base::ProcessId aProcessId,
uint32_t aChannelId);
} // namespace parent
} // namespace recordreplay
} // namespace mozilla

View file

@ -37,20 +37,12 @@ ChildProcessInfo* GetActiveChild();
// Get a child process by its ID.
ChildProcessInfo* GetChildProcess(size_t aId);
// Spawn a new replaying child process, returning its ID.
size_t SpawnReplayingChild();
// Spawn a new replaying child process with the specified ID.
void SpawnReplayingChild(size_t aId);
// Specify the current active child.
void SetActiveChild(ChildProcessInfo* aChild);
// Return whether the middleman's main thread is blocked waiting on a
// synchronous IPDL reply from the recording child.
bool MainThreadIsWaitingForIPDLReply();
// If necessary, resume execution in the child before the main thread begins
// to block while waiting on an IPDL reply from the child.
void ResumeBeforeWaitingForIPDLReply();
// Immediately forward any sync child->parent IPDL message. These are sent on
// the main thread, which might be blocked waiting for a response from the
// recording child and unable to run an event loop.
@ -63,9 +55,12 @@ void InitializeForwarding();
// Terminate all children and kill this process.
void Shutdown();
// All data in the recording.
extern StaticInfallibleVector<char> gRecordingContents;
// Monitor used for synchronizing between the main and channel or message loop
// threads.
static Monitor* gMonitor;
extern Monitor* gMonitor;
///////////////////////////////////////////////////////////////////////////////
// Graphics
@ -156,67 +151,29 @@ class ChildProcessInfo {
// Whether this process is recording.
bool mRecording = false;
// Whether the process is currently paused.
bool mPaused = false;
// Flags for whether we have received messages from the child indicating it
// is crashing.
bool mHasBegunFatalError = false;
bool mHasFatalError = false;
// Whether the child might be rewinding and can't receive ping messages.
bool mMightRewind = false;
// Whether the child is considered to be hanged and has been instructed to
// crash.
bool mSentTerminateMessage = false;
// The last time we sent a ping or terminate message.
TimeStamp mLastPingTime;
struct PingInfo {
uint32_t mId;
uint64_t mProgress;
explicit PingInfo(uint32_t aId) : mId(aId), mProgress(0) {}
};
// Information about all pings we have sent since they were reset.
InfallibleVector<PingInfo> mPings;
void OnIncomingMessage(const Message& aMsg);
static void MaybeProcessPendingMessageRunnable();
void ReceiveChildMessageOnMainThread(Message::UniquePtr aMsg);
static void ReceiveChildMessageOnMainThread(size_t aChildId,
Message::UniquePtr aMsg);
bool IsHanged();
void OnPingResponse(const PingResponseMessage& aMsg);
void OnCrash(const char* aWhy);
void OnCrash(size_t aForkId, const char* aWhy);
void LaunchSubprocess(
const Maybe<RecordingProcessData>& aRecordingProcessData);
size_t aId, const Maybe<RecordingProcessData>& aRecordingProcessData);
public:
explicit ChildProcessInfo(
const Maybe<RecordingProcessData>& aRecordingProcessData);
size_t aId, const Maybe<RecordingProcessData>& aRecordingProcessData);
~ChildProcessInfo();
size_t GetId() { return mChannel->GetId(); }
bool IsRecording() { return mRecording; }
bool IsPaused() { return mPaused; }
bool HasCrashed() { return mHasFatalError; }
// Send a message over the underlying channel.
void SendMessage(Message&& aMessage);
// Handle incoming messages from this process (and no others) until it pauses.
// The return value is null if it is already paused, otherwise the message
// which caused it to pause.
void WaitUntilPaused();
static void SetIntroductionMessage(IntroductionMessage* aMessage);
void ResetPings(bool aMightRewind);
void MaybePing();
static void MaybeProcessNextMessage();
};
} // namespace parent
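
WaitUntilPaused() blocks the middleman while this child's incoming messages are processed, returning once the child reports that it is paused. Its implementation is not shown in this header, so the sketch below is only the usual shape such a wait takes, a flag flipped by the message handler plus a condition variable; every name is illustrative.

#include <condition_variable>
#include <mutex>

// Illustrative only: a pause latch the channel thread signals and the main
// thread waits on; the real WaitUntilPaused also keeps handling this child's
// messages while it waits.
class PauseLatch {
  std::mutex mMutex;
  std::condition_variable mCondVar;
  bool mPaused = false;

 public:
  // Called from the message-handling thread when a "paused" message arrives.
  void NotifyPaused() {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mPaused = true;
    }
    mCondVar.notify_all();
  }

  // Called on the main thread; returns once the child has reported a pause.
  void WaitUntilPaused() {
    std::unique_lock<std::mutex> lock(mMutex);
    mCondVar.wait(lock, [this] { return mPaused; });
  }

  // Clear the latch before resuming the child.
  void Resume() {
    std::lock_guard<std::mutex> lock(mMutex);
    mPaused = false;
  }
};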

View file

@ -13,8 +13,7 @@ if CONFIG['OS_ARCH'] == 'Darwin' and CONFIG['NIGHTLY_BUILD']:
UNIFIED_SOURCES += [
'Assembler.cpp',
'Callback.cpp',
'DirtyMemoryHandler.cpp',
'File.cpp',
'ExternalCall.cpp',
'HashTable.cpp',
'ipc/Channel.cpp',
'ipc/ChildIPC.cpp',
@ -24,11 +23,10 @@ if CONFIG['OS_ARCH'] == 'Darwin' and CONFIG['NIGHTLY_BUILD']:
'ipc/ParentGraphics.cpp',
'ipc/ParentIPC.cpp',
'Lock.cpp',
'MemorySnapshot.cpp',
'MiddlemanCall.cpp',
'ProcessRecordReplay.cpp',
'ProcessRedirectDarwin.cpp',
'ProcessRewind.cpp',
'Recording.cpp',
'Thread.cpp',
'ThreadSnapshot.cpp',
'ValueIndex.cpp',