Bug 1464903 Part 6 - Redirection infrastructure, r=froydnj.

--HG--
extra : rebase_source : 388c86e7eea4c94161d572ddf9390cf923c11a8c
This commit is contained in:
Brian Hackett 2018-07-22 11:45:11 +00:00
Parent 328be466d8
Commit bf2d66a38e
5 changed files with 2102 additions and 0 deletions

Просмотреть файл

@ -0,0 +1,345 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Assembler.h"
#include "ProcessRecordReplay.h"
#include "udis86/types.h"
#include <sys/mman.h>
namespace mozilla {
namespace recordreplay {
// Construct an assembler that allocates its own executable storage on
// demand; emitted code is never reclaimed.
Assembler::Assembler()
  : mCursor(nullptr),
    mCursorEnd(nullptr),
    mCanAllocateStorage(true)
{}
// Construct an assembler restricted to the caller-provided range
// [aStorage, aStorage + aSize); it never allocates storage of its own.
Assembler::Assembler(uint8_t* aStorage, size_t aSize)
  : mCursor(aStorage),
    mCursorEnd(aStorage + aSize),
    mCanAllocateStorage(false)
{}
Assembler::~Assembler()
{
  // For every recorded jump whose target instruction was itself copied,
  // repoint the jump at the copy of that instruction.
  for (const auto& jump : mJumps) {
    uint8_t* source = jump.first;
    uint8_t* originalTarget = jump.second;
    for (const auto& copy : mCopiedInstructions) {
      if (copy.first != originalTarget) {
        continue;
      }
      PatchJump(source, copy.second);
      break;
    }
  }
}
// Record that the original instruction at aIp is about to be copied to the
// current output position, so that jumps targeting it can be retargeted at
// the copy in the destructor.
void
Assembler::NoteOriginalInstruction(uint8_t* aIp)
{
  mCopiedInstructions.emplaceBack(aIp, Current());
}
// Consume aSize bytes of instruction storage. aSize must not exceed
// MaximumAdvance, which Current() always leaves room for.
void
Assembler::Advance(size_t aSize)
{
  MOZ_RELEASE_ASSERT(aSize <= MaximumAdvance);
  mCursor += aSize;
}
// Size of the jump emitted by PatchJump: four 4-byte "push imm16"
// instructions plus a 1-byte ret.
static const size_t JumpBytes = 17;
// Get the address where the next assembled instruction will be placed,
// allocating a fresh executable buffer first if necessary.
uint8_t*
Assembler::Current()
{
  // Reallocate the buffer if there is not enough space. We need enough for the
  // maximum space used by any of the assembling functions, as well as for a
  // following jump for fallthrough to the next allocated space.
  if (size_t(mCursorEnd - mCursor) <= MaximumAdvance + JumpBytes) {
    MOZ_RELEASE_ASSERT(mCanAllocateStorage);

    // Allocate some writable, executable memory. This memory is deliberately
    // never freed, as the emitted code may run at any later point.
    // Consistently use BufferSize (the original mixed BufferSize and
    // PageSize, which obscured that they must be the same quantity).
    static const size_t BufferSize = PageSize;
    uint8_t* buffer = new uint8_t[BufferSize];
    // NOTE(review): the heap buffer is not page aligned, so the page rounding
    // in UnprotectExecutableMemory also makes neighboring heap data RWX.
    UnprotectExecutableMemory(buffer, BufferSize);

    if (mCursor) {
      // Patch a jump for fallthrough from the last allocation.
      MOZ_RELEASE_ASSERT(size_t(mCursorEnd - mCursor) >= JumpBytes);
      PatchJump(mCursor, buffer);
    }

    mCursor = buffer;
    mCursorEnd = &buffer[BufferSize];
  }
  return mCursor;
}
// Emit a "push imm16" (66 68 imm16) at *aIp and advance *aIp past it.
// The immediate is written byte-by-byte: the original code stored a
// uint16_t through a cast pointer, an unaligned type-punned store that is
// undefined behavior; the byte stores produce the same little-endian
// encoding on the x64 target.
static void
Push16(uint8_t** aIp, uint16_t aValue)
{
  uint8_t* ip = *aIp;
  ip[0] = 0x66;                  // operand-size prefix
  ip[1] = 0x68;                  // push imm16
  ip[2] = uint8_t(aValue);       // immediate, little-endian
  ip[3] = uint8_t(aValue >> 8);
  *aIp = ip + 4;
}
/* static */ void
Assembler::PatchJump(uint8_t* aIp, void* aTarget)
{
  // Push the target literal onto the stack, 2 bytes at a time. This is
  // apparently the best way of getting an arbitrary 8 byte literal onto the
  // stack, as 4 byte literals we push will be sign extended to 8 bytes.
  size_t ntarget = reinterpret_cast<size_t>(aTarget);
  // The high 16 bits are pushed first; the final push leaves the low bits at
  // the top of the stack, forming a little-endian 8-byte address for ret.
  Push16(&aIp, ntarget >> 48);
  Push16(&aIp, ntarget >> 32);
  Push16(&aIp, ntarget >> 16);
  Push16(&aIp, ntarget);
  *aIp = 0xC3; // ret: pops the constructed address and jumps to it
}
// Emit a jump to aTarget. If aTarget lies within the range of copied
// instructions, the destructor later repoints the jump at the copy.
void
Assembler::Jump(void* aTarget)
{
  PatchJump(Current(), aTarget);
  // Record the jump before advancing: Current() is still its first byte.
  mJumps.emplaceBack(Current(), (uint8_t*) aTarget);
  Advance(JumpBytes);
}
// Get the opposite single byte jump opcode for a one or two byte conditional
// jump. Opposite opcodes are adjacent, e.g. 0x7C -> jl and 0x7D -> jge, so
// flipping the low bit inverts the condition.
static uint8_t
OppositeJump(uint8_t aOpcode)
{
  uint8_t opcode = aOpcode;
  if (opcode >= 0x80 && opcode <= 0x8F) {
    // Normalize a two byte conditional opcode to its one byte form.
    opcode -= 0x10;
  } else {
    MOZ_RELEASE_ASSERT(opcode >= 0x70 && opcode <= 0x7F);
  }
  return opcode ^ 1;
}
// Conditionally jump to aTarget, where aCode is a short conditional jump
// opcode. Assembled as an opposite-condition short jump over a full Jump,
// since the full jump sequence cannot itself carry a condition.
void
Assembler::ConditionalJump(uint8_t aCode, void* aTarget)
{
  uint8_t* ip = Current();
  ip[0] = OppositeJump(aCode);
  ip[1] = (uint8_t) JumpBytes; // displacement: skip the following Jump
  Advance(2);
  Jump(aTarget);
}
// Copy aSize instruction bytes verbatim from aIp into the output stream.
void
Assembler::CopyInstruction(uint8_t* aIp, size_t aSize)
{
  MOZ_RELEASE_ASSERT(aSize <= MaximumInstructionLength);
  memcpy(Current(), aIp, aSize);
  Advance(aSize);
}
// push %rax
void
Assembler::PushRax()
{
  NewInstruction(0x50);
}

// pop %rax
void
Assembler::PopRax()
{
  NewInstruction(0x58);
}

// jmp *%rax
void
Assembler::JumpToRax()
{
  NewInstruction(0xFF, 0xE0);
}

// call *%rax
void
Assembler::CallRax()
{
  NewInstruction(0xFF, 0xD0);
}
// Load aWidth bytes from the address in %rax back into %rax
// (mov 0(%rax), %al/%ax/%eax/%rax).
void
Assembler::LoadRax(size_t aWidth)
{
  switch (aWidth) {
  case 1: NewInstruction(0x8A, 0x00); break;       // movb (%rax), %al
  case 2: NewInstruction(0x66, 0x8B, 0x00); break; // movw (%rax), %ax
  case 4: NewInstruction(0x8B, 0x00); break;       // movl (%rax), %eax
  case 8: NewInstruction(0x48, 0x8B, 0x00); break; // movq (%rax), %rax
  default: MOZ_CRASH();
  }
}
// cmpq %rax, 0(%rsp)
void
Assembler::CompareRaxWithTopOfStack()
{
  NewInstruction(0x48, 0x39, 0x04, 0x24);
}

// push %rbx
void
Assembler::PushRbx()
{
  NewInstruction(0x53);
}

// pop %rbx
void
Assembler::PopRbx()
{
  NewInstruction(0x5B);
}
// Store the low aWidth bytes of %rbx to the address in %rax
// (mov %bl/%bx/%ebx/%rbx, 0(%rax)).
void
Assembler::StoreRbxToRax(size_t aWidth)
{
  switch (aWidth) {
  case 1: NewInstruction(0x88, 0x18); break;       // movb %bl, (%rax)
  case 2: NewInstruction(0x66, 0x89, 0x18); break; // movw %bx, (%rax)
  case 4: NewInstruction(0x89, 0x18); break;       // movl %ebx, (%rax)
  case 8: NewInstruction(0x48, 0x89, 0x18); break; // movq %rbx, (%rax)
  default: MOZ_CRASH();
  }
}
// Compare the 8 bit literal aValue against the low aWidth bytes of %rax
// (cmp $imm8, %al/%ax/%eax/%rax; the wider forms sign extend the imm8).
void
Assembler::CompareValueWithRax(uint8_t aValue, size_t aWidth)
{
  switch (aWidth) {
  case 1: NewInstruction(0x3C, aValue); break;             // cmpb imm8, %al
  case 2: NewInstruction(0x66, 0x83, 0xF8, aValue); break; // cmpw imm8, %ax
  case 4: NewInstruction(0x83, 0xF8, aValue); break;       // cmpl imm8, %eax
  case 8: NewInstruction(0x48, 0x83, 0xF8, aValue); break; // cmpq imm8, %rax
  default: MOZ_CRASH();
  }
}
// Length of a movq $imm64, %rax instruction: REX.W prefix, opcode, and an
// 8 byte immediate.
static const size_t MoveImmediateBytes = 10;

// Write (or re-patch the immediate of) a movq $aValue, %rax at aIp. The
// immediate is written byte-by-byte: the original stored a void* through a
// cast pointer, an unaligned type-punned store that is undefined behavior;
// the byte stores produce the same little-endian encoding on x64.
/* static */ void
Assembler::PatchMoveImmediateToRax(uint8_t* aIp, void* aValue)
{
  aIp[0] = 0x48; // REX.W, i.e. 0x40 | (1 << 3)
  aIp[1] = 0xB8; // movq imm64, %rax
  size_t nvalue = reinterpret_cast<size_t>(aValue);
  for (size_t i = 0; i < sizeof(nvalue); i++) {
    aIp[2 + i] = uint8_t(nvalue >> (i * 8));
  }
}
// movq $aValue, %rax
void
Assembler::MoveImmediateToRax(void* aValue)
{
  PatchMoveImmediateToRax(Current(), aValue);
  Advance(MoveImmediateBytes);
}
// movq %rax, reg, where aRegister is a normalized (8 byte) Udis86 register.
void
Assembler::MoveRaxToRegister(/*ud_type*/ int aRegister)
{
  MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));

  uint8_t* ip = Current();
  if (aRegister <= UD_R_RDI) {
    ip[0] = 0x48; // REX.W
    ip[1] = 0x89; // mov r64 -> r/m64
    ip[2] = 0xC0 + aRegister - UD_R_RAX; // ModRM: destination in r/m field
  } else {
    ip[0] = 0x49; // REX.WB: destination is r8-r15
    ip[1] = 0x89;
    ip[2] = 0xC0 + aRegister - UD_R_R8;
  }
  Advance(3);
}
// movq reg, %rax, where aRegister is a normalized (8 byte) Udis86 register.
void
Assembler::MoveRegisterToRax(/*ud_type*/ int aRegister)
{
  MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));

  uint8_t* ip = Current();
  if (aRegister <= UD_R_RDI) {
    ip[0] = 0x48; // REX.W
    ip[1] = 0x89; // mov r64 -> r/m64
    ip[2] = 0xC0 + (aRegister - UD_R_RAX) * 8; // ModRM: source in reg field
  } else {
    ip[0] = 0x4C; // REX.WR: source is r8-r15
    ip[1] = 0x89;
    ip[2] = 0xC0 + (aRegister - UD_R_R8) * 8;
  }
  Advance(3);
}
// Map a Udis86 general purpose register of any width onto its 8 byte
// version, returning UD_NONE/zero for registers outside those families.
/* static */ /*ud_type*/ int
Assembler::NormalizeRegister(/*ud_type*/ int aRegister)
{
  static const struct { int mFirst; int mLast; } ranges[] = {
    { UD_R_AL, UD_R_R15B },  // 1 byte registers
    { UD_R_AX, UD_R_R15W },  // 2 byte registers
    { UD_R_EAX, UD_R_R15D }, // 4 byte registers
    { UD_R_RAX, UD_R_R15 },  // already 8 bytes wide
  };
  for (const auto& range : ranges) {
    if (aRegister >= range.mFirst && aRegister <= range.mLast) {
      return aRegister - range.mFirst + UD_R_RAX;
    }
  }
  return UD_NONE;
}
// Return whether aTarget is within the signed 8 bit displacement range of a
// short jump placed at aIp. Displacements are relative to the end of the
// 2 byte jump instruction.
/* static */ bool
Assembler::CanPatchShortJump(uint8_t* aIp, void* aTarget)
{
  ptrdiff_t offset = static_cast<uint8_t*>(aTarget) - (aIp + 2);
  return offset >= -128 && offset <= 127;
}
// Patch a 2 byte short jump to aTarget at aIp; aTarget must be in range per
// CanPatchShortJump.
/* static */ void
Assembler::PatchShortJump(uint8_t* aIp, void* aTarget)
{
  MOZ_RELEASE_ASSERT(CanPatchShortJump(aIp, aTarget));
  aIp[0] = 0xEB; // jmp rel8
  aIp[1] = uint8_t(static_cast<uint8_t*>(aTarget) - aIp - 2);
}
// Patch a 12 byte jump to aTarget at aIp, clobbering %rax: the target is
// loaded into %rax, pushed, and jumped to via ret.
/* static */ void
Assembler::PatchJumpClobberRax(uint8_t* aIp, void* aTarget)
{
  PatchMoveImmediateToRax(aIp, aTarget);
  aIp[10] = 0x50; // push %rax
  aIp[11] = 0xC3; // ret
}
// Overwrite aIp with a breakpoint, for code that must never execute.
/* static */ void
Assembler::PatchClobber(uint8_t* aIp)
{
  aIp[0] = 0xCC; // int3
}
// Round aPtr down to the start of its containing page. Relies on PageSize
// being a power of two for the mask to be valid.
static uint8_t*
PageStart(uint8_t* aPtr)
{
  static_assert(sizeof(size_t) == sizeof(uintptr_t), "Unsupported Platform");
  return reinterpret_cast<uint8_t*>(reinterpret_cast<size_t>(aPtr) & ~(PageSize - 1));
}
// Make every page overlapping [aAddress, aAddress + aSize) readable,
// writable, and executable.
void
UnprotectExecutableMemory(uint8_t* aAddress, size_t aSize)
{
  MOZ_ASSERT(aSize);
  uint8_t* firstPage = PageStart(aAddress);
  uint8_t* limitPage = PageStart(aAddress + aSize - 1) + PageSize;
  int rv = mprotect(firstPage, limitPage - firstPage,
                    PROT_READ | PROT_EXEC | PROT_WRITE);
  MOZ_RELEASE_ASSERT(rv >= 0);
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,181 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_Assembler_h
#define mozilla_recordreplay_Assembler_h
#include "InfallibleVector.h"
#include <utility>
namespace mozilla {
namespace recordreplay {
// Assembler for x64 instructions. This is a simple assembler that is primarily
// designed for use in copying instructions from a function that is being
// redirected.
class Assembler
{
public:
  // Create an assembler that allocates its own instruction storage. Assembled
  // code will never be reclaimed by the system.
  Assembler();

  // Create an assembler that uses the specified memory range for instruction
  // storage.
  Assembler(uint8_t* aStorage, size_t aSize);

  // Patches each recorded jump to point at the copy of its target, if the
  // target instruction was copied.
  ~Assembler();

  // Mark the point at which we start copying an instruction in the original
  // range.
  void NoteOriginalInstruction(uint8_t* aIp);

  // Get the address where the next assembled instruction will be placed.
  uint8_t* Current();

///////////////////////////////////////////////////////////////////////////////
// Routines for assembling instructions in new instruction storage
///////////////////////////////////////////////////////////////////////////////

  // Jump to aTarget. If aTarget is in the range of instructions being copied,
  // the target will be the copy of aTarget instead.
  void Jump(void* aTarget);

  // Conditionally jump to aTarget, depending on the short jump opcode aCode.
  // If aTarget is in the range of instructions being copied, the target will
  // be the copy of aTarget instead.
  void ConditionalJump(uint8_t aCode, void* aTarget);

  // Copy an instruction verbatim from aIp.
  void CopyInstruction(uint8_t* aIp, size_t aSize);

  // push/pop %rax
  void PushRax();
  void PopRax();

  // jump *%rax
  void JumpToRax();

  // call *%rax
  void CallRax();

  // movq/movl/movb 0(%rax), %rax
  void LoadRax(size_t aWidth);

  // cmpq %rax, 0(%rsp)
  void CompareRaxWithTopOfStack();

  // push/pop %rbx
  void PushRbx();
  void PopRbx();

  // movq/movl/movb %rbx, 0(%rax)
  void StoreRbxToRax(size_t aWidth);

  // cmpq/cmpb $literal8, %rax
  void CompareValueWithRax(uint8_t aValue, size_t aWidth);

  // movq $value, %rax
  void MoveImmediateToRax(void* aValue);

  // movq %rax, register
  void MoveRaxToRegister(/*ud_type*/ int aRegister);

  // movq register, %rax
  void MoveRegisterToRax(/*ud_type*/ int aRegister);

  // Normalize a Udis86 register to its 8 byte version, returning UD_NONE/zero
  // for unexpected registers.
  static /*ud_type*/ int NormalizeRegister(/*ud_type*/ int aRegister);

///////////////////////////////////////////////////////////////////////////////
// Routines for assembling instructions at arbitrary locations
///////////////////////////////////////////////////////////////////////////////

  // Return whether it is possible to patch a short jump to aTarget from aIp.
  static bool CanPatchShortJump(uint8_t* aIp, void* aTarget);

  // Patch a short jump to aTarget at aIp.
  static void PatchShortJump(uint8_t* aIp, void* aTarget);

  // Patch a long jump to aTarget at aIp. Rax may be clobbered.
  static void PatchJumpClobberRax(uint8_t* aIp, void* aTarget);

  // Patch the value used in an earlier MoveImmediateToRax call.
  static void PatchMoveImmediateToRax(uint8_t* aIp, void* aValue);

  // Patch an int3 breakpoint instruction at Ip.
  static void PatchClobber(uint8_t* aIp);

private:
  // Patch a jump that doesn't clobber any instructions.
  static void PatchJump(uint8_t* aIp, void* aTarget);

  // Consume some instruction storage.
  void Advance(size_t aSize);

  // The maximum amount we can write at a time without a jump potentially
  // being introduced into the instruction stream.
  static const size_t MaximumAdvance = 20;

  // Base case for the variadic byte counter below.
  inline size_t CountBytes() { return 0; }

  // Count the byte arguments passed to NewInstruction.
  template <typename... Tail>
  inline size_t CountBytes(uint8_t aByte, Tail... aMoreBytes) {
    return 1 + CountBytes(aMoreBytes...);
  }

  // Base case for the variadic byte writer below.
  inline void CopyBytes(uint8_t* aIp) {}

  // Write each byte argument to aIp in order.
  template <typename... Tail>
  inline void CopyBytes(uint8_t* aIp, uint8_t aByte, Tail... aMoreBytes) {
    *aIp = aByte;
    CopyBytes(aIp + 1, aMoreBytes...);
  }

  // Write a complete instruction with bytes specified as parameters.
  template <typename... ByteList>
  inline void NewInstruction(ByteList... aBytes) {
    size_t numBytes = CountBytes(aBytes...);
    MOZ_ASSERT(numBytes <= MaximumAdvance);
    uint8_t* ip = Current();
    CopyBytes(ip, aBytes...);
    Advance(numBytes);
  }

  // Storage for assembling new instructions.
  uint8_t* mCursor;    // next write position
  uint8_t* mCursorEnd; // end of the current storage block
  bool mCanAllocateStorage;

  // Association between the instruction original and copy pointers, for all
  // instructions that have been copied.
  InfallibleVector<std::pair<uint8_t*,uint8_t*>> mCopiedInstructions;

  // For jumps we have copied, association between the source (in generated
  // code) and target (in the original code) of the jump. These will be updated
  // to refer to their copy (if there is one) in generated code in the
  // assembler's destructor.
  InfallibleVector<std::pair<uint8_t*,uint8_t*>> mJumps;
};
// The number of instruction bytes required for a short jump (opcode + rel8).
static const size_t ShortJumpBytes = 2;

// The number of instruction bytes required for a jump that may clobber rax
// (movq $target, %rax; push %rax; ret).
static const size_t JumpBytesClobberRax = 12;

// The maximum byte length of an x86/x64 instruction.
static const size_t MaximumInstructionLength = 15;

// Make a region of memory RWX.
void UnprotectExecutableMemory(uint8_t* aAddress, size_t aSize);
} // recordreplay
} // mozilla
#endif // mozilla_recordreplay_Assembler_h

Просмотреть файл

@ -0,0 +1,116 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_CallFunction_h
#define mozilla_recordreplay_CallFunction_h
namespace mozilla {
namespace recordreplay {
// These macros define functions for calling a void* function pointer with
// a particular ABI and arbitrary arguments. In principle we could do this
// with varargs (i.e. cast to 'int (ABI *)(...)' before calling), but MSVC
// treats 'int (__stdcall *)(...)' as 'int (__cdecl *)(...)', unfortunately.
//
// After instantiating DefineAllCallFunctions, the resulting functions will
// be overloaded and have the form, for a given ABI:
//
// template <typename ReturnType>
// ReturnType CallFunctionABI(void* fn);
//
// template <typename ReturnType, typename T0>
// ReturnType CallFunctionABI(void* fn, T0 a0);
//
// template <typename ReturnType, typename T0, typename T1>
// ReturnType CallFunctionABI(void* fn, T0 a0, T1 a1);
//
// And so forth.
// Define one overload of CallFunction<ABI>: aFn is cast to the ABI-qualified
// function pointer type built from aFormalTypes and invoked with aActuals.
// The three lists must agree. (Comments cannot appear on the macro's
// continuation lines, so the body is annotated here.)
#define DefineCallFunction(aABI, aReturnType, aFormals, aFormalTypes, aActuals) \
  static inline aReturnType CallFunction ##aABI aFormals { \
    MOZ_ASSERT(aFn); \
    return BitwiseCast<aReturnType (aABI *) aFormalTypes>(aFn) aActuals; \
  }
// Instantiate the CallFunction<ABI> overload set for zero through twelve
// arguments. (Comments cannot appear on the macro's continuation lines, so
// the body is annotated here.)
#define DefineAllCallFunctions(aABI)                                  \
  template <typename ReturnType>                                      \
  DefineCallFunction(aABI, ReturnType, (void* aFn), (), ())           \
  template <typename ReturnType, typename T0>                         \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0), (T0), (a0))                  \
  template <typename ReturnType, typename T0, typename T1>            \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1), (T0, T1), (a0, a1))   \
  template <typename ReturnType, typename T0, typename T1, typename T2> \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2),                \
                     (T0, T1, T2), (a0, a1, a2))                      \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3> \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3),         \
                     (T0, T1, T2, T3),                                \
                     (a0, a1, a2, a3))                                \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4>                                              \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4),  \
                     (T0, T1, T2, T3, T4),                            \
                     (a0, a1, a2, a3, a4))                            \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5>                                 \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5), \
                     (T0, T1, T2, T3, T4, T5),                        \
                     (a0, a1, a2, a3, a4, a5))                        \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5, typename T6>                    \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
                      T6 a6),                                         \
                     (T0, T1, T2, T3, T4, T5, T6),                    \
                     (a0, a1, a2, a3, a4, a5, a6))                    \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5, typename T6, typename T7>       \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
                      T6 a6, T7 a7),                                  \
                     (T0, T1, T2, T3, T4, T5, T6, T7),                \
                     (a0, a1, a2, a3, a4, a5, a6, a7))                \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5, typename T6, typename T7,       \
            typename T8>                                              \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
                      T6 a6, T7 a7, T8 a8),                           \
                     (T0, T1, T2, T3, T4, T5, T6, T7, T8),            \
                     (a0, a1, a2, a3, a4, a5, a6, a7, a8))            \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5, typename T6, typename T7,       \
            typename T8, typename T9>                                 \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
                      T6 a6, T7 a7, T8 a8, T9 a9),                    \
                     (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9),        \
                     (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9))        \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5, typename T6, typename T7,       \
            typename T8, typename T9, typename T10>                   \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
                      T6 a6, T7 a7, T8 a8, T9 a9, T10 a10),           \
                     (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10),   \
                     (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10))   \
  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
            typename T4, typename T5, typename T6, typename T7,       \
            typename T8, typename T9, typename T10, typename T11>     \
  DefineCallFunction(aABI, ReturnType,                                \
                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
                      T6 a6, T7 a7, T8 a8, T9 a9, T10 a10, T11 a11),  \
                     (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11), \
                     (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11))
} // recordreplay
} // mozilla
#endif // mozilla_recordreplay_CallFunction_h

Просмотреть файл

@ -0,0 +1,686 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ProcessRedirect.h"
#include "InfallibleVector.h"
#include "mozilla/Sprintf.h"
#include <dlfcn.h>
#include <string.h>
namespace {
#include "udis86/udis86.c"
#include "udis86/decode.c"
#include "udis86/itab.c"
} // anonymous namespace
namespace mozilla {
namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Library API Redirections
///////////////////////////////////////////////////////////////////////////////
// Redirecting system library APIs requires delicacy. We have to patch the code
// so that whenever control reaches the beginning of the library API's symbol,
// we will end up jumping to an address of our choice instead. This has to be
// done without corrupting the instructions of any functions in the library,
// which principally means ensuring that there are no internal jumps into the
// code segments we have patched.
//
// The patching we do here might fail: it isn't possible to redirect an
// arbitrary symbol within an arbitrary block of code. We are doing a best
// effort sort of thing, and any failures will be noted for reporting,
// without touching the original code at all.
// Keep track of the jumps we know about which could affect the validity of a
// code patch.
static StaticInfallibleVector<std::pair<uint8_t*,uint8_t*>> gInternalJumps;

// Jump to patch in at the end of redirecting. To avoid issues with calling
// redirected functions before all redirections have been installed
// (particularly due to locks being taken while checking for internal jump
// targets), all modification of the original code is delayed until after no
// further system calls are needed.
struct JumpPatch
{
  uint8_t* mStart;  // where the jump instruction will be written
  uint8_t* mTarget; // where the jump will transfer control to
  bool mShort;      // whether this is a 2 byte short jump

  JumpPatch(uint8_t* aStart, uint8_t* aTarget, bool aShort)
    : mStart(aStart), mTarget(aTarget), mShort(aShort)
  {}
};
static StaticInfallibleVector<JumpPatch> gJumpPatches;

// Record a jump to be patched in later, also treating it as an internal jump
// for subsequent patchability checks.
static void
AddJumpPatch(uint8_t* aStart, uint8_t* aTarget, bool aShort)
{
  gInternalJumps.emplaceBack(aStart, aTarget);
  gJumpPatches.emplaceBack(aStart, aTarget, aShort);
}
// A range of instructions to clobber at the end of redirecting.
struct ClobberPatch
{
  uint8_t* mStart; // first byte to clobber
  uint8_t* mEnd;   // one past the last byte to clobber

  ClobberPatch(uint8_t* aStart, uint8_t* aEnd)
    : mStart(aStart), mEnd(aEnd)
  {}
};
static StaticInfallibleVector<ClobberPatch> gClobberPatches;

// Record a range to clobber later; empty or inverted ranges are ignored.
static void
AddClobberPatch(uint8_t* aStart, uint8_t* aEnd)
{
  if (aStart < aEnd) {
    gClobberPatches.emplaceBack(aStart, aEnd);
  }
}
// Look up the start address of the symbol containing aPtr. Crashes if aPtr
// does not fall inside any symbol known to the dynamic linker.
static uint8_t*
SymbolBase(uint8_t* aPtr)
{
  Dl_info info;
  if (dladdr(aPtr, &info) == 0) {
    MOZ_CRASH();
  }
  return static_cast<uint8_t*>(info.dli_saddr);
}
// Use Udis86 to decode a single instruction, returning the number of bytes
// consumed.
static size_t
DecodeInstruction(uint8_t* aIp, ud_t* aUd)
{
  ud_init(aUd);
  ud_set_input_buffer(aUd, aIp, MaximumInstructionLength);
  ud_set_mode(aUd, 64); // decode as 64 bit code
  size_t nbytes = ud_decode(aUd);
  MOZ_RELEASE_ASSERT(nbytes && nbytes <= MaximumInstructionLength);
  return nbytes;
}
// If it is unsafe to patch new instructions into [aIpStart, aIpEnd> then
// return an instruction at which a new search can be started from.
// Returns nullptr when the range is safe to patch.
static uint8_t*
MaybeInternalJumpTarget(uint8_t* aIpStart, uint8_t* aIpEnd)
{
  // The start and end have to be associated with the same symbol, as otherwise
  // a jump could come into the start of the later symbol.
  const char* startName = SymbolNameRaw(aIpStart);
  const char* endName = SymbolNameRaw(aIpEnd - 1);
  if (strcmp(startName, endName)) {
    return SymbolBase(aIpEnd - 1);
  }

  // Look for any internal jumps from outside the patch range into the middle
  // of the patch range.
  for (auto jump : gInternalJumps) {
    if (!(jump.first >= aIpStart && jump.first < aIpEnd) &&
        jump.second > aIpStart && jump.second < aIpEnd) {
      return jump.second;
    }
  }

  // Treat patched regions of code as if they had internal jumps.
  for (auto patch : gJumpPatches) {
    uint8_t* end = patch.mStart + (patch.mShort ? ShortJumpBytes : JumpBytesClobberRax);
    if (MemoryIntersects(aIpStart, aIpEnd - aIpStart, patch.mStart, end - patch.mStart)) {
      return end;
    }
  }
  for (auto patch : gClobberPatches) {
    if (MemoryIntersects(aIpStart, aIpEnd - aIpStart, patch.mStart, patch.mEnd - patch.mStart)) {
      return patch.mEnd;
    }
  }

  if ((size_t)(aIpEnd - aIpStart) > ShortJumpBytes) {
    // Manually annotate functions which might have backedges that interfere
    // with redirecting the initial bytes of the function. Ideally we would
    // find these backedges with some binary analysis, but this is easier said
    // than done, especially since there doesn't seem to be a standard way to
    // determine the extent of a symbol's code on OSX. Use strstr to avoid
    // issues with goo in the symbol names.
    if ((strstr(startName, "CTRunGetGlyphs") &&
         !strstr(startName, "CTRunGetGlyphsPtr")) ||
        (strstr(startName, "CTRunGetPositions") &&
         !strstr(startName, "CTRunGetPositionsPtr")) ||
        (strstr(startName, "CTRunGetStringIndices") &&
         !strstr(startName, "CTRunGetStringIndicesPtr")) ||
        strstr(startName, "CGColorSpaceCreateDeviceRGB") ||
        // For these functions, there is a syscall near the beginning which
        // other system threads might be inside.
        strstr(startName, "__workq_kernreturn") ||
        strstr(startName, "kevent64")) {
      return aIpEnd - 1;
    }
  }

  return nullptr;
}
// Any reasons why redirection failed.
static StaticInfallibleVector<char*> gRedirectFailures;

// Record a printf-style formatted reason why a redirection failed. The
// strdup'ed message is kept alive in gRedirectFailures for later reporting.
static void
RedirectFailure(const char* aFormat, ...)
{
  va_list ap;
  va_start(ap, aFormat);
  char buf[4096];
  VsprintfLiteral(buf, aFormat, ap);
  va_end(ap);

  gRedirectFailures.emplaceBack(strdup(buf));
}
// Report a redirection failure for an instruction we could not safely copy,
// listing its raw bytes in the failure message.
static void
UnknownInstruction(const char* aName, uint8_t* aIp, size_t aNbytes)
{
  char buf[4096];
  buf[0] = '\0'; // the buffer is formatted with %s even when aNbytes == 0
  char* ptr = buf;
  char* end = buf + sizeof(buf);
  for (size_t i = 0; i < aNbytes; i++) {
    int written = snprintf(ptr, end - ptr, " %d", (int) aIp[i]);
    // snprintf returns the untruncated length: guard against advancing ptr
    // past the end of the buffer, which would wrap the size passed to the
    // next call and overflow the buffer.
    if (written < 0 || written >= end - ptr) {
      break;
    }
    ptr += written;
  }
  RedirectFailure("Unknown instruction in %s:%s", aName, buf);
}
// Try to emit instructions to |aAssembler| with equivalent behavior to any
// special jump or ip-dependent instruction at |aIp|, returning true if the
// instruction was copied.
static bool
CopySpecialInstruction(uint8_t* aIp, ud_t* aUd, size_t aNbytes, Assembler& aAssembler)
{
  aAssembler.NoteOriginalInstruction(aIp);

  // Segment-prefixed instructions are not handled here.
  if (aUd->pfx_seg) {
    return false;
  }

  ud_mnemonic_code mnemonic = ud_insn_mnemonic(aUd);
  if (mnemonic == UD_Icall || mnemonic == UD_Ijmp || (mnemonic >= UD_Ijo && mnemonic <= UD_Ijg)) {
    MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 1));
    const ud_operand* op = ud_insn_opr(aUd, 0);
    if (op->type == UD_OP_JIMM) {
      // Call or jump relative to rip.
      uint8_t* target = aIp + aNbytes;
      switch (op->size) {
      case 8:  target += op->lval.sbyte;  break;
      case 32: target += op->lval.sdword; break;
      default: return false;
      }
      // Record the target so later patchability checks see this edge.
      gInternalJumps.emplaceBack(nullptr, target);
      if (mnemonic == UD_Icall) {
        aAssembler.MoveImmediateToRax(target);
        aAssembler.CallRax();
      } else if (mnemonic == UD_Ijmp) {
        aAssembler.Jump(target);
      } else {
        aAssembler.ConditionalJump(aUd->primary_opcode, target);
      }
      return true;
    }
    if (op->type == UD_OP_MEM && op->base == UD_R_RIP && !op->index && op->offset == 32) {
      // jmp *$offset32(%rip): load the pointer slot into %rax and jump to it.
      uint8_t* addr = aIp + aNbytes + op->lval.sdword;
      aAssembler.MoveImmediateToRax(addr);
      aAssembler.LoadRax(8);
      aAssembler.JumpToRax();
      return true;
    }
  }

  if (mnemonic == UD_Imov || mnemonic == UD_Ilea) {
    MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 2));
    const ud_operand* dst = ud_insn_opr(aUd, 0);
    const ud_operand* src = ud_insn_opr(aUd, 1);
    if (dst->type == UD_OP_REG &&
        src->type == UD_OP_MEM && src->base == UD_R_RIP && !src->index && src->offset == 32) {
      // mov/lea $offset32(%rip), reg: compute the absolute address via %rax,
      // preserving %rax around the sequence when it isn't the destination.
      int reg = Assembler::NormalizeRegister(dst->base);
      if (!reg) {
        return false;
      }
      uint8_t* addr = aIp + aNbytes + src->lval.sdword;
      if (reg != UD_R_RAX) {
        aAssembler.PushRax();
      }
      aAssembler.MoveImmediateToRax(addr);
      if (mnemonic == UD_Imov) {
        // lea only needs the address; mov also dereferences it.
        aAssembler.LoadRax(src->size / 8);
      }
      if (reg != UD_R_RAX) {
        aAssembler.MoveRaxToRegister(reg);
        aAssembler.PopRax();
      }
      return true;
    }
    if (dst->type == UD_OP_MEM && dst->base == UD_R_RIP && !dst->index && dst->offset == 32 &&
        src->type == UD_OP_REG && mnemonic == UD_Imov) {
      // movl reg, $offset32(%rip): stage the source value in %rbx and the
      // absolute address in %rax, then store.
      int reg = Assembler::NormalizeRegister(src->base);
      if (!reg) {
        return false;
      }
      uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
      aAssembler.PushRax();
      aAssembler.PushRbx();
      aAssembler.MoveRegisterToRax(reg);
      aAssembler.PushRax();
      aAssembler.PopRbx();
      aAssembler.MoveImmediateToRax(addr);
      aAssembler.StoreRbxToRax(src->size / 8);
      aAssembler.PopRbx();
      aAssembler.PopRax();
      return true;
    }
  }

  if (mnemonic == UD_Icmp) {
    MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 2));
    const ud_operand* dst = ud_insn_opr(aUd, 0);
    const ud_operand* src = ud_insn_opr(aUd, 1);
    if (dst->type == UD_OP_MEM && dst->base == UD_R_RIP && !dst->index && dst->offset == 32 &&
        src->type == UD_OP_IMM && src->size == 8) {
      // cmp $literal8, $offset32(%rip): load the memory operand into %rax
      // and compare against the literal.
      uint8_t value = src->lval.ubyte;
      uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
      aAssembler.PushRax();
      aAssembler.MoveImmediateToRax(addr);
      aAssembler.LoadRax(dst->size / 8);
      aAssembler.CompareValueWithRax(value, dst->size / 8);
      aAssembler.PopRax();
      return true;
    }
    if (dst->type == UD_OP_REG &&
        src->type == UD_OP_MEM && src->base == UD_R_RIP && !src->index && src->offset == 32) {
      // cmpq $offset32(%rip), reg: stash the register on the stack, load the
      // memory operand into %rax, and compare against the stack slot.
      int reg = Assembler::NormalizeRegister(dst->base);
      if (!reg) {
        return false;
      }
      uint8_t* addr = aIp + aNbytes + src->lval.sdword;
      aAssembler.PushRax();
      aAssembler.MoveRegisterToRax(reg);
      aAssembler.PushRax();
      aAssembler.MoveImmediateToRax(addr);
      aAssembler.LoadRax(8);
      aAssembler.CompareRaxWithTopOfStack();
      aAssembler.PopRax();
      aAssembler.PopRax();
      return true;
    }
  }

  return false;
}
// Copy an instruction to aAssembler, returning the number of bytes used by the
// instruction. Instructions that cannot be copied safely are reported via
// UnknownInstruction but their length is still returned so decoding can
// continue.
static size_t
CopyInstruction(const char* aName, uint8_t* aIp, Assembler& aAssembler)
{
  // Use Udis86 to decode a single instruction.
  ud_t ud;
  size_t nbytes = DecodeInstruction(aIp, &ud);

  // Check for a special cased instruction.
  if (CopySpecialInstruction(aIp, &ud, nbytes, aAssembler)) {
    return nbytes;
  }

  // Don't copy call and jump instructions. We should have special cased these,
  // and these may not behave correctly after a naive copy if their behavior is
  // relative to the instruction pointer.
  ud_mnemonic_code_t mnemonic = ud_insn_mnemonic(&ud);
  if (mnemonic == UD_Icall || (mnemonic >= UD_Ijo && mnemonic <= UD_Ijmp)) {
    UnknownInstruction(aName, aIp, nbytes);
    return nbytes;
  }

  // Don't copy instructions which have the instruction pointer as an operand.
  // We should have special cased these, and as above these will not behave
  // correctly after being naively copied due to their dependence on the
  // instruction pointer.
  for (size_t i = 0;; i++) {
    const ud_operand_t* op = ud_insn_opr(&ud, i);
    if (!op) {
      break;
    }
    switch (op->type) {
    case UD_OP_MEM:
      if (op->index == UD_R_RIP) {
        UnknownInstruction(aName, aIp, nbytes);
        return nbytes;
      }
      MOZ_FALLTHROUGH;
    case UD_OP_REG:
      if (op->base == UD_R_RIP) {
        UnknownInstruction(aName, aIp, nbytes);
        return nbytes;
      }
      break;
    default:
      break;
    }
  }

  aAssembler.CopyInstruction(aIp, nbytes);
  return nbytes;
}
// Copy every instruction overlapping [aIpStart, aIpEnd) into aAssembler,
// returning the address of the first instruction not copied (i.e. the
// fallthrough instruction from the copied range).
static uint8_t*
CopyInstructions(const char* aName, uint8_t* aIpStart, uint8_t* aIpEnd,
                 Assembler& aAssembler)
{
  // The caller must already have checked the range is safe to copy.
  MOZ_RELEASE_ASSERT(!MaybeInternalJumpTarget(aIpStart, aIpEnd));

  uint8_t* cursor = aIpStart;
  while (cursor < aIpEnd) {
    cursor += CopyInstruction(aName, cursor, aAssembler);
  }
  return cursor;
}
// Get the instruction pointer to use as the address of the base function for a
// redirection.
static uint8_t*
FunctionStartAddress(Redirection& aRedirection)
{
  uint8_t* addr = static_cast<uint8_t*>(dlsym(RTLD_DEFAULT, aRedirection.mName));
  if (!addr)
    return nullptr;

  // FF 25 encodes jmp *disp32(%rip): the symbol resolved to an indirect jump
  // stub, so read the real entry point out of the pointer slot it uses
  // (located disp32 bytes past the end of the 6 byte instruction).
  if (addr[0] == 0xFF && addr[1] == 0x25) {
    return *(uint8_t**)(addr + 6 + *reinterpret_cast<int32_t*>(addr + 2));
  }

  return addr;
}
// Setup a redirection: overwrite the machine code for its base function, and
// fill in its original function, to satisfy the function pointer behaviors
// described in the Redirection structure. aAssembler is used to allocate
// executable memory for use in the redirection.
//
// This runs twice per redirection (aFirstPass true, then false). The second
// pass may patch over the initial bytes of nearby symbols, which is only safe
// after the first pass has given every function a chance to be redirected.
static void
Redirect(Redirection& aRedirection, Assembler& aAssembler, bool aFirstPass)
{
  // The patching we do here might fail: it isn't possible to redirect an
  // arbitrary instruction pointer within an arbitrary block of code. This code
  // is doing a best effort sort of thing, and on failure it will crash safely.
  // The main thing we want to avoid is corrupting the code so that it has been
  // redirected but might crash or behave incorrectly when executed.
  uint8_t* functionStart = aRedirection.mBaseFunction;
  // Cursor into the original code as it is inspected and copied.
  uint8_t* ro = functionStart;
  if (!functionStart) {
    // Symbol lookup failed earlier; only report this once (first pass).
    if (aFirstPass) {
      PrintSpew("Could not find symbol %s for redirecting.\n", aRedirection.mName);
    }
    return;
  }
  // EarlyInitializeRedirections() set mOriginalFunction == mBaseFunction, so
  // inequality means a redirection has already been installed for this entry.
  if (aRedirection.mOriginalFunction != aRedirection.mBaseFunction) {
    // We already redirected this function.
    MOZ_RELEASE_ASSERT(!aFirstPass);
    return;
  }
  // First, see if we can overwrite JumpBytesClobberRax bytes of instructions
  // at the base function with a direct jump to the new function. Rax is never
  // live at the start of a function and we can emit a jump to an arbitrary
  // location with fewer instruction bytes on x64 if we clobber it.
  //
  // This will work if there are no extraneous jump targets within the region
  // of memory we are overwriting. If there are, we will corrupt the behavior
  // of those jumps if we patch the memory.
  uint8_t* extent = ro + JumpBytesClobberRax;
  if (!MaybeInternalJumpTarget(ro, extent)) {
    // Given code instructions for the base function as follows (AA are
    // instructions we will end up copying, -- are instructions that will never
    // be inspected or modified):
    //
    // base function: AA--
    //
    // Transform the code into:
    //
    // base function: J0--
    // generated code: AAJ1
    //
    // Where J0 jumps to the new function, the original function is at AA, and
    // J1 jumps to the point after J0.

    // Set the new function to the start of the generated code.
    aRedirection.mOriginalFunction = aAssembler.Current();

    // Copy AA into generated code.
    ro = CopyInstructions(aRedirection.mName, ro, extent, aAssembler);

    // Emit jump J1.
    aAssembler.Jump(ro);

    // Emit jump J0.
    AddJumpPatch(functionStart, aRedirection.mNewFunction, /* aShort = */ false);
    AddClobberPatch(functionStart + JumpBytesClobberRax, ro);
    return;
  }
  // We don't have enough space to patch in a long jump to an arbitrary
  // instruction. Attempt to find another region of code that is long enough
  // for two long jumps, has no internal jump targets, and is within range of
  // the base function for a short jump.
  //
  // Given code instructions for the base function, with formatting as above:
  //
  // base function: AA--BBBB--
  //
  // Transform the code into:
  //
  // base function: J0--J1J2--
  // generated code: AAJ3 BBBBJ4
  //
  // With the original function at AA, the jump targets are as follows:
  //
  // J0: short jump to J2
  // J1: jump to BBBB
  // J2: jump to the new function
  // J3: jump to the point after J0
  // J4: jump to the point after J2

  // Skip this during the first pass, we don't want to patch a jump in over the
  // initial bytes of a function we haven't redirected yet.
  if (aFirstPass) {
    return;
  }
  // The original symbol must have enough bytes to insert a short jump.
  MOZ_RELEASE_ASSERT(!MaybeInternalJumpTarget(ro, ro + ShortJumpBytes));
  // Copy AA into generated code.
  aRedirection.mOriginalFunction = aAssembler.Current();
  uint8_t* nro = CopyInstructions(aRedirection.mName, ro, ro + ShortJumpBytes, aAssembler);
  // Emit jump J3.
  aAssembler.Jump(nro);
  // Keep advancing the instruction pointer until we get to a region that is
  // large enough for two long jump patches.
  ro = SymbolBase(extent);
  while (true) {
    extent = ro + JumpBytesClobberRax * 2;
    uint8_t* target = MaybeInternalJumpTarget(ro, extent);
    if (target) {
      // The candidate region contains a jump target; restart the search at it.
      ro = target;
      continue;
    }
    break;
  }
  // Copy BBBB into generated code.
  uint8_t* firstJumpTarget = aAssembler.Current();
  uint8_t* afterip = CopyInstructions(aRedirection.mName, ro, extent, aAssembler);
  // Emit jump J4.
  aAssembler.Jump(afterip);
  // Emit jump J1.
  AddJumpPatch(ro, firstJumpTarget, /* aShort = */ false);
  // Emit jump J2.
  AddJumpPatch(ro + JumpBytesClobberRax, aRedirection.mNewFunction, /* aShort = */ false);
  AddClobberPatch(ro + 2 * JumpBytesClobberRax, afterip);
  // Emit jump J0.
  AddJumpPatch(functionStart, ro + JumpBytesClobberRax, /* aShort = */ true);
  AddClobberPatch(functionStart + ShortJumpBytes, nro);
}
void
EarlyInitializeRedirections()
{
for (size_t i = 0;; i++) {
Redirection& redirection = gRedirections[i];
if (!redirection.mName) {
break;
}
MOZ_ASSERT(!redirection.mBaseFunction);
MOZ_ASSERT(redirection.mNewFunction);
MOZ_ASSERT(!redirection.mOriginalFunction);
redirection.mBaseFunction = FunctionStartAddress(redirection);
redirection.mOriginalFunction = redirection.mBaseFunction;
if (redirection.mBaseFunction && IsRecordingOrReplaying()) {
// We will get confused if we try to redirect the same address in multiple places.
for (size_t j = 0; j < i; j++) {
if (gRedirections[j].mBaseFunction == redirection.mBaseFunction) {
PrintSpew("Redirection %s shares the same address as %s, skipping.\n",
redirection.mName, gRedirections[j].mName);
redirection.mBaseFunction = nullptr;
break;
}
}
}
}
}
// Install all redirections, returning false and setting
// gInitializationFailureMessage if any of them could not be patched in.
bool
InitializeRedirections()
{
  MOZ_ASSERT(IsRecordingOrReplaying());
  {
    Assembler assembler;
    // First pass: redirect every function that can be patched with a direct
    // jump at its start. See Redirect() for why two passes are needed.
    for (size_t i = 0;; i++) {
      Redirection& redirection = gRedirections[i];
      if (!redirection.mName) {
        break;
      }
      Redirect(redirection, assembler, /* aFirstPass = */ true);
    }
    // Second pass: handle the remaining redirections, which may patch over
    // the initial bytes of other, already-redirected functions.
    for (size_t i = 0;; i++) {
      Redirection& redirection = gRedirections[i];
      if (!redirection.mName) {
        break;
      }
      Redirect(redirection, assembler, /* aFirstPass = */ false);
    }
  }
  // Don't install redirections if we had any failures.
  if (!gRedirectFailures.empty()) {
    // Concatenate the failure reasons into one newline-separated message.
    // Use a single constant for both the allocation size and the running
    // space counter so the two cannot drift apart, and pre-write a null
    // terminator into the reserved final byte.
    const size_t messageSize = 4096;
    size_t len = messageSize;
    gInitializationFailureMessage = new char[messageSize];
    gInitializationFailureMessage[--len] = 0;
    char* ptr = gInitializationFailureMessage;
    for (char* reason : gRedirectFailures) {
      size_t n = snprintf(ptr, len, "%s\n", reason);
      if (n >= len) {
        // The message was truncated; stop appending.
        break;
      }
      ptr += n;
      len -= n;
    }
    return false;
  }
  // Remove write protection from all patched regions, so that we don't call
  // into the system while we are in the middle of redirecting.
  for (const JumpPatch& patch : gJumpPatches) {
    UnprotectExecutableMemory(patch.mStart, patch.mShort ? ShortJumpBytes : JumpBytesClobberRax);
  }
  for (const ClobberPatch& patch : gClobberPatches) {
    UnprotectExecutableMemory(patch.mStart, patch.mEnd - patch.mStart);
  }
  // Do the actual patching of executable code for the functions we are
  // redirecting.
  for (const JumpPatch& patch : gJumpPatches) {
    if (patch.mShort) {
      Assembler::PatchShortJump(patch.mStart, patch.mTarget);
    } else {
      Assembler::PatchJumpClobberRax(patch.mStart, patch.mTarget);
    }
  }
  for (const ClobberPatch& patch : gClobberPatches) {
    for (uint8_t* ip = patch.mStart; ip < patch.mEnd; ip++) {
      Assembler::PatchClobber(ip);
    }
  }
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// Utility
///////////////////////////////////////////////////////////////////////////////
// Running total of bytes deliberately leaked by redirected calls; incremented
// by NewLeakyArray() (see ProcessRedirect.h). NOTE(review): DontPreserve
// presumably excludes this counter from snapshot preservation -- confirm
// against the Atomic wrapper's definition.
Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gMemoryLeakBytes;
void*
BindFunctionArgument(void* aFunction, void* aArgument, size_t aArgumentPosition,
                     Assembler& aAssembler)
{
  // Remember where the generated thunk begins; this is the value returned to
  // the caller.
  void* start = aAssembler.Current();

  // On x64 the argument will be in a register, so to add an extra argument for
  // the callee we just need to fill in the appropriate register for the
  // argument position with the bound argument value.
  aAssembler.MoveImmediateToRax(aArgument);

  if (aArgumentPosition == 1) {
    aAssembler.MoveRaxToRegister(UD_R_RSI);
  } else if (aArgumentPosition == 2) {
    aAssembler.MoveRaxToRegister(UD_R_RDX);
  } else if (aArgumentPosition == 3) {
    aAssembler.MoveRaxToRegister(UD_R_RCX);
  } else {
    MOZ_CRASH();
  }

  // Tail-jump to the function that was bound.
  aAssembler.Jump(aFunction);
  return start;
}
} // namespace recordreplay
} // namespace mozilla

Просмотреть файл

@ -0,0 +1,774 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_recordreplay_ProcessRedirect_h
#define mozilla_recordreplay_ProcessRedirect_h
#include "Assembler.h"
#include "Callback.h"
#include "CallFunction.h"
#include "ProcessRecordReplay.h"
#include "ProcessRewind.h"
#include "Thread.h"
#include "ipc/Channel.h"
#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Casting.h"
#include <errno.h>
namespace mozilla {
namespace recordreplay {
// Redirections Overview.
//
// The vast majority of recording and replaying is done through function
// redirections. When the record/replay system is initialized, a set of system
// library API functions have their machine code modified so that when that API
// is called it redirects control to a custom record/replay function with the
// same signature. Machine code is also generated that contains any overwritten
// instructions in the API, and which may be called to get the API's original
// behavior before it was redirected.
//
// In the usual case, a record/replay function redirection does the following
// standard steps:
//
// 1. If events are being passed through, the original function is called and
// its results returned to the caller, as if the redirection was not there
// at all.
//
// 2. If events are not passed through and we are recording, the original
// function is called, and then an event is recorded for the current thread
// along with any outputs produced by the call.
//
// 3. If events are not passed through and we are replaying, the original
// function is *not* called, but rather the event and outputs are read from
// the recording and sent back to the caller.
//
// Macros are provided below to streamline this process. Redirections do not
// need to adhere to this protocol, however, and can have whatever behaviors
// that are necessary for reliable record/replay.
//
// Some platforms need additional redirection techniques for handling different
// features of that platform. See the individual ProcessRedirect*.cpp files for
// descriptions of these.
//
// The main advantage of using redirections is that Gecko code does not need to
// be modified at all to take advantage of them. Redirected APIs should be
// functions that are directly called by Gecko code and are part of system
// libraries. These APIs are well defined, well documented by the platform, and
// stable. The main maintenance burden going forward is in handling new library
// APIs that were not previously called by Gecko.
//
// The main risk with using function redirections is that the set of redirected
// functions is incomplete. If a library API is not redirected then it might
// behave differently between recording and replaying, or it might crash while
// replaying.
///////////////////////////////////////////////////////////////////////////////
// Function Redirections
///////////////////////////////////////////////////////////////////////////////
// Information about a system library API function which is being redirected.
struct Redirection
{
// Name of the function being redirected.
const char* mName;
// Address of the function which is being redirected. The code for this
// function is modified so that attempts to call this function will instead
// call mNewFunction.
uint8_t* mBaseFunction;
// Function with the same signature as mBaseFunction, which may have
// different behavior for recording/replaying the call.
uint8_t* mNewFunction;
// Function with the same signature and original behavior as
// mBaseFunction.
uint8_t* mOriginalFunction;
};
// All platform specific redirections, indexed by the call event. The array is
// terminated by an entry with a null mName.
extern Redirection gRedirections[];

// Do early initialization of redirections. This is done on both
// recording/replaying and middleman processes, and allows OriginalCall() to
// work in either case.
void EarlyInitializeRedirections();

// Set up all platform specific redirections, or fail and set
// gInitializationFailureMessage.
bool InitializeRedirections();

// Generic type for a system error code. Signed so that negative error codes
// can be represented.
typedef ssize_t ErrorType;

// Functions for saving or restoring system error codes.
static inline ErrorType SaveError() { return errno; }
static inline void RestoreError(ErrorType aError) { errno = aError; }
// Specify the default ABI to use by the record/replay macros below. Empty,
// i.e. the platform's native calling convention.
#define DEFAULTABI

// Define CallFunction(...) for all supported ABIs.
DefineAllCallFunctions(DEFAULTABI)

// Get the address of the original function for a call event ID.
static inline void*
OriginalFunction(size_t aCallId)
{
  return gRedirections[aCallId].mOriginalFunction;
}

#define TokenPaste(aFirst, aSecond) aFirst ## aSecond

// Call the original function for a call event ID with a particular ABI and any
// number of arguments.
#define OriginalCallABI(aName, aReturnType, aABI, ...)          \
  TokenPaste(CallFunction, aABI) <aReturnType>                  \
    (OriginalFunction(CallEvent_ ##aName), ##__VA_ARGS__)

// Call the original function for a call event ID with the default ABI.
#define OriginalCall(aName, aReturnType, ...)                   \
  OriginalCallABI(aName, aReturnType, DEFAULTABI, ##__VA_ARGS__)
// State for a function redirection which performs the standard steps (see the
// comment at the start of this file). This should not be created directly, but
// rather through one of the macros below. The constructor enables event
// pass-through for the duration of any call to the original function; the
// redirection macro later calls StartRecordReplay() to end pass-through and
// begin recording/replaying the call's outputs.
struct AutoRecordReplayFunctionVoid
{
  // The current thread, or null if events are being passed through.
  Thread* mThread;

  // Any system error generated by the call which was redirected.
  ErrorType mError;

protected:
  // Information about the call being recorded.
  size_t mCallId;
  const char* mCallName;

public:
  AutoRecordReplayFunctionVoid(size_t aCallId, const char* aCallName)
    : mThread(AreThreadEventsPassedThrough() ? nullptr : Thread::Current()),
      mError(0), mCallId(aCallId), mCallName(aCallName)
  {
    if (mThread) {
      // Calling any redirection which performs the standard steps will cause
      // debugger operations that have diverged from the recording to fail.
      EnsureNotDivergedFromRecording();
      MOZ_ASSERT(!AreThreadEventsDisallowed());

      // Pass through events in case we are calling the original function.
      mThread->SetPassThrough(true);
    }
  }

  ~AutoRecordReplayFunctionVoid()
  {
    if (mThread) {
      // Restore any error saved or replayed earlier to the system.
      RestoreError(mError);
    }
  }

  // Begin recording or replaying data for the call. This must be called before
  // destruction if mThread is non-null.
  inline void StartRecordReplay() {
    MOZ_ASSERT(mThread);

    // Save any system error in case we want to record/replay it.
    mError = SaveError();

    // Stop the event passing through that was initiated in the constructor.
    mThread->SetPassThrough(false);

    // Add an event for the thread.
    RecordReplayAssert("%s", mCallName);
    ThreadEvent ev = (ThreadEvent)((uint32_t)ThreadEvent::CallStart + mCallId);
    mThread->Events().RecordOrReplayThreadEvent(ev);
  }
};
// State for a function redirection that performs the standard steps and also
// returns a value.
template <typename ReturnType>
struct AutoRecordReplayFunction : AutoRecordReplayFunctionVoid
{
  // The value which this function call should return. Filled in either by
  // calling the original function (when recording) or from the recording
  // itself (when replaying) by the macros below.
  ReturnType mRval;

  AutoRecordReplayFunction(size_t aCallId, const char* aCallName)
    : AutoRecordReplayFunctionVoid(aCallId, aCallName)
  {}
};
// Macros for recording or replaying a function that performs the standard
// steps. These macros should be used near the start of the body of a
// redirection function, and will fall through only if events are not
// passed through and the outputs of the function need to be recorded or
// replayed.
//
// These macros define an AutoRecordReplayFunction local |rrf| with state for
// the redirection, and additional locals |events| and (if the function has a
// return value) |rval| for convenient access.

// Record/replay a function that returns a value and has a particular ABI.
// When events are passed through the original call's result is returned
// directly; when recording, the original function is called and its result
// left in |rval| for the caller to record.
#define RecordReplayFunctionABI(aName, aReturnType, aABI, ...)            \
  AutoRecordReplayFunction<aReturnType> rrf(CallEvent_ ##aName, #aName);  \
  if (!rrf.mThread) {                                                     \
    return OriginalCallABI(aName, aReturnType, aABI, ##__VA_ARGS__);      \
  }                                                                       \
  if (IsRecording()) {                                                    \
    rrf.mRval = OriginalCallABI(aName, aReturnType, aABI, ##__VA_ARGS__); \
  }                                                                       \
  rrf.StartRecordReplay();                                                \
  Stream& events = rrf.mThread->Events();                                 \
  (void) events;                                                          \
  aReturnType& rval = rrf.mRval

// Record/replay a function that returns a value and has the default ABI.
#define RecordReplayFunction(aName, aReturnType, ...)                     \
  RecordReplayFunctionABI(aName, aReturnType, DEFAULTABI, ##__VA_ARGS__)

// Record/replay a function that has no return value and has a particular ABI.
#define RecordReplayFunctionVoidABI(aName, aABI, ...)                     \
  AutoRecordReplayFunctionVoid rrf(CallEvent_ ##aName, #aName);           \
  if (!rrf.mThread) {                                                     \
    OriginalCallABI(aName, void, aABI, ##__VA_ARGS__);                    \
    return;                                                               \
  }                                                                       \
  if (IsRecording()) {                                                    \
    OriginalCallABI(aName, void, aABI, ##__VA_ARGS__);                    \
  }                                                                       \
  rrf.StartRecordReplay();                                                \
  Stream& events = rrf.mThread->Events();                                 \
  (void) events

// Record/replay a function that has no return value and has the default ABI.
#define RecordReplayFunctionVoid(aName, ...)                              \
  RecordReplayFunctionVoidABI(aName, DEFAULTABI, ##__VA_ARGS__)
// The following macros are used for functions that do not record an error and
// take or return values of specified types.
//
// aAT == aArgumentType
// aRT == aReturnType
//
// Each RRFunctionTypesN macro defines a static redirection function RR_<name>
// taking N typed arguments, which records/replays the call's return value.
#define RRFunctionTypes0(aName, aRT)                            \
  static aRT DEFAULTABI                                         \
  RR_ ##aName ()                                                \
  {                                                             \
    RecordReplayFunction(aName, aRT);                           \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes1(aName, aRT, aAT0)                      \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0)                                         \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0);                       \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes2(aName, aRT, aAT0, aAT1)                \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1)                                \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1);                   \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes3(aName, aRT, aAT0, aAT1, aAT2)          \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2)                       \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2);               \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes4(aName, aRT, aAT0, aAT1, aAT2, aAT3)    \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3)              \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3);           \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes5(aName, aRT, aAT0, aAT1, aAT2, aAT3,    \
                         aAT4)                                  \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4)     \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4);       \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes6(aName, aRT, aAT0, aAT1, aAT2, aAT3,    \
                         aAT4, aAT5)                            \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,     \
               aAT5 a5)                                         \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5);   \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes7(aName, aRT, aAT0, aAT1, aAT2, aAT3,    \
                         aAT4, aAT5, aAT6)                      \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,     \
               aAT5 a5, aAT6 a6)                                \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6); \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes8(aName, aRT, aAT0, aAT1, aAT2, aAT3,    \
                         aAT4, aAT5, aAT6, aAT7)                \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,     \
               aAT5 a5, aAT6 a6, aAT7 a7)                       \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7); \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes9(aName, aRT, aAT0, aAT1, aAT2, aAT3,    \
                         aAT4, aAT5, aAT6, aAT7, aAT8)          \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,     \
               aAT5 a5, aAT6 a6, aAT7 a7, aAT8 a8)              \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7, a8); \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
#define RRFunctionTypes10(aName, aRT, aAT0, aAT1, aAT2, aAT3,   \
                          aAT4, aAT5, aAT6, aAT7, aAT8, aAT9)   \
  static aRT DEFAULTABI                                         \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,     \
               aAT5 a5, aAT6 a6, aAT7 a7, aAT8 a8, aAT9 a9)     \
  {                                                             \
    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); \
    events.RecordOrReplayValue(&rval);                          \
    return rval;                                                \
  }
// The RRFunctionTypesVoidN variants are for functions with no return value;
// nothing is recorded beyond the call event added by RecordReplayFunctionVoid.
#define RRFunctionTypesVoid1(aName, aAT0)                       \
  static void DEFAULTABI                                        \
  RR_ ##aName (aAT0 a0)                                         \
  {                                                             \
    RecordReplayFunctionVoid(aName, a0);                        \
  }
#define RRFunctionTypesVoid2(aName, aAT0, aAT1)                 \
  static void DEFAULTABI                                        \
  RR_ ##aName (aAT0 a0, aAT1 a1)                                \
  {                                                             \
    RecordReplayFunctionVoid(aName, a0, a1);                    \
  }
#define RRFunctionTypesVoid3(aName, aAT0, aAT1, aAT2)           \
  static void DEFAULTABI                                        \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2)                       \
  {                                                             \
    RecordReplayFunctionVoid(aName, a0, a1, a2);                \
  }
#define RRFunctionTypesVoid4(aName, aAT0, aAT1, aAT2, aAT3)     \
  static void DEFAULTABI                                        \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3)              \
  {                                                             \
    RecordReplayFunctionVoid(aName, a0, a1, a2, a3);            \
  }
#define RRFunctionTypesVoid5(aName, aAT0, aAT1, aAT2, aAT3, aAT4) \
  static void DEFAULTABI                                        \
  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4)     \
  {                                                             \
    RecordReplayFunctionVoid(aName, a0, a1, a2, a3, a4);        \
  }
// The following macros are used for functions that take and return scalar
// values (not a struct or a floating point) and do not record an error
// anywhere. All arguments and the return value are treated as size_t.
#define RRFunction0(aName) \
  RRFunctionTypes0(aName, size_t)
#define RRFunction1(aName) \
  RRFunctionTypes1(aName, size_t, size_t)
#define RRFunction2(aName) \
  RRFunctionTypes2(aName, size_t, size_t, size_t)
#define RRFunction3(aName) \
  RRFunctionTypes3(aName, size_t, size_t, size_t, size_t)
#define RRFunction4(aName) \
  RRFunctionTypes4(aName, size_t, size_t, size_t, size_t, size_t)
#define RRFunction5(aName) \
  RRFunctionTypes5(aName, size_t, size_t, size_t, size_t, size_t, size_t)
#define RRFunction6(aName) \
  RRFunctionTypes6(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
#define RRFunction7(aName) \
  RRFunctionTypes7(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
#define RRFunction8(aName) \
  RRFunctionTypes8(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
                   size_t)
#define RRFunction9(aName) \
  RRFunctionTypes9(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
                   size_t, size_t)
#define RRFunction10(aName) \
  RRFunctionTypes10(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
                    size_t, size_t, size_t)
// The following macros are used for functions that take scalar arguments and
// do not return a value or record an error anywhere.
#define RRFunctionVoid0(aName) \
  static void DEFAULTABI \
  RR_ ##aName () \
  { \
    RecordReplayFunctionVoid(aName); \
  }
#define RRFunctionVoid1(aName) \
  RRFunctionTypesVoid1(aName, size_t)
#define RRFunctionVoid2(aName) \
  RRFunctionTypesVoid2(aName, size_t, size_t)
#define RRFunctionVoid3(aName) \
  RRFunctionTypesVoid3(aName, size_t, size_t, size_t)
#define RRFunctionVoid4(aName) \
  RRFunctionTypesVoid4(aName, size_t, size_t, size_t, size_t)
#define RRFunctionVoid5(aName) \
  RRFunctionTypesVoid5(aName, size_t, size_t, size_t, size_t, size_t)
// The following macros are used for functions that return a signed integer
// value and record an error if the return value is negative (see
// RecordOrReplayHadErrorNegative below, which also records errno on failure).
#define RRFunctionNegError0(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName ()                                                \
  {                                                             \
    RecordReplayFunction(aName, ssize_t);                       \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
#define RRFunctionNegError1(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName (size_t a0)                                       \
  {                                                             \
    RecordReplayFunction(aName, ssize_t, a0);                   \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
#define RRFunctionNegError2(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName (size_t a0, size_t a1)                            \
  {                                                             \
    RecordReplayFunction(aName, ssize_t, a0, a1);               \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
#define RRFunctionNegError3(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName (size_t a0, size_t a1, size_t a2)                 \
  {                                                             \
    RecordReplayFunction(aName, ssize_t, a0, a1, a2);           \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
#define RRFunctionNegError4(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3)      \
  {                                                             \
    RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3);       \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
#define RRFunctionNegError5(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,      \
               size_t a4)                                       \
  {                                                             \
    RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3, a4);   \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
#define RRFunctionNegError6(aName)                              \
  static ssize_t DEFAULTABI                                     \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,      \
               size_t a4, size_t a5)                            \
  {                                                             \
    RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3, a4, a5); \
    RecordOrReplayHadErrorNegative(rrf);                        \
    return rval;                                                \
  }
// The following macros are used for functions that return an integer
// value and record an error if the return value is zero (see
// RecordOrReplayHadErrorZero below, which also records errno on failure).
//
// NOTE(review): these declare the wrappers with __stdcall while every other
// macro family here uses DEFAULTABI -- presumably a leftover; confirm the
// intended ABI (only RRFunctionZeroErrorABI2 takes an explicit ABI).
#define RRFunctionZeroError0(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName ()                                                \
  {                                                             \
    RecordReplayFunction(aName, size_t);                        \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError1(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0)                                       \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0);                    \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroErrorABI2(aName, aABI)                    \
  static size_t aABI                                            \
  RR_ ##aName (size_t a0, size_t a1)                            \
  {                                                             \
    RecordReplayFunctionABI(aName, size_t, aABI, a0, a1);       \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError2(aName) RRFunctionZeroErrorABI2(aName, DEFAULTABI)
#define RRFunctionZeroError3(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0, size_t a1, size_t a2)                 \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0, a1, a2);            \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError4(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3)      \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0, a1, a2, a3);        \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError5(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,      \
               size_t a4)                                       \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4);    \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError6(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,      \
               size_t a4, size_t a5)                            \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4, a5); \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError7(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,      \
               size_t a4, size_t a5, size_t a6)                 \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4, a5, a6); \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
#define RRFunctionZeroError8(aName)                             \
  static size_t __stdcall                                       \
  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,      \
               size_t a4, size_t a5, size_t a6, size_t a7)      \
  {                                                             \
    RecordReplayFunction(aName, size_t, a0, a1, a2, a3, a4, a5, a6, a7); \
    RecordOrReplayHadErrorZero(rrf);                            \
    return rval;                                                \
  }
// Recording template for functions which are used for inter-thread
// synchronization and must be replayed in the original order they executed in.
// aFormals is the parenthesized formal parameter list; when the call returns
// aFailureRval the error code is recorded/replayed as well.
#define RecordReplayOrderedFunction(aName, aReturnType, aFailureRval, aFormals, ...) \
  static aReturnType DEFAULTABI                                 \
  RR_ ## aName aFormals                                         \
  {                                                             \
    BeginOrderedEvent(); /* This is a noop if !mThread */       \
    RecordReplayFunction(aName, aReturnType, __VA_ARGS__);      \
    EndOrderedEvent();                                          \
    events.RecordOrReplayValue(&rval);                          \
    if (rval == aFailureRval) {                                 \
      events.RecordOrReplayValue(&rrf.mError);                  \
    }                                                           \
    return rval;                                                \
  }
///////////////////////////////////////////////////////////////////////////////
// Callback Redirections
///////////////////////////////////////////////////////////////////////////////
// Below are helpers for use in handling a common callback pattern used within
// redirections: the system is passed a pointer to a Gecko callback, and a
// pointer to some opaque Gecko data which the system will pass to the callback
// when invoking it.
//
// This pattern may be handled by replacing the Gecko callback with a callback
// wrapper (see Callback.h), and replacing the opaque Gecko data with a pointer
// to a CallbackWrapperData structure, which contains both the original Gecko
// callback to use and the data which should be passed to it.
//
// The RecordReplayCallback is used early in the callback wrapper to save and
// restore both the Gecko callback and its opaque data pointer.
// Opaque data passed to a system API in place of the original Gecko data,
// pairing the real Gecko callback with the data that should be passed to it.
struct CallbackWrapperData
{
  // The original Gecko callback, stored type-erased via BitwiseCast.
  void* mFunction;

  // The opaque data the system was originally asked to pass to the callback.
  void* mData;

  template <typename FunctionType>
  CallbackWrapperData(FunctionType aFunction, void* aData)
    : mFunction(BitwiseCast<void*>(aFunction)), mData(aData)
  {}
};
// This class should not be used directly, but rather through the macro below.
// It recovers the original Gecko callback/data from a CallbackWrapperData when
// recording, and saves or restores them via the recording (see
// SaveOrRestoreCallbackData) so the same callback runs when replaying.
template <typename FunctionType>
struct AutoRecordReplayCallback
{
  // The original Gecko callback to invoke.
  FunctionType mFunction;

  AutoRecordReplayCallback(void** aDataArgument, size_t aCallbackId)
    : mFunction(nullptr)
  {
    MOZ_ASSERT(IsRecordingOrReplaying());
    if (IsRecording()) {
      // Unwrap the CallbackWrapperData to get the real callback and its data,
      // and note that the callback has started executing.
      CallbackWrapperData* wrapperData = (CallbackWrapperData*) *aDataArgument;
      mFunction = (FunctionType) wrapperData->mFunction;
      *aDataArgument = wrapperData->mData;
      BeginCallback(aCallbackId);
    }
    // Save (recording) or restore (replaying) the callback and its data.
    SaveOrRestoreCallbackData((void**)&mFunction);
    SaveOrRestoreCallbackData(aDataArgument);
  }

  ~AutoRecordReplayCallback() {
    if (IsRecording()) {
      EndCallback();
    }
  }
};

// Macro for using AutoRecordReplayCallback.
#define RecordReplayCallback(aFunctionType, aDataArgument) \
  AutoRecordReplayCallback<aFunctionType> rrc(aDataArgument, CallbackEvent_ ##aFunctionType)
///////////////////////////////////////////////////////////////////////////////
// Redirection Helpers
///////////////////////////////////////////////////////////////////////////////
// Read/write a success code (where zero is failure) and errno value on failure.
// Returns true when the call failed.
template <typename T>
static inline bool
RecordOrReplayHadErrorZero(AutoRecordReplayFunction<T>& aRrf)
{
  // Record or replay the call's return value itself.
  aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mRval);

  // On success there is nothing further to save.
  if (aRrf.mRval != 0) {
    return false;
  }

  // The call failed, so record or replay the associated error code too.
  aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mError);
  return true;
}
// Read/write a success code (where negative values are failure) and errno value on failure.
// Returns true when the call failed.
template <typename T>
static inline bool
RecordOrReplayHadErrorNegative(AutoRecordReplayFunction<T>& aRrf)
{
  // Record or replay the call's return value itself.
  aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mRval);

  // On success there is nothing further to save.
  if (aRrf.mRval >= 0) {
    return false;
  }

  // The call failed, so record or replay the associated error code too.
  aRrf.mThread->Events().RecordOrReplayValue(&aRrf.mError);
  return true;
}
extern Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gMemoryLeakBytes;
// For allocating memory in redirections that will never be reclaimed. This is
// done for simplicity. If the amount of leaked memory from redirected calls
// grows too large then steps can be taken to more closely emulate the library
// behavior.
template <typename T>
static inline T*
NewLeakyArray(size_t aSize)
{
  // Account for the leak so the total can be tracked via gMemoryLeakBytes.
  gMemoryLeakBytes += sizeof(T) * aSize;
  return new T[aSize];
}
///////////////////////////////////////////////////////////////////////////////
// Other Redirection Interfaces
///////////////////////////////////////////////////////////////////////////////
// Given an argument function aFunction, generate code for a new function that
// takes one fewer argument than aFunction and then calls aFunction with all
// its arguments and the aArgument value in the last argument position.
//
// i.e. if aFunction has the signature: size_t (*)(void*, void*, void*);
//
// Then BindFunctionArgument(aFunction, aArgument, 2) produces this function:
//
// size_t result(void* a0, void* a1) {
// return aFunction(a0, a1, aArgument);
// }
//
// Supported positions for the bound argument are 1, 2, and 3.
void*
BindFunctionArgument(void* aFunction, void* aArgument, size_t aArgumentPosition,
Assembler& aAssembler);
} // recordreplay
} // mozilla
#endif // mozilla_recordreplay_ProcessRedirect_h