Ryan VanderMeulen 2014-07-09 15:45:21 -04:00
Parents 33438fd923 2ad53f526f
Commit 6004615aad
75 changed files: 2434 additions and 2173 deletions

View file

@ -24,9 +24,9 @@ skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
[browser_UITour_detach_tab.js]
skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
[browser_UITour_annotation_size_attributes.js]
skip-if = e10s || os == "win" # Bug 941428 - UITour.jsm not e10s friendly. Intermittent test failures on Windows (bug 1026310 & bug 1032137)
[browser_UITour_panel_close_annotation.js]
skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
[browser_UITour_panel_close_annotation.js]
skip-if = e10s || os == "win" # Bug 941428 - UITour.jsm not e10s friendly. Intermittent test failures on Windows (bug 1026310 & bug 1032137)
[browser_UITour_registerPageID.js]
skip-if = e10s # Bug 941428 - UITour.jsm not e10s friendly
[browser_UITour_sync.js]

View file

@ -64,6 +64,7 @@ _MOZBUILD_EXTERNAL_VARIABLES := \
RESOURCE_FILES \
SDK_HEADERS \
SIMPLE_PROGRAMS \
SONAME \
TEST_DIRS \
TIERS \
TOOL_DIRS \
@ -148,6 +149,7 @@ CHECK_VARS := \
XPI_PKGNAME \
INSTALL_EXTENSION_ID \
SHARED_LIBRARY_NAME \
SONAME \
STATIC_LIBRARY_NAME \
$(NULL)

View file

@ -177,6 +177,12 @@ else
SHARED_LIBRARY := $(DLL_PREFIX)$(SHARED_LIBRARY_NAME)$(DLL_SUFFIX)
endif
ifdef SONAME
DSO_SONAME = $(DLL_PREFIX)$(SONAME)$(DLL_SUFFIX)
else
DSO_SONAME = $(notdir $@)
endif
EMBED_MANIFEST_AT=2
endif # MKSHLIB

View file

@ -1312,8 +1312,8 @@ if test "$GNU_CC"; then
fi
# FIXME: Let us build with strict aliasing. bug 414641.
CFLAGS="$CFLAGS -fno-strict-aliasing"
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
WARNINGS_AS_ERRORS='-Werror'
DSO_CFLAGS=''
DSO_PIC_CFLAGS='-fPIC'
@ -1461,8 +1461,8 @@ elif test "$SOLARIS_SUNPRO_CC"; then
fi
_DEFINES_CFLAGS='$(ACDEFINES) -D_MOZILLA_CONFIG_H_ -DMOZILLA_CLIENT'
else
MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
DSO_LDOPTS='-shared'
if test "$GNU_LD"; then
@ -2279,8 +2279,8 @@ ia64*-hpux*)
if test "$LIBRUNPATH"; then
DSO_LDOPTS="-Wl,-R$LIBRUNPATH $DSO_LDOPTS"
fi
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@)) -o $@'
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
;;
*-openbsd*)
@ -2323,8 +2323,8 @@ ia64*-hpux*)
[LDFLAGS=$_SAVE_LDFLAGS])
fi
MOZ_OPTIMIZE_FLAGS="-xO4"
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
MKSHLIB_FORCE_ALL='-z allextract'
MKSHLIB_UNFORCE_ALL='-z defaultextract'
DSO_LDOPTS='-G'

View file

@ -5,6 +5,7 @@
#include "TrackEncoder.h"
#include "AudioChannelFormat.h"
#include "MediaStreamGraph.h"
#include "prlog.h"
#include "VideoUtils.h"
#undef LOG
@ -17,12 +18,38 @@
namespace mozilla {
#ifdef PR_LOGGING
PRLogModuleInfo* gTrackEncoderLog;
#define TRACK_LOG(type, msg) PR_LOG(gTrackEncoderLog, type, msg)
#else
#define TRACK_LOG(type, msg)
#endif
static const int DEFAULT_CHANNELS = 1;
static const int DEFAULT_SAMPLING_RATE = 16000;
static const int DEFAULT_FRAME_WIDTH = 640;
static const int DEFAULT_FRAME_HEIGHT = 480;
static const int DEFAULT_TRACK_RATE = USECS_PER_S;
TrackEncoder::TrackEncoder()
: mReentrantMonitor("media.TrackEncoder")
, mEncodingComplete(false)
, mEosSetInEncoder(false)
, mInitialized(false)
, mEndOfStream(false)
, mCanceled(false)
#ifdef PR_LOGGING
, mAudioInitCounter(0)
, mVideoInitCounter(0)
#endif
{
#ifdef PR_LOGGING
if (!gTrackEncoderLog) {
gTrackEncoderLog = PR_NewLogModule("TrackEncoder");
}
#endif
}
void
AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
TrackID aID,
@ -39,6 +66,10 @@ AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
// Check and initialize parameters for codec encoder.
if (!mInitialized) {
#ifdef PR_LOGGING
mAudioInitCounter++;
TRACK_LOG(PR_LOG_DEBUG, ("Init the audio encoder %d times", mAudioInitCounter));
#endif
AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio));
while (!iter.IsEnded()) {
AudioChunk chunk = *iter;
@ -158,6 +189,10 @@ VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
// Check and initialize parameters for codec encoder.
if (!mInitialized) {
#ifdef PR_LOGGING
mVideoInitCounter++;
TRACK_LOG(PR_LOG_DEBUG, ("Init the video encoder %d times", mVideoInitCounter));
#endif
VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video));
while (!iter.IsEnded()) {
VideoChunk chunk = *iter;
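
Note: the TrackEncoder.cpp hunks above add init counters and PR_LOG calls that only exist when PR_LOGGING is defined. A rough, self-contained C++ sketch of that pattern follows (assuming NSPR's prlog.h is available; MyEncoder, MY_LOG and mInitCounter are illustrative stand-ins, not names from the patch):

    #include "prlog.h"   // NSPR logging, as included by the patch
    #include <cstdint>

    #ifdef PR_LOGGING
    static PRLogModuleInfo* gMyEncoderLog;                  // created lazily, like gTrackEncoderLog
    #define MY_LOG(type, msg) PR_LOG(gMyEncoderLog, type, msg)
    #else
    #define MY_LOG(type, msg)                               // logging compiled out entirely
    #endif

    class MyEncoder {
    public:
      MyEncoder()
    #ifdef PR_LOGGING
        : mInitCounter(0)
    #endif
      {
    #ifdef PR_LOGGING
        if (!gMyEncoderLog) {
          gMyEncoderLog = PR_NewLogModule("MyEncoder");     // one log module per class
        }
    #endif
      }

      void Init() {
    #ifdef PR_LOGGING
        mInitCounter++;                                     // counter exists only in logging builds
        MY_LOG(PR_LOG_DEBUG, ("Init the encoder %d times", mInitCounter));
    #endif
        // ... real initialization would go here ...
      }

    private:
    #ifdef PR_LOGGING
      uint32_t mInitCounter;
    #endif
    };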

View file

@ -31,14 +31,7 @@ class MediaStreamGraph;
class TrackEncoder
{
public:
TrackEncoder()
: mReentrantMonitor("media.TrackEncoder")
, mEncodingComplete(false)
, mEosSetInEncoder(false)
, mInitialized(false)
, mEndOfStream(false)
, mCanceled(false)
{}
TrackEncoder();
virtual ~TrackEncoder() {}
@ -131,6 +124,12 @@ protected:
* mReentrantMonitor.
*/
bool mCanceled;
#ifdef PR_LOGGING
// How many times we have tried to initialize the encoder.
uint32_t mAudioInitCounter;
uint32_t mVideoInitCounter;
#endif
};
class AudioTrackEncoder : public TrackEncoder

View file

@ -21,6 +21,7 @@ function startTest() {
var source = context.createBufferSource();
source.buffer = buffer;
source.loop = true;
var dest = context.createMediaStreamDestination();
var stopTriggered = false;
var onstopTriggered = false;

View file

@ -20,6 +20,7 @@ function startTest() {
var source = context.createBufferSource();
source.buffer = buffer;
source.loop = true;
var dest = context.createMediaStreamDestination();
source.connect(dest);

View file

@ -61,12 +61,15 @@ AlarmsManager.prototype = {
break;
}
let sandbox = new Cu.Sandbox(Cu.getWebIDLCallerPrincipal());
sandbox.data = aData;
let data = Cu.evalInSandbox("JSON.stringify(data)", sandbox);
let request = this.createRequest();
this._cpmm.sendAsyncMessage("AlarmsManager:Add",
{ requestId: this.getRequestId(request),
date: aDate,
ignoreTimezone: isIgnoreTimezone,
data: aData,
data: data,
pageURL: this._pageURL,
manifestURL: this._manifestURL });
return request;
@ -109,13 +112,16 @@ AlarmsManager.prototype = {
// We don't need to expose everything to the web content.
let alarms = [];
json.alarms.forEach(function trimAlarmInfo(aAlarm) {
let sandbox = new Cu.Sandbox(this._principal);
sandbox.data = aAlarm.data;
let data = Cu.evalInSandbox("JSON.parse(data)", sandbox);
let alarm = { "id": aAlarm.id,
"date": aAlarm.date,
"respectTimezone": aAlarm.ignoreTimezone ?
"ignoreTimezone" : "honorTimezone",
"data": aAlarm.data };
"data": data };
alarms.push(alarm);
});
}.bind(this));
Services.DOMRequest.fireSuccess(request,
Cu.cloneInto(alarms, this._window));
@ -153,10 +159,11 @@ AlarmsManager.prototype = {
// Get the manifest URL if this is an installed app
let appsService = Cc["@mozilla.org/AppsService;1"]
.getService(Ci.nsIAppsService);
let principal = aWindow.document.nodePrincipal;
this._pageURL = principal.URI.spec;
this._manifestURL = appsService.getManifestURLByLocalId(principal.appId);
this._window = aWindow;
this._principal = this._window.document.nodePrincipal;
this._pageURL = this._principal.URI.spec;
this._manifestURL =
appsService.getManifestURLByLocalId(this._principal.appId);
},
// Called from DOMRequestIpcHelper.

View file

@ -0,0 +1,2 @@
<!DOCTYPE html>
<html><head></head><body><span id="text">Nothing to see here</span><iframe name="subframe"></iframe></body></html>

View file

@ -1,6 +1,9 @@
[DEFAULT]
skip-if = e10s
support-files =
file_empty.html
[test_alarm_add_data.html]
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
[test_alarm_add_date.html]
@ -11,3 +14,5 @@ skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop spec
[test_alarm_permitted_app.html]
[test_alarm_remove.html]
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage
[test_bug1015540.html]
skip-if = (buildapp == 'b2g' && toolkit != 'gonk') #Bug 931116, b2g desktop specific, initial triage

View file

@ -0,0 +1,73 @@
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Test data Paramter of Alarm API for Bug 1015540</title>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1015540">Bug 1015540</a>
<p id="display"></p>
<div id="content" style="display: none"></div>
<pre id="test">
<script type="application/javascript">
"use strict";
// Verify passing a cross-origin object for the data paramter
function testCrossOriginObject() {
var tomorrow = new Date();
tomorrow.setDate(tomorrow.getDate() + 1);
var data = document.getElementById('ifr').contentWindow;
try {
navigator.mozAlarms.add(tomorrow, "honorTimezone", data);
ok(false, "Adding alarms for cross-origin objects should be prohibited.");
} catch (e) {
ok(true, "Adding alarms for cross-origin objects is prohibited.");
}
SimpleTest.finish();
}
function startTests() {
SpecialPowers.pushPrefEnv({
"set": [["dom.mozAlarms.enabled", true]]
}, function() {
SpecialPowers.addPermission("alarms", true, document);
var isAllowedToTest = true;
if (navigator.appVersion.indexOf("Android") !== -1) {
ok(true, "mozAlarms is not allowed on Android for now. " +
"TODO Bug 863557.");
isAllowedToTest = false;
} else if (SpecialPowers.wrap(document).nodePrincipal.appStatus ==
SpecialPowers.Ci.nsIPrincipal.APP_STATUS_NOT_INSTALLED) {
ok(true, "mozAlarms is not allowed for non-installed apps. " +
"TODO Bug 876981.");
isAllowedToTest = false;
}
if (isAllowedToTest) {
ok(true, "Start to test...");
testCrossOriginObject();
} else {
// A sanity check to make sure we must run tests on Firefox OS (B2G).
if (navigator.userAgent.indexOf("Mobile") != -1 &&
navigator.appVersion.indexOf("Android") == -1) {
ok(false, "Should run the test on Firefox OS (B2G)!");
}
SimpleTest.finish();
}
});
}
SimpleTest.waitForExplicitFinish();
</script>
</pre>
<iframe id="ifr" onload="startTests()" src="http://example.org/tests/dom/alarm/test/file_empty.html"></iframe>
</body>
</html>

View file

@ -64,7 +64,6 @@ SimpleTextureClientPool::GetTextureClient(bool aAutoRecycle)
RefPtr<TextureClient> textureClient;
if (mAvailableTextureClients.size()) {
textureClient = mAvailableTextureClients.top();
textureClient->WaitForBufferOwnership();
mAvailableTextureClients.pop();
RECYCLE_LOG("%s Skip allocate (%i left), returning %p\n", (mFormat == SurfaceFormat::B8G8R8A8?"poolA":"poolX"), mAvailableTextureClients.size(), textureClient.get());

View file

@ -44,7 +44,6 @@ TextureClientPool::GetTextureClient()
RefPtr<TextureClient> textureClient;
if (mTextureClients.size()) {
textureClient = mTextureClients.top();
textureClient->WaitForBufferOwnership();
mTextureClients.pop();
return textureClient;
}

View file

@ -142,6 +142,8 @@ public:
ID3D11Device* GetDevice() { return mDevice; }
ID3D11DeviceContext* GetDC() { return mContext; }
private:
// ensure mSize is up to date with respect to mWidget
void EnsureSize();

View file

@ -105,10 +105,14 @@ static bool LockD3DTexture(T* aTexture)
MOZ_ASSERT(aTexture);
RefPtr<IDXGIKeyedMutex> mutex;
aTexture->QueryInterface((IDXGIKeyedMutex**)byRef(mutex));
if (!mutex) {
return false;
// Textures created by the DXVA decoders don't have a mutex for synchronization
if (mutex) {
HRESULT hr = mutex->AcquireSync(0, INFINITE);
if (FAILED(hr)) {
NS_WARNING("Failed to lock the texture");
return false;
}
}
mutex->AcquireSync(0, INFINITE);
return true;
}
@ -119,7 +123,10 @@ static void UnlockD3DTexture(T* aTexture)
RefPtr<IDXGIKeyedMutex> mutex;
aTexture->QueryInterface((IDXGIKeyedMutex**)byRef(mutex));
if (mutex) {
mutex->ReleaseSync(0);
HRESULT hr = mutex->ReleaseSync(0);
if (FAILED(hr)) {
NS_WARNING("Failed to unlock the texture");
}
}
}
@ -364,12 +371,13 @@ DataTextureSourceD3D11::Update(DataSourceSurface* aSurface,
nsIntRegion* aDestRegion,
IntPoint* aSrcOffset)
{
// Right now we only support full surface update. If aDestRegion is provided,
// It will be ignored. Incremental update with a source offset is only used
// on Mac so it is not clear that we ever will need to support it for D3D.
// Incremental update with a source offset is only used on Mac so it is not
// clear that we ever will need to support it for D3D.
MOZ_ASSERT(!aSrcOffset);
MOZ_ASSERT(aSurface);
HRESULT hr;
if (!mCompositor || !mCompositor->GetDevice()) {
return false;
}
@ -380,23 +388,59 @@ DataTextureSourceD3D11::Update(DataSourceSurface* aSurface,
mSize = aSurface->GetSize();
mFormat = aSurface->GetFormat();
CD3D11_TEXTURE2D_DESC desc(dxgiFormat, mSize.width, mSize.height,
1, 1, D3D11_BIND_SHADER_RESOURCE,
D3D11_USAGE_IMMUTABLE);
CD3D11_TEXTURE2D_DESC desc(dxgiFormat, mSize.width, mSize.height, 1, 1);
int32_t maxSize = mCompositor->GetMaxTextureSize();
if ((mSize.width <= maxSize && mSize.height <= maxSize) ||
(mFlags & TextureFlags::DISALLOW_BIGIMAGE)) {
D3D11_SUBRESOURCE_DATA initData;
initData.pSysMem = aSurface->GetData();
initData.SysMemPitch = aSurface->Stride();
mCompositor->GetDevice()->CreateTexture2D(&desc, &initData, byRef(mTexture));
mIsTiled = false;
if (!mTexture) {
Reset();
return false;
if (mTexture) {
D3D11_TEXTURE2D_DESC currentDesc;
mTexture->GetDesc(&currentDesc);
// Make sure there's no size mismatch, if there is, recreate.
if (currentDesc.Width != mSize.width || currentDesc.Height != mSize.height ||
currentDesc.Format != dxgiFormat) {
mTexture = nullptr;
// Make sure we upload the whole surface.
aDestRegion = nullptr;
}
}
if (!mTexture) {
hr = mCompositor->GetDevice()->CreateTexture2D(&desc, nullptr, byRef(mTexture));
mIsTiled = false;
if (FAILED(hr) || !mTexture) {
Reset();
return false;
}
}
DataSourceSurface::MappedSurface map;
aSurface->Map(DataSourceSurface::MapType::READ, &map);
if (aDestRegion) {
nsIntRegionRectIterator iter(*aDestRegion);
const nsIntRect *iterRect;
while ((iterRect = iter.Next())) {
D3D11_BOX box;
box.front = 0;
box.back = 1;
box.left = iterRect->x;
box.top = iterRect->y;
box.right = iterRect->XMost();
box.bottom = iterRect->YMost();
void* data = map.mData + map.mStride * iterRect->y + BytesPerPixel(aSurface->GetFormat()) * iterRect->x;
mCompositor->GetDC()->UpdateSubresource(mTexture, 0, &box, data, map.mStride, map.mStride * mSize.height);
}
} else {
mCompositor->GetDC()->UpdateSubresource(mTexture, 0, nullptr, aSurface->GetData(),
aSurface->Stride(), aSurface->Stride() * mSize.height);
}
aSurface->Unmap();
} else {
mIsTiled = true;
uint32_t tileCount = GetRequiredTilesD3D11(mSize.width, maxSize) *
@ -410,6 +454,7 @@ DataTextureSourceD3D11::Update(DataSourceSurface* aSurface,
desc.Width = tileRect.width;
desc.Height = tileRect.height;
desc.Usage = D3D11_USAGE_IMMUTABLE;
D3D11_SUBRESOURCE_DATA initData;
initData.pSysMem = aSurface->GetData() +
@ -417,8 +462,8 @@ DataTextureSourceD3D11::Update(DataSourceSurface* aSurface,
tileRect.x * bpp;
initData.SysMemPitch = aSurface->Stride();
mCompositor->GetDevice()->CreateTexture2D(&desc, &initData, byRef(mTileTextures[i]));
if (!mTileTextures[i]) {
hr = mCompositor->GetDevice()->CreateTexture2D(&desc, &initData, byRef(mTileTextures[i]));
if (FAILED(hr) || !mTileTextures[i]) {
Reset();
return false;
}
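
Note: the LockD3DTexture/UnlockD3DTexture change above makes the keyed mutex optional (textures coming from the DXVA video decoders have none) and checks the HRESULTs instead of assuming success. A rough standalone sketch of that guarded lock/unlock pattern against the plain D3D11/DXGI API, with hypothetical helper names and error handling reduced to a bool:

    #include <d3d11.h>
    #include <dxgi.h>

    // Acquire the keyed mutex if the texture exposes one; textures without a
    // mutex are treated as always lockable.
    template <typename T>
    static bool LockTextureIfNeeded(T* aTexture)
    {
      IDXGIKeyedMutex* mutex = nullptr;
      aTexture->QueryInterface(__uuidof(IDXGIKeyedMutex),
                               reinterpret_cast<void**>(&mutex));
      if (!mutex) {
        return true;                     // no mutex: nothing to synchronize on
      }
      HRESULT hr = mutex->AcquireSync(0, INFINITE);
      mutex->Release();                  // drop the COM reference, keep the sync
      return SUCCEEDED(hr);              // a failed AcquireSync fails the lock
    }

    template <typename T>
    static void UnlockTextureIfNeeded(T* aTexture)
    {
      IDXGIKeyedMutex* mutex = nullptr;
      aTexture->QueryInterface(__uuidof(IDXGIKeyedMutex),
                               reinterpret_cast<void**>(&mutex));
      if (mutex) {
        if (FAILED(mutex->ReleaseSync(0))) {
          // worth a warning, but nothing more can be done here
        }
        mutex->Release();
      }
    }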

View file

@ -46,7 +46,7 @@
callFunction(std_Object_hasOwnProperty, obj, prop)
function TypedObjectTypeDescr(typedObj) {
return TYPROTO_DESCR(typedObj.__proto__);
return TYPROTO_DESCR(std_Object_getPrototypeOf(typedObj));
}
///////////////////////////////////////////////////////////////////////////

View file

@ -63,6 +63,7 @@ var std_Number_POSITIVE_INFINITY = Number.POSITIVE_INFINITY;
var std_Object_create = Object.create;
var std_Object_getOwnPropertyNames = Object.getOwnPropertyNames;
var std_Object_hasOwnProperty = Object.prototype.hasOwnProperty;
var std_Object_getPrototypeOf = Object.getPrototypeOf;
var std_RegExp_test = RegExp.prototype.test;
var std_String_fromCharCode = String.fromCharCode;
var std_String_charCodeAt = String.prototype.charCodeAt;

View file

@ -1099,8 +1099,8 @@ if test "$GNU_CC"; then
if test "${OS_ARCH}" != Darwin; then
CFLAGS="$CFLAGS -fgnu89-inline"
fi
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(notdir $@) -o $@'
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@'
DSO_LDOPTS='-shared'
if test "$GCC_USE_GNU_LD"; then
# Some tools like ASan use a runtime library that is only
@ -1192,8 +1192,8 @@ elif test "$SOLARIS_SUNPRO_CC"; then
fi
_DEFINES_CFLAGS='$(ACDEFINES) -D_JS_CONFDEFS_H_ -DMOZILLA_CLIENT'
else
MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
MKCSHLIB='$(LD) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
DSO_LDOPTS='-shared'
if test "$GNU_LD"; then
@ -1799,8 +1799,8 @@ ia64*-hpux*)
if test "$LIBRUNPATH"; then
DSO_LDOPTS="-Wl,-R$LIBRUNPATH $DSO_LDOPTS"
fi
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(notdir $@) -o $@'
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_CFLAGS) $(DSO_LDOPTS) -Wl,-soname,$(DSO_SONAME) -o $@'
;;
*-openbsd*)
@ -1840,8 +1840,8 @@ ia64*-hpux*)
[LDFLAGS=$_SAVE_LDFLAGS])
fi
MOZ_OPTIMIZE_FLAGS="-xO4"
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(notdir $@) -o $@'
MKSHLIB='$(CXX) $(CXXFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
MKCSHLIB='$(CC) $(CFLAGS) $(DSO_PIC_FLAGS) $(DSO_LDOPTS) -h $(DSO_SONAME) -o $@'
MKSHLIB_FORCE_ALL='-z allextract'
MKSHLIB_UNFORCE_ALL='-z defaultextract'
DSO_LDOPTS='-G'

View file

@ -321,9 +321,8 @@ class GCRuntime
bool isGcNeeded() { return isNeeded; }
double computeHeapGrowthFactor(size_t lastBytes) const;
size_t computeTriggerBytes(double growthFactor, size_t lastBytes,
JSGCInvocationKind gckind) const;
double computeHeapGrowthFactor(size_t lastBytes);
size_t computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind);
size_t allocationThreshold() { return allocThreshold; }
JSGCMode gcMode() const { return mode; }
@ -394,7 +393,7 @@ class GCRuntime
bool releaseObservedTypes();
void endSweepingZoneGroup();
bool sweepPhase(SliceBudget &sliceBudget);
void endSweepPhase(bool lastGC);
void endSweepPhase(JSGCInvocationKind gckind, bool lastGC);
void sweepZones(FreeOp *fop, bool lastGC);
void decommitArenasFromAvailableList(Chunk **availableListHeadp);
void decommitArenas();
@ -514,9 +513,6 @@ class GCRuntime
/* Whether all compartments are being collected in first GC slice. */
bool isFull;
/* The kind of the last collection. */
JSGCInvocationKind lastKind;
/* The reason that an interrupt-triggered GC should be called. */
JS::gcreason::Reason triggerReason;

View file

@ -31,7 +31,6 @@ JS::Zone::Zone(JSRuntime *rt)
gcMallocBytes(0),
gcMallocGCTriggered(false),
gcBytes(0),
gcBytesAfterGC(0),
gcTriggerBytes(0),
data(nullptr),
isSystem(false),

View file

@ -106,6 +106,7 @@ struct Zone : public JS::shadow::Zone,
size_t *baselineStubsOptimized);
void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
void reduceGCTriggerBytes(size_t amount);
void resetGCMallocBytes();
void setGCMaxMallocBytes(size_t value);
@ -247,12 +248,8 @@ struct Zone : public JS::shadow::Zone,
// updated by both the main and GC helper threads.
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
// The number of bytes allocated in the GC heap for this zone after the last GC.
size_t gcBytesAfterGC;
// GC trigger threshold for allocations on the GC heap. It is updated by
// both the main and GC helper threads.
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcTriggerBytes;
// GC trigger threshold for allocations on the GC heap.
size_t gcTriggerBytes;
// Per-zone data for use by an embedder.
void *data;

View file

@ -455,7 +455,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext *cx)
JS_ASSERT(!v.label);
v.patchOffset.fixup(&masm);
uintptr_t offset = masm.actualOffset(v.labelOffset);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, v.patchOffset),
ImmPtr(code->raw() + offset),
ImmPtr(0));
}

View file

@ -6752,7 +6752,7 @@ GenerateInterruptExit(ModuleCompiler &m, Label *throwLabel)
// Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
// during jump delay slot.
JS_ASSERT(Imm16::isInSignedRange(AsmJSModule::heapGlobalDataOffset()));
JS_ASSERT(Imm16::IsInSignedRange(AsmJSModule::heapGlobalDataOffset()));
masm.pop(HeapReg);
masm.as_jr(HeapReg);
masm.loadPtr(Address(GlobalReg, AsmJSModule::heapGlobalDataOffset()), HeapReg);

View file

@ -374,7 +374,7 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl
if (!staticLinkData_.relativeLinks.append(link))
return false;
labelOffset = Assembler::extractCodeLabelOffset(code_ + patchAtOffset);
labelOffset = Assembler::ExtractCodeLabelOffset(code_ + patchAtOffset);
}
}
@ -399,7 +399,7 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl
RelativeLink link(RelativeLink::InstructionImmediate);
link.patchAtOffset = masm.longJump(i);
InstImm *inst = (InstImm *)(code_ + masm.longJump(i));
link.targetOffset = Assembler::extractLuiOriValue(inst, inst->next()) - (uint32_t)code_;
link.targetOffset = Assembler::ExtractLuiOriValue(inst, inst->next()) - (uint32_t)code_;
if (!staticLinkData_.relativeLinks.append(link))
return false;
}
@ -597,12 +597,12 @@ AsmJSModule::staticallyLink(ExclusiveContext *cx)
if (link.isRawPointerPatch())
*(uint8_t **)(patchAt) = target;
else
Assembler::patchInstructionImmediate(patchAt, PatchedImmPtr(target));
Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
}
for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) {
AbsoluteLink link = staticLinkData_.absoluteLinks[i];
Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
PatchedImmPtr(AddressOf(link.target, cx)),
PatchedImmPtr((void*)-1));
}
@ -642,7 +642,7 @@ AsmJSModule::initHeap(Handle<ArrayBufferObject*> heap, JSContext *cx)
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
uint32_t heapLength = heap->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
jit::Assembler::updateBoundsCheck(heapLength,
jit::Assembler::UpdateBoundsCheck(heapLength,
(jit::Instruction*)(heapAccesses_[i].offset() + code_));
}
#endif
@ -652,11 +652,11 @@ void
AsmJSModule::restoreToInitialState(ArrayBufferObject *maybePrevBuffer, ExclusiveContext *cx)
{
#ifdef DEBUG
// Put the absolute links back to -1 so patchDataWithValueCheck assertions
// Put the absolute links back to -1 so PatchDataWithValueCheck assertions
// in staticallyLink are valid.
for (size_t i = 0; i < staticLinkData_.absoluteLinks.length(); i++) {
AbsoluteLink link = staticLinkData_.absoluteLinks[i];
Assembler::patchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code_ + link.patchAt.offset()),
PatchedImmPtr((void*)-1),
PatchedImmPtr(AddressOf(link.target, cx)));
}

View file

@ -218,7 +218,7 @@ BaselineCompiler::compile()
label.fixup(&masm);
size_t icEntry = icLoadLabels_[i].icEntry;
ICEntry *entryAddr = &(baselineScript->icEntry(icEntry));
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, label),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label),
ImmPtr(entryAddr),
ImmPtr((void*)-1));
}

View file

@ -6723,7 +6723,7 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
ionScript->setHasUncompiledCallTarget();
invalidateEpilogueData_.fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
ImmPtr(ionScript),
ImmPtr((void*)-1));
@ -6745,7 +6745,7 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
for (size_t i = 0; i < ionScriptLabels_.length(); i++) {
ionScriptLabels_[i].fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, ionScriptLabels_[i]),
ImmPtr(ionScript),
ImmPtr((void*)-1));
}
@ -6783,14 +6783,14 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
patchableTraceLoggers_[i].fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
ImmPtr(logger),
ImmPtr(nullptr));
}
uint32_t scriptId = TraceLogCreateTextId(logger, script);
for (uint32_t i = 0; i < patchableTLScripts_.length(); i++) {
patchableTLScripts_[i].fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
ImmPtr((void *) uintptr_t(scriptId)),
ImmPtr((void *)0));
}

View file

@ -2621,14 +2621,14 @@ InvalidateActivation(FreeOp *fop, uint8_t *jitTop, bool invalidateAll)
CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
(it.returnAddressToFp() - ionCode->raw());
Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta));
Assembler::PatchWrite_Imm32(dataLabelToMunge, Imm32(delta));
CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
CodeLocationLabel invalidateEpilogue(ionCode, CodeOffsetLabel(ionScript->invalidateEpilogueOffset()));
IonSpew(IonSpew_Invalidate, " ! Invalidate ionScript %p (ref %u) -> patching osipoint %p",
ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw());
Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
Assembler::PatchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
}
IonSpew(IonSpew_Invalidate, "END invalidating activation");

View file

@ -246,7 +246,7 @@ class IonCache::StubAttacher
void patchStubCodePointer(MacroAssembler &masm, JitCode *code) {
if (hasStubCodePatchOffset_) {
stubCodePatchOffset_.fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, stubCodePatchOffset_),
ImmPtr(code), STUB_ADDR);
}
}
@ -373,7 +373,7 @@ DispatchIonCache::updateBaseAddress(JitCode *code, MacroAssembler &masm)
IonCache::updateBaseAddress(code, masm);
dispatchLabel_.fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, dispatchLabel_),
ImmPtr(&firstStub_),
ImmPtr((void*)-1));
firstStub_ = fallbackLabel_.raw();

View file

@ -375,7 +375,7 @@ class RepatchIonCache : public IonCache
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
uint32_t i = 0;
while (i < REJOIN_LABEL_OFFSET)
ptr = Assembler::nextInstruction(ptr, &i);
ptr = Assembler::NextInstruction(ptr, &i);
#endif
return CodeLocationLabel(ptr);
}

View file

@ -155,7 +155,7 @@ JitFrameIterator::checkInvalidation(IonScript **ionScriptOut) const
int32_t invalidationDataOffset = ((int32_t *) returnAddr)[-1];
uint8_t *ionScriptDataOffset = returnAddr + invalidationDataOffset;
IonScript *ionScript = (IonScript *) Assembler::getPointer(ionScriptDataOffset);
IonScript *ionScript = (IonScript *) Assembler::GetPointer(ionScriptDataOffset);
JS_ASSERT(ionScript->containsReturnAddress(returnAddr));
*ionScriptOut = ionScript;
return true;
@ -1426,7 +1426,7 @@ OsiIndex::returnPointDisplacement() const
// In general, pointer arithmetic on code is bad, but in this case,
// getting the return address from a call instruction, stepping over pools
// would be wrong.
return callPointDisplacement_ + Assembler::patchWrite_NearCallSize();
return callPointDisplacement_ + Assembler::PatchWrite_NearCallSize();
}
SnapshotIterator::SnapshotIterator(IonScript *ionScript, SnapshotOffset snapshotOffset,

View file

@ -902,7 +902,7 @@ class MacroAssembler : public MacroAssemblerSpecific
// be unset if the code never needed to push its JitCode*.
if (hasEnteredExitFrame()) {
exitCodePatch_.fixup(this);
patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
PatchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
ImmPtr(code),
ImmPtr((void*)-1));
}

View file

@ -1428,7 +1428,7 @@ class LSafepoint : public TempObject
// In general, pointer arithmetic on code is bad, but in this case,
// getting the return address from a call instruction, stepping over pools
// would be wrong.
return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize();
return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
}
uint32_t osiCallPointOffset() const {
return osiCallPointOffset_;

View file

@ -397,7 +397,7 @@ SafepointReader::SafepointReader(IonScript *script, const SafepointIndex *si)
uint32_t
SafepointReader::osiReturnPointOffset() const
{
return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize();
return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
}
CodeLocationLabel

View file

@ -160,9 +160,9 @@ uint32_t GetARMFlags()
isSet = true;
#if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
// This should really be detected at runtime, but /proc/*/auxv
// doesn't seem to carry the ISA. We could look in
// /proc/cpuinfo as well, but the chances that it will be
// different from this are low.
// doesn't seem to carry the ISA. We could look in /proc/cpuinfo
// as well, but the chances that it will be different from this
// are low.
flags |= HWCAP_ARMv7;
#endif
return flags;
@ -314,14 +314,14 @@ VFPRegister::ReduceSetForPush(const FloatRegisterSet &s)
FloatRegisterSet mod;
for (TypedRegisterIterator<FloatRegister> iter(s); iter.more(); iter++) {
if ((*iter).isSingle()) {
// add in just this float
// Add in just this float.
mod.addUnchecked(*iter);
} else if ((*iter).id() < 16) {
// a double with an overlay, add in both floats
// A double with an overlay, add in both floats.
mod.addUnchecked((*iter).singleOverlay(0));
mod.addUnchecked((*iter).singleOverlay(1));
} else {
// add in the lone double in the range 16-31
// Add in the lone double in the range 16-31.
mod.addUnchecked(*iter);
}
}

View file

@ -14,7 +14,8 @@
#include "js/Utility.h"
// gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float target.
// Gcc appears to use __ARM_PCS_VFP to denote that the target is a hard-float
// target.
#if defined(__ARM_PCS_VFP)
#define JS_CODEGEN_ARM_HARDFP
#endif
@ -38,11 +39,11 @@ static const uint32_t ShadowStackSpace = 0;
// These offsets are related to bailouts.
////
// Size of each bailout table entry. On arm, this is presently
// a single call (which is wrong!). the call clobbers lr.
// For now, I've dealt with this by ensuring that we never allocate to lr.
// it should probably be 8 bytes, a mov of an immediate into r12 (not
// allocated presently, or ever) followed by a branch to the apropriate code.
// Size of each bailout table entry. On arm, this is presently a single call
// (which is wrong!). The call clobbers lr.
// For now, I've dealt with this by ensuring that we never allocate to lr. It
// should probably be 8 bytes, a mov of an immediate into r12 (not allocated
// presently, or ever) followed by a branch to the apropriate code.
static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 4;
class Registers
@ -139,7 +140,7 @@ class Registers
// Registers returned from a JS -> C call.
static const uint32_t CallMask =
(1 << Registers::r0) |
(1 << Registers::r1); // used for double-size returns
(1 << Registers::r1); // Used for double-size returns.
static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
typedef uint32_t SetType;
@ -240,9 +241,9 @@ class TypedRegisterSet;
class VFPRegister
{
public:
// What type of data is being stored in this register?
// UInt / Int are specifically for vcvt, where we need
// to know how the data is supposed to be converted.
// What type of data is being stored in this register? UInt / Int are
// specifically for vcvt, where we need to know how the data is supposed to
// be converted.
enum RegType {
Single = 0x0,
Double = 0x1,
@ -255,13 +256,11 @@ class VFPRegister
protected:
RegType kind : 2;
// ARM doesn't have more than 32 registers...
// don't take more bits than we'll need.
// Presently, I don't have plans to address the upper
// and lower halves of the double registers seprately, so
// 5 bits should suffice. If I do decide to address them seprately
// (vmov, I'm looking at you), I will likely specify it as a separate
// field.
// ARM doesn't have more than 32 registers. Don't take more bits than we'll
// need. Presently, we don't have plans to address the upper and lower
// halves of the double registers seprately, so 5 bits should suffice. If we
// do decide to address them seprately (vmov, I'm looking at you), we will
// likely specify it as a separate field.
public:
Code code_ : 5;
protected:
@ -307,7 +306,7 @@ class VFPRegister
struct VFPRegIndexSplit;
VFPRegIndexSplit encode();
// for serializing values
// For serializing values.
struct VFPRegIndexSplit {
const uint32_t block : 4;
const uint32_t bit : 1;
@ -325,8 +324,8 @@ class VFPRegister
Code code() const {
JS_ASSERT(!_isInvalid && !_isMissing);
// this should only be used in areas where we only have doubles
// and singles.
// This should only be used in areas where we only have doubles and
// singles.
JS_ASSERT(isFloat());
return Code(code_);
}
@ -391,8 +390,8 @@ class VFPRegister
}
// | d0 |
// | s0 | s1 |
// if we've stored s0 and s1 in memory, we also want to say that d0
// is stored there, but it is only stored at the location where it is aligned
// If we've stored s0 and s1 in memory, we also want to say that d0 is
// stored there, but it is only stored at the location where it is aligned
// e.g. at s0, not s1.
void alignedAliased(uint32_t aliasIdx, VFPRegister *ret) {
if (aliasIdx == 0) {
@ -424,8 +423,7 @@ class VFPRegister
};
// The only floating point register set that we work with
// are the VFP Registers
// The only floating point register set that we work with are the VFP Registers.
typedef VFPRegister FloatRegister;
uint32_t GetARMFlags();
@ -435,16 +433,16 @@ bool HasVFP();
bool Has32DP();
bool HasIDIV();
// Arm/D32 has double registers that can NOT be treated as float32
// and this requires some dances in lowering.
// Arm/D32 has double registers that can NOT be treated as float32 and this
// requires some dances in lowering.
inline bool
hasUnaliasedDouble()
{
return Has32DP();
}
// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32
// to a double as a temporary, you need a temporary double register.
// On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 to
// a double as a temporary, you need a temporary double register.
inline bool
hasMultiAlias()
{
@ -453,8 +451,9 @@ hasMultiAlias()
bool ParseARMHwCapFlags(const char *armHwCap);
// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is static
// and useHardFpABI is inlined so that unused branches can be optimized away.
// If the simulator is used then the ABI choice is dynamic. Otherwise the ABI is
// static and useHardFpABI is inlined so that unused branches can be optimized
// away.
#if defined(JS_ARM_SIMULATOR)
bool UseHardFpABI();
#else

The diff for this file is not shown because it is too large.

The diff for this file is not shown because it is too large.

View file

@ -20,9 +20,9 @@ namespace jit {
class BailoutStack
{
uintptr_t frameClassId_;
// This is pushed in the bailout handler. Both entry points into the handler
// This is pushed in the bailout handler. Both entry points into the handler
// inserts their own value int lr, which is then placed onto the stack along
// with frameClassId_ above. This should be migrated to ip.
// with frameClassId_ above. This should be migrated to ip.
public:
union {
uintptr_t frameSize_;

View file

@ -46,7 +46,7 @@ EmitCallIC(CodeOffsetLabel *patchOffset, MacroAssembler &masm)
JS_ASSERT(R2 == ValueOperand(r1, r0));
masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), r0);
// Call the stubcode via a direct branch-and-link
// Call the stubcode via a direct branch-and-link.
masm.ma_blx(r0);
}
@ -54,8 +54,8 @@ inline void
EmitEnterTypeMonitorIC(MacroAssembler &masm,
size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
{
// This is expected to be called from within an IC, when BaselineStubReg
// is properly initialized to point to the stub.
// This is expected to be called from within an IC, when BaselineStubReg is
// properly initialized to point to the stub.
masm.loadPtr(Address(BaselineStubReg, (uint32_t) monitorStubOffset), BaselineStubReg);
// Load stubcode pointer from BaselineStubEntry.
@ -96,9 +96,9 @@ EmitTailCallVM(JitCode *target, MacroAssembler &masm, uint32_t argSize)
masm.store32(r1, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
// Push frame descriptor and perform the tail call.
// BaselineTailCallReg (lr) already contains the return address (as we keep it there through
// the stub calls), but the VMWrapper code being called expects the return address to also
// be pushed on the stack.
// BaselineTailCallReg (lr) already contains the return address (as we keep
// it there through the stub calls), but the VMWrapper code being called
// expects the return address to also be pushed on the stack.
JS_ASSERT(BaselineTailCallReg == lr);
masm.makeFrameDescriptor(r0, JitFrame_BaselineJS);
masm.push(r0);
@ -109,8 +109,8 @@ EmitTailCallVM(JitCode *target, MacroAssembler &masm, uint32_t argSize)
inline void
EmitCreateStubFrameDescriptor(MacroAssembler &masm, Register reg)
{
// Compute stub frame size. We have to add two pointers: the stub reg and previous
// frame pointer pushed by EmitEnterStubFrame.
// Compute stub frame size. We have to add two pointers: the stub reg and
// previous frame pointer pushed by EmitEnterStubFrame.
masm.mov(BaselineFrameReg, reg);
masm.ma_add(Imm32(sizeof(void *) * 2), reg);
masm.ma_sub(BaselineStackReg, reg);
@ -142,8 +142,8 @@ EmitEnterStubFrame(MacroAssembler &masm, Register scratch)
masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
// Note: when making changes here, don't forget to update STUB_FRAME_SIZE
// if needed.
// Note: when making changes here, don't forget to update STUB_FRAME_SIZE if
// needed.
// Push frame descriptor and return address.
masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
@ -162,10 +162,10 @@ EmitEnterStubFrame(MacroAssembler &masm, Register scratch)
inline void
EmitLeaveStubFrame(MacroAssembler &masm, bool calledIntoIon = false)
{
// Ion frames do not save and restore the frame pointer. If we called
// into Ion, we have to restore the stack pointer from the frame descriptor.
// If we performed a VM call, the descriptor has been popped already so
// in that case we use the frame pointer.
// Ion frames do not save and restore the frame pointer. If we called into
// Ion, we have to restore the stack pointer from the frame descriptor. If
// we performed a VM call, the descriptor has been popped already so in that
// case we use the frame pointer.
if (calledIntoIon) {
masm.pop(ScratchRegister);
masm.ma_lsr(Imm32(FRAMESIZE_SHIFT), ScratchRegister, ScratchRegister);
@ -190,11 +190,11 @@ EmitStowICValues(MacroAssembler &masm, int values)
JS_ASSERT(values >= 0 && values <= 2);
switch(values) {
case 1:
// Stow R0
// Stow R0.
masm.pushValue(R0);
break;
case 2:
// Stow R0 and R1
// Stow R0 and R1.
masm.pushValue(R0);
masm.pushValue(R1);
break;
@ -207,14 +207,14 @@ EmitUnstowICValues(MacroAssembler &masm, int values, bool discard = false)
JS_ASSERT(values >= 0 && values <= 2);
switch(values) {
case 1:
// Unstow R0
// Unstow R0.
if (discard)
masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
else
masm.popValue(R0);
break;
case 2:
// Unstow R0 and R1
// Unstow R0 and R1.
if (discard) {
masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
} else {
@ -230,17 +230,17 @@ EmitCallTypeUpdateIC(MacroAssembler &masm, JitCode *code, uint32_t objectOffset)
{
JS_ASSERT(R2 == ValueOperand(r1, r0));
// R0 contains the value that needs to be typechecked.
// The object we're updating is a boxed Value on the stack, at offset
// objectOffset from esp, excluding the return address.
// R0 contains the value that needs to be typechecked. The object we're
// updating is a boxed Value on the stack, at offset objectOffset from esp,
// excluding the return address.
// Save the current BaselineStubReg to stack, as well as the TailCallReg,
// since on ARM, the LR is live.
masm.push(BaselineStubReg);
masm.push(BaselineTailCallReg);
// This is expected to be called from within an IC, when BaselineStubReg
// is properly initialized to point to the stub.
// This is expected to be called from within an IC, when BaselineStubReg is
// properly initialized to point to the stub.
masm.loadPtr(Address(BaselineStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
BaselineStubReg);
@ -286,7 +286,7 @@ template <typename AddrType>
inline void
EmitPreBarrier(MacroAssembler &masm, const AddrType &addr, MIRType type)
{
// on ARM, lr is clobbered by patchableCallPreBarrier. Save it first.
// On ARM, lr is clobbered by patchableCallPreBarrier. Save it first.
masm.push(lr);
masm.patchableCallPreBarrier(addr, type);
masm.pop(lr);
@ -302,7 +302,7 @@ EmitStubGuardFailure(MacroAssembler &masm)
// BaselineStubEntry points to the current stub.
// Load next stub into BaselineStubReg
// Load next stub into BaselineStubReg.
masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfNext()), BaselineStubReg);
// Load stubcode pointer from BaselineStubEntry into scratch register.

View file

@ -36,7 +36,7 @@ ICCompare_Int32::Compiler::generateStubCode(MacroAssembler &masm)
masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
// Failure case - jump to next stub.
masm.bind(&failure);
EmitStubGuardFailure(masm);
@ -62,7 +62,7 @@ ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
// Failure case - jump to next stub.
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
@ -82,7 +82,7 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
// Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
// Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
Register scratchReg = R2.payloadReg();
// DIV and MOD need an extra non-volatile ValueOperand to hold R0.
@ -95,12 +95,12 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
case JSOP_ADD:
masm.ma_add(R0.payloadReg(), R1.payloadReg(), scratchReg, SetCond);
// Just jump to failure on overflow. R0 and R1 are preserved, so we can just jump to
// the next stub.
// Just jump to failure on overflow. R0 and R1 are preserved, so we can
// just jump to the next stub.
masm.j(Assembler::Overflow, &failure);
// Box the result and return. We know R0.typeReg() already contains the integer
// tag, so we just need to move the result value into place.
// Box the result and return. We know R0.typeReg() already contains the
// integer tag, so we just need to move the result value into place.
masm.mov(scratchReg, R0.payloadReg());
break;
case JSOP_SUB:
@ -131,7 +131,8 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
masm.ma_cmp(R0.payloadReg(), Imm32(0), Assembler::LessThan);
masm.j(Assembler::Equal, &failure);
// The call will preserve registers r4-r11. Save R0 and the link register.
// The call will preserve registers r4-r11. Save R0 and the link
// register.
JS_ASSERT(R1 == ValueOperand(r5, r4));
JS_ASSERT(R0 == ValueOperand(r3, r2));
masm.moveValue(R0, savedValue);
@ -222,7 +223,7 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
break;
}
// Failure case - jump to next stub
// Failure case - jump to next stub.
masm.bind(&failure);
EmitStubGuardFailure(masm);

View file

@ -23,16 +23,14 @@ static MOZ_CONSTEXPR_VAR Register BaselineFrameReg = r11;
static MOZ_CONSTEXPR_VAR Register BaselineStackReg = sp;
// ValueOperands R0, R1, and R2.
// R0 == JSReturnReg, and R2 uses registers not
// preserved across calls. R1 value should be
// preserved across calls.
// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value
// should be preserved across calls.
static MOZ_CONSTEXPR_VAR ValueOperand R0(r3, r2);
static MOZ_CONSTEXPR_VAR ValueOperand R1(r5, r4);
static MOZ_CONSTEXPR_VAR ValueOperand R2(r1, r0);
// BaselineTailCallReg and BaselineStubReg
// These use registers that are not preserved across
// calls.
// These use registers that are not preserved across calls.
static MOZ_CONSTEXPR_VAR Register BaselineTailCallReg = r14;
static MOZ_CONSTEXPR_VAR Register BaselineStubReg = r9;
@ -44,9 +42,9 @@ static MOZ_CONSTEXPR_VAR Register BaselineSecondScratchReg = r6;
// R7 - R9 are generally available for use within stubcode.
// Note that BaselineTailCallReg is actually just the link
// register. In ARM code emission, we do not clobber BaselineTailCallReg
// since we keep the return address for calls there.
// Note that BaselineTailCallReg is actually just the link register. In ARM code
// emission, we do not clobber BaselineTailCallReg since we keep the return
// address for calls there.
// FloatReg0 must be equal to ReturnFloatReg.
static MOZ_CONSTEXPR_VAR FloatRegister FloatReg0 = d0;

View file

@ -92,7 +92,7 @@ CodeGeneratorARM::generateEpilogue()
masm.freeStack(frameSize());
JS_ASSERT(masm.framePushed() == 0);
masm.pop(pc);
masm.dumpPool();
masm.flushBuffer();
return true;
}
@ -275,18 +275,22 @@ CodeGeneratorARM::visitMinMaxD(LMinMaxD *ins)
Label nan, equal, returnSecond, done;
masm.compareDouble(first, second);
masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
// First or second is NaN, result is NaN.
masm.ma_b(&nan, Assembler::VFP_Unordered);
// Make sure we handle -0 and 0 right.
masm.ma_b(&equal, Assembler::VFP_Equal);
masm.ma_b(&returnSecond, cond);
masm.ma_b(&done);
// Check for zero.
masm.bind(&equal);
masm.compareDouble(first, InvalidFloatReg);
masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
// First wasn't 0 or -0, so just return it.
masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered);
// So now both operands are either -0 or 0.
if (ins->mir()->isMax()) {
masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
// -0 + -0 = -0 and -0 + 0 = 0.
masm.ma_vadd(second, first, first);
} else {
masm.ma_vneg(first, first);
masm.ma_vsub(first, second, first);
@ -403,11 +407,11 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
break;
case 0:
masm.ma_mov(Imm32(0), ToRegister(dest));
return true; // escape overflow check;
return true; // Escape overflow check;
case 1:
// nop
// Nop
masm.ma_mov(ToRegister(lhs), ToRegister(dest));
return true; // escape overflow check;
return true; // Escape overflow check;
case 2:
masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
// Overflow is handled later.
@ -417,17 +421,19 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
if (constant > 0) {
// Try shift and add sequences for a positive constant.
if (!mul->canOverflow()) {
// If it cannot overflow, we can do lots of optimizations
// If it cannot overflow, we can do lots of optimizations.
Register src = ToRegister(lhs);
uint32_t shift = FloorLog2(constant);
uint32_t rest = constant - (1 << shift);
// See if the constant has one bit set, meaning it can be encoded as a bitshift
// See if the constant has one bit set, meaning it can be
// encoded as a bitshift.
if ((1 << shift) == constant) {
masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
handled = true;
} else {
// If the constant cannot be encoded as (1<<C1), see if it can be encoded as
// (1<<C1) | (1<<C2), which can be computed using an add and a shift
// If the constant cannot be encoded as (1 << C1), see
// if it can be encoded as (1 << C1) | (1 << C2), which
// can be computed using an add and a shift.
uint32_t shift_rest = FloorLog2(rest);
if ((1u << shift_rest) == rest) {
masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
@ -444,9 +450,9 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
if ((1 << shift) == constant) {
// dest = lhs * pow(2,shift)
masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
// At runtime, check (lhs == dest >> shift), if this does not hold,
// some bits were lost due to overflow, and the computation should
// be resumed as a double.
// At runtime, check (lhs == dest >> shift), if this
// does not hold, some bits were lost due to overflow,
// and the computation should be resumed as a double.
masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
c = Assembler::NotEqual;
handled = true;
@ -462,19 +468,19 @@ CodeGeneratorARM::visitMulI(LMulI *ins)
}
}
}
// Bailout on overflow
// Bailout on overflow.
if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
return false;
} else {
Assembler::Condition c = Assembler::Overflow;
//masm.imull(ToOperand(rhs), ToRegister(lhs));
// masm.imull(ToOperand(rhs), ToRegister(lhs));
if (mul->canOverflow())
c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
else
masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
// Bailout on overflow
// Bailout on overflow.
if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
return false;
@ -502,8 +508,11 @@ CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register out
if (mir->canBeNegativeOverflow()) {
// Handle INT32_MIN / -1;
// The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
// Sets EQ if lhs == INT32_MIN.
masm.ma_cmp(lhs, Imm32(INT32_MIN));
// If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
if (mir->canTruncateOverflow()) {
// (-INT32_MIN)|0 = INT32_MIN
Label skip;
@ -553,7 +562,7 @@ CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register out
bool
CodeGeneratorARM::visitDivI(LDivI *ins)
{
// Extract the registers from this instruction
// Extract the registers from this instruction.
Register lhs = ToRegister(ins->lhs());
Register rhs = ToRegister(ins->rhs());
Register temp = ToRegister(ins->getTemp(0));
@ -588,7 +597,7 @@ extern "C" {
bool
CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
{
// Extract the registers from this instruction
// Extract the registers from this instruction.
Register lhs = ToRegister(ins->lhs());
Register rhs = ToRegister(ins->rhs());
Register output = ToRegister(ins->output());
@ -662,19 +671,20 @@ bool
CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
LSnapshot *snapshot, Label &done)
{
// 0/X (with X < 0) is bad because both of these values *should* be doubles, and
// the result should be -0.0, which cannot be represented in integers.
// 0/X (with X < 0) is bad because both of these values *should* be doubles,
// and the result should be -0.0, which cannot be represented in integers.
// X/0 is bad because it will give garbage (or abort), when it should give
// either \infty, -\infty or NAN.
// Prevent 0 / X (with X < 0) and X / 0
// testing X / Y. Compare Y with 0.
// There are three cases: (Y < 0), (Y == 0) and (Y > 0)
// If (Y < 0), then we compare X with 0, and bail if X == 0
// If (Y == 0), then we simply want to bail. Since this does not set
// the flags necessary for LT to trigger, we don't test X, and take the
// bailout because the EQ flag is set.
// if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
// testing X / Y. Compare Y with 0.
// There are three cases: (Y < 0), (Y == 0) and (Y > 0).
// If (Y < 0), then we compare X with 0, and bail if X == 0.
// If (Y == 0), then we simply want to bail. Since this does not set the
// flags necessary for LT to trigger, we don't test X, and take the bailout
// because the EQ flag is set.
// If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
// the bailout.
if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
masm.ma_cmp(rhs, Imm32(0));
masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
@ -704,7 +714,7 @@ CodeGeneratorARM::visitModI(LModI *ins)
Register callTemp = ToRegister(ins->callTemp());
MMod *mir = ins->mir();
// save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
// Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
masm.ma_mov(lhs, callTemp);
Label done;
@ -713,7 +723,7 @@ CodeGeneratorARM::visitModI(LModI *ins)
masm.ma_smod(lhs, rhs, output);
// If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
// If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
if (mir->canBeNegativeDividend()) {
if (mir->isTruncated()) {
// -0.0|0 == 0
@ -735,7 +745,7 @@ CodeGeneratorARM::visitModI(LModI *ins)
bool
CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
{
// Extract the registers from this instruction
// Extract the registers from this instruction.
Register lhs = ToRegister(ins->lhs());
Register rhs = ToRegister(ins->rhs());
Register output = ToRegister(ins->output());
@ -743,15 +753,17 @@ CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
MMod *mir = ins->mir();
Label done;
// save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
// Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
masm.ma_mov(lhs, callTemp);
// Prevent INT_MIN % -1;
// The integer division will give INT_MIN, but we want -(double)INT_MIN.
if (mir->canBeNegativeDividend()) {
masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT_MIN), sets EQ if rhs == -1
// Sets EQ if lhs == INT_MIN
masm.ma_cmp(lhs, Imm32(INT_MIN));
// If EQ (LHS == INT_MIN), sets EQ if rhs == -1
masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
if (mir->isTruncated()) {
// (INT_MIN % -1)|0 == 0
Label skip;
@ -802,11 +814,12 @@ CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
Register out = ToRegister(ins->getDef(0));
MMod *mir = ins->mir();
Label fin;
// bug 739870, jbramley has a different sequence that may help with speed here
// bug 739870, jbramley has a different sequence that may help with speed
// here.
masm.ma_mov(in, out, SetCond);
masm.ma_b(&fin, Assembler::Zero);
masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
masm.ma_and(Imm32((1<<ins->shift())-1), out);
masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
if (mir->canBeNegativeDividend()) {
if (!mir->isTruncated()) {
@ -846,9 +859,8 @@ CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
{
const LAllocation *input = ins->getOperand(0);
const LDefinition *dest = ins->getDef(0);
// this will not actually be true on arm.
// We can not an imm8m in order to get a wider range
// of numbers
// This will not actually be true on arm. We can not use an imm8m in order to
// get a wider range of numbers.
JS_ASSERT(!input->isConstant());
masm.ma_mvn(ToRegister(input), ToRegister(dest));
@ -861,7 +873,7 @@ CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
const LAllocation *lhs = ins->getOperand(0);
const LAllocation *rhs = ins->getOperand(1);
const LDefinition *dest = ins->getDef(0);
// all of these bitops should be either imm32's, or integer registers.
// All of these bitops should be either imm32's, or integer registers.
switch (ins->bitop()) {
case JSOP_BITOR:
if (rhs->isConstant())
@ -928,8 +940,8 @@ CodeGeneratorARM::visitShiftI(LShiftI *ins)
}
} else {
// The shift amounts should be AND'ed into the 0-31 range since arm
// shifts by the lower byte of the register (it will attempt to shift
// by 250 if you ask it to).
// shifts by the lower byte of the register (it will attempt to shift by
// 250 if you ask it to).
masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);
switch (ins->bitop()) {
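As a reminder of why the mask matters, a tiny host-side sketch (hypothetical helper, not part of the patch): JS shift semantics use the count modulo 32, and masking also keeps the ARM register shift, which honours the whole low byte of the count, from shifting everything out:

    #include <cstdint>

    static uint32_t shiftLeftMasked(uint32_t value, uint32_t count)
    {
        count &= 0x1F;               // Same masking as ma_and(Imm32(0x1F), ...).
        return value << count;       // Now a well-defined shift by 0-31 bits.
    }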
@ -994,7 +1006,8 @@ CodeGeneratorARM::visitPowHalfD(LPowHalfD *ins)
masm.ma_vneg(ScratchDoubleReg, output, Assembler::Equal);
masm.ma_b(&done, Assembler::Equal);
// Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
// Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
// Adding 0 converts any -0 to 0.
masm.ma_vimm(0.0, ScratchDoubleReg);
masm.ma_vadd(ScratchDoubleReg, input, output);
masm.ma_vsqrt(output, output);
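A quick host-side check of the identity the comment relies on (illustrative only; assumes the default IEEE-754 round-to-nearest mode):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        double negZero = -0.0;
        // -0.0 + 0.0 == +0.0 under round-to-nearest, so sqrt sees +0.0 for
        // both signed zeros, matching Math.pow(-0, 0.5) == 0.
        printf("%g\n", std::sqrt(negZero + 0.0));   // prints 0
        printf("%g\n", std::pow(-0.0, 0.5));        // prints 0
        return 0;
    }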
@ -1067,45 +1080,46 @@ CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
bool
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, Register index, Register base)
{
// the code generated by this is utter hax.
// the end result looks something like:
// The code generated by this is utter hax.
// The end result looks something like:
// SUBS index, input, #base
// RSBSPL index, index, #max
// LDRPL pc, pc, index lsl 2
// B default
// If the range of targets is N through M, we first subtract off the lowest
// case (N), which both shifts the arguments into the range 0 to (M-N) with
// and sets the MInus flag if the argument was out of range on the low end.
// case (N), which both shifts the arguments into the range 0 to (M - N)
// and sets the Minus flag if the argument was out of range on the low
// end.
// Then we do a reverse subtract with the size of the jump table, which will
// reverse the order of range (It is size through 0, rather than 0 through
// size). The main purpose of this is that we set the same flag as the lower
// bound check for the upper bound check. Lastly, we do this conditionally
// size). The main purpose of this is that we set the same flag as the lower
// bound check for the upper bound check. Lastly, we do this conditionally
// on the previous check succeeding.
// Then we conditionally load the pc offset by the (reversed) index (times
// the address size) into the pc, which branches to the correct case.
// NOTE: when we go to read the pc, the value that we get back is the pc of
// the current instruction *PLUS 8*. This means that ldr foo, [pc, +0]
// reads $pc+8. In other words, there is an empty word after the branch into
// the switch table before the table actually starts. Since the only other
// unhandled case is the default case (both out of range high and out of range low)
// I then insert a branch to default case into the extra slot, which ensures
// we don't attempt to execute the address table.
// the address size) into the pc, which branches to the correct case. NOTE:
// when we go to read the pc, the value that we get back is the pc of the
// current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
// $pc+8. In other words, there is an empty word after the branch into the
// switch table before the table actually starts. Since the only other
// unhandled case is the default case (both out of range high and out of
// range low) I then insert a branch to default case into the extra slot,
// which ensures we don't attempt to execute the address table.
Label *defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
int32_t cases = mir->numCases();
// Lower value with low value
// Lower value with low value.
masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
AutoForbidPools afp(&masm);
masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
masm.ma_b(defaultcase);
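For clarity, a host-side sketch of the range check the instructions above perform; dispatchIndex is a hypothetical helper (the real code keeps everything in the condition flags rather than branching):

    #include <cstdint>

    // Returns the reversed table index, or -1 for the default case.
    static int32_t dispatchIndex(int32_t input, int32_t low, int32_t numCases)
    {
        int32_t index = input - low;                // SUBS index, input, #low
        if (index < 0)
            return -1;                              // Below the range: default.
        index = (numCases - 1) - index;             // RSBS index, index, #(cases - 1)
        if (index < 0)
            return -1;                              // Above the range: default.
        return index;                               // Index into the (reversed) table.
    }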
// To fill in the CodeLabels for the case entries, we need to first
// generate the case entries (we don't yet know their offsets in the
// instruction stream).
// To fill in the CodeLabels for the case entries, we need to first generate
// the case entries (we don't yet know their offsets in the instruction
// stream).
OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
for (int32_t i = 0; i < cases; i++) {
CodeLabel cl;
@ -1226,8 +1240,8 @@ CodeGeneratorARM::visitRound(LRound *lir)
Register output = ToRegister(lir->output());
FloatRegister tmp = ToFloatRegister(lir->temp());
Label bail;
// Output is either correct, or clamped. All -0 cases have been translated to a clamped
// case.a
// Output is either correct, or clamped. All -0 cases have been translated
// to a clamped case.
masm.round(input, output, &bail, tmp);
if (!bailoutFrom(&bail, lir->snapshot()))
return false;
@ -1241,8 +1255,8 @@ CodeGeneratorARM::visitRoundF(LRoundF *lir)
Register output = ToRegister(lir->output());
FloatRegister tmp = ToFloatRegister(lir->temp());
Label bail;
// Output is either correct, or clamped. All -0 cases have been translated to a clamped
// case.a
// Output is either correct, or clamped. All -0 cases have been translated
// to a clamped case.
masm.roundf(input, output, &bail, tmp);
if (!bailoutFrom(&bail, lir->snapshot()))
return false;
@ -1339,9 +1353,9 @@ CodeGeneratorARM::visitBox(LBox *box)
JS_ASSERT(!box->getOperand(0)->isConstant());
// On x86, the input operand and the output payload have the same
// virtual register. All that needs to be written is the type tag for
// the type definition.
// On x86, the input operand and the output payload have the same virtual
// register. All that needs to be written is the type tag for the type
// definition.
masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
return true;
}
@ -1414,11 +1428,10 @@ CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
MBasicBlock *ifTrue = test->ifTrue();
MBasicBlock *ifFalse = test->ifFalse();
// If the compare set the 0 bit, then the result
// is definately false.
// If the compare set the 0 bit, then the result is definitely false.
jumpToBlock(ifFalse, Assembler::Zero);
// it is also false if one of the operands is NAN, which is
// shown as Overflow.
// It is also false if one of the operands is NAN, which is shown as
// Overflow.
jumpToBlock(ifFalse, Assembler::Overflow);
jumpToBlock(ifTrue);
return true;
@ -1433,11 +1446,10 @@ CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
MBasicBlock *ifTrue = test->ifTrue();
MBasicBlock *ifFalse = test->ifFalse();
// If the compare set the 0 bit, then the result
// is definately false.
// If the compare set the 0 bit, then the result is definitely false.
jumpToBlock(ifFalse, Assembler::Zero);
// it is also false if one of the operands is NAN, which is
// shown as Overflow.
// It is also false if one of the operands is NAN, which is shown as
// Overflow.
jumpToBlock(ifFalse, Assembler::Overflow);
jumpToBlock(ifTrue);
return true;
@ -1629,22 +1641,22 @@ CodeGeneratorARM::visitNotI(LNotI *ins)
bool
CodeGeneratorARM::visitNotD(LNotD *ins)
{
// Since this operation is not, we want to set a bit if
// the double is falsey, which means 0.0, -0.0 or NaN.
// when comparing with 0, an input of 0 will set the Z bit (30)
// and NaN will set the V bit (28) of the APSR.
// Since this operation is not, we want to set a bit if the double is
// falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
// 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
FloatRegister opd = ToFloatRegister(ins->input());
Register dest = ToRegister(ins->output());
// Do the compare
// Do the compare.
masm.ma_vcmpz(opd);
// TODO There are three variations here to compare performance-wise.
bool nocond = true;
if (nocond) {
// Load the value into the dest register
// Load the value into the dest register.
masm.as_vmrs(dest);
masm.ma_lsr(Imm32(28), dest, dest);
masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
// 28 + 2 = 30
masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
masm.ma_and(Imm32(1), dest);
} else {
masm.as_vmrs(pc);
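A small host-side model of the flag extraction in the 'nocond' path above (hypothetical helper; bit positions follow the comment, Z at bit 30 and V at bit 28):

    #include <cstdint>

    static uint32_t notDoubleFromStatus(uint32_t status)
    {
        uint32_t t = status >> 28;   // V (bit 28) lands in bit 0, Z (bit 30) in bit 2.
        t |= t >> 2;                 // OR Z into bit 0: the "28 + 2 = 30" trick.
        return t & 1;                // 1 when the input compared as 0.0, -0.0 or NaN.
    }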
@ -1658,22 +1670,22 @@ CodeGeneratorARM::visitNotD(LNotD *ins)
bool
CodeGeneratorARM::visitNotF(LNotF *ins)
{
// Since this operation is not, we want to set a bit if
// the double is falsey, which means 0.0, -0.0 or NaN.
// when comparing with 0, an input of 0 will set the Z bit (30)
// and NaN will set the V bit (28) of the APSR.
// Since this operation is not, we want to set a bit if the double is
// falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
// 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
FloatRegister opd = ToFloatRegister(ins->input());
Register dest = ToRegister(ins->output());
// Do the compare
// Do the compare.
masm.ma_vcmpz_f32(opd);
// TODO There are three variations here to compare performance-wise.
bool nocond = true;
if (nocond) {
// Load the value into the dest register
// Load the value into the dest register.
masm.as_vmrs(dest);
masm.ma_lsr(Imm32(28), dest, dest);
masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
// 28 + 2 = 30
masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
masm.ma_and(Imm32(1), dest);
} else {
masm.as_vmrs(pc);
@ -1726,15 +1738,14 @@ CodeGeneratorARM::visitGuardClass(LGuardClass *guard)
bool
CodeGeneratorARM::generateInvalidateEpilogue()
{
// Ensure that there is enough space in the buffer for the OsiPoint
// patching to occur. Otherwise, we could overwrite the invalidation
// epilogue.
for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
// Ensure that there is enough space in the buffer for the OsiPoint patching
// to occur. Otherwise, we could overwrite the invalidation epilogue.
for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize())
masm.nop();
masm.bind(&invalidate_);
// Push the return address of the point that we bailed out at onto the stack
// Push the return address of the point that we bailed out at onto the stack.
masm.Push(lr);
// Push the Ion script onto the stack (when we determine what that pointer is).
@ -1743,8 +1754,8 @@ CodeGeneratorARM::generateInvalidateEpilogue()
masm.branch(thunk);
// We should never reach this point in JIT code -- the invalidation thunk should
// pop the invalidated JS frame and return directly to its caller.
// We should never reach this point in JIT code -- the invalidation thunk
// should pop the invalidated JS frame and return directly to its caller.
masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
return true;
}

View file

@ -26,9 +26,9 @@ class CodeGeneratorARM : public CodeGeneratorShared
// Label for the common return path.
NonAssertingLabel returnLabel_;
NonAssertingLabel deoptLabel_;
// ugh. this is not going to be pretty to move over.
// stack slotted variables are not useful on arm.
// it looks like this will need to return one of two types.
// Ugh. This is not going to be pretty to move over. Stack slotted variables
// are not useful on arm. It looks like this will need to return one of two
// types.
inline Operand ToOperand(const LAllocation &a) {
if (a.isGeneralReg())
return Operand(a.toGeneralReg()->reg());

View file

@ -144,10 +144,10 @@ class LDivI : public LBinaryMath<1>
// takes two arguments (dividend in r0, divisor in r1). The LInstruction gets
// encoded such that the divisor and dividend are passed in their appropriate
// registers and end their life at the start of the instruction by the use of
// useFixedAtStart. The result is returned in r0 and the other three registers
// that can be trashed are marked as temps. For the time being, the link
// useFixedAtStart. The result is returned in r0 and the other three registers
// that can be trashed are marked as temps. For the time being, the link
// register is not marked as trashed because we never allocate to the link
// register. The FP registers are not trashed.
// register. The FP registers are not trashed.
class LSoftDivI : public LBinaryMath<3>
{
public:
@ -304,7 +304,7 @@ class LPowHalfD : public LInstructionHelper<1, 1, 0>
}
};
// Takes a tableswitch with an integer to decide
// Takes a tableswitch with an integer to decide.
class LTableSwitch : public LInstructionHelper<0, 1, 1>
{
public:
@ -332,7 +332,7 @@ class LTableSwitch : public LInstructionHelper<0, 1, 1>
}
};
// Takes a tableswitch with an integer to decide
// Takes a tableswitch with an integer to decide.
class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
{
public:

Diff not shown because of its large size.

View file

@ -25,17 +25,19 @@ static Register CallReg = ip;
static const int defaultShift = 3;
JS_STATIC_ASSERT(1 << defaultShift == sizeof(jsval));
// MacroAssemblerARM is inheriting form Assembler defined in Assembler-arm.{h,cpp}
// MacroAssemblerARM is inheriting from Assembler defined in
// Assembler-arm.{h,cpp}
class MacroAssemblerARM : public Assembler
{
protected:
// On ARM, some instructions require a second scratch register. This register
// defaults to lr, since it's non-allocatable (as it can be clobbered by some
// instructions). Allow the baseline compiler to override this though, since
// baseline IC stubs rely on lr holding the return address.
// On ARM, some instructions require a second scratch register. This
// register defaults to lr, since it's non-allocatable (as it can be
// clobbered by some instructions). Allow the baseline compiler to override
// this though, since baseline IC stubs rely on lr holding the return
// address.
Register secondScratchReg_;
// higher level tag testing code
// Higher level tag testing code.
Operand ToPayload(Operand base) {
return Operand(Register::FromCode(base.base()), base.disp());
}
@ -85,10 +87,9 @@ class MacroAssemblerARM : public Assembler
void negateDouble(FloatRegister reg);
void inc64(AbsoluteAddress dest);
// somewhat direct wrappers for the low-level assembler funcitons
// bitops
// attempt to encode a virtual alu instruction using
// two real instructions.
// Somewhat direct wrappers for the low-level assembler functions:
// bitops. Attempt to encode a virtual alu instruction using two real
// instructions.
private:
bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
SetCond_ sc, Condition c);
@ -107,9 +108,11 @@ class MacroAssemblerARM : public Assembler
RelocStyle rs, Instruction *i = nullptr);
void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
RelocStyle rs, Instruction *i = nullptr);
// These should likely be wrapped up as a set of macros
// or something like that. I cannot think of a good reason
// to explicitly have all of this code.
// These should likely be wrapped up as a set of macros or something like
// that. I cannot think of a good reason to explicitly have all of this
// code.
// ALU based ops
// mov
void ma_mov(Register src, Register dest,
@ -147,7 +150,7 @@ class MacroAssemblerARM : public Assembler
void ma_neg(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
// and
// And
void ma_and(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
@ -162,11 +165,11 @@ class MacroAssemblerARM : public Assembler
// bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
void ma_bic(Imm32 imm, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
// exclusive or
// Exclusive or
void ma_eor(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
@ -180,7 +183,7 @@ class MacroAssemblerARM : public Assembler
SetCond_ sc = NoSetCond, Condition c = Always);
// or
// Or
void ma_orr(Register src, Register dest,
SetCond_ sc = NoSetCond, Condition c = Always);
@ -194,49 +197,49 @@ class MacroAssemblerARM : public Assembler
SetCond_ sc = NoSetCond, Condition c = Always);
// arithmetic based ops
// add with carry
// Arithmetic based ops.
// Add with carry:
void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
// add
// Add:
void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
// subtract with carry
// Subtract with carry:
void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
// subtract
// Subtract:
void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
// reverse subtract
// Reverse subtract:
void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
// reverse subtract with carry
// Reverse subtract with carry:
void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
// compares/tests
// compare negative (sets condition codes as src1 + src2 would)
// Compares/tests.
// Compare negative (sets condition codes as src1 + src2 would):
void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
void ma_cmn(Register src1, Register src2, Condition c = Always);
void ma_cmn(Register src1, Operand op, Condition c = Always);
// compare (src - src2)
// Compare (src - src2):
void ma_cmp(Register src1, Imm32 imm, Condition c = Always);
void ma_cmp(Register src1, ImmWord ptr, Condition c = Always);
void ma_cmp(Register src1, ImmGCPtr ptr, Condition c = Always);
@ -244,38 +247,39 @@ class MacroAssemblerARM : public Assembler
void ma_cmp(Register src1, Register src2, Condition c = Always);
// test for equality, (src1^src2)
// Test for equality, (src1 ^ src2):
void ma_teq(Register src1, Imm32 imm, Condition c = Always);
void ma_teq(Register src1, Register src2, Condition c = Always);
void ma_teq(Register src1, Operand op, Condition c = Always);
// test (src1 & src2)
// Test (src1 & src2):
void ma_tst(Register src1, Imm32 imm, Condition c = Always);
void ma_tst(Register src1, Register src2, Condition c = Always);
void ma_tst(Register src1, Operand op, Condition c = Always);
// multiplies. For now, there are only two that we care about.
// Multiplies. For now, there are only two that we care about.
void ma_mul(Register src1, Register src2, Register dest);
void ma_mul(Register src1, Imm32 imm, Register dest);
Condition ma_check_mul(Register src1, Register src2, Register dest, Condition cond);
Condition ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond);
// fast mod, uses scratch registers, and thus needs to be in the assembler
// implicitly assumes that we can overwrite dest at the beginning of the sequence
// Fast mod, uses scratch registers, and thus needs to be in the assembler;
// implicitly assumes that we can overwrite dest at the beginning of the
// sequence.
void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
int32_t shift);
// mod, depends on integer divide instructions being supported
// Mod - depends on integer divide instructions being supported.
void ma_smod(Register num, Register div, Register dest);
void ma_umod(Register num, Register div, Register dest);
// division, depends on integer divide instructions being supported
// Division - depends on integer divide instructions being supported.
void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);
// memory
// shortcut for when we know we're transferring 32 bits of data
// Memory:
// Shortcut for when we know we're transferring 32 bits of data.
void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
Index mode = Offset, Condition cc = Always);
@ -298,7 +302,7 @@ class MacroAssemblerARM : public Assembler
void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
// specialty for moving N bits of data, where n == 8,16,32,64
// Specialty for moving N bits of data, where n == 8,16,32,64.
BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Register rm, Register rt,
Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
@ -312,19 +316,19 @@ class MacroAssemblerARM : public Assembler
void ma_vpop(VFPRegister r);
void ma_vpush(VFPRegister r);
// branches when done from within arm-specific code
// Branches when done from within arm-specific code.
BufferOffset ma_b(Label *dest, Condition c = Always, bool isPatchable = false);
void ma_bx(Register dest, Condition c = Always);
void ma_b(void *target, Relocation::Kind reloc, Condition c = Always);
// this is almost NEVER necessary, we'll basically never be calling a label
// This is almost NEVER necessary; we'll basically never be calling a label
// except possibly in the crazy bailout-table case.
void ma_bl(Label *dest, Condition c = Always);
void ma_blx(Register dest, Condition c = Always);
//VFP/ALU
// VFP/ALU:
void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
@ -356,19 +360,19 @@ class MacroAssemblerARM : public Assembler
void ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
// source is F64, dest is I32
// Source is F64, dest is I32:
void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
// source is I32, dest is F64
// Source is I32, dest is F64:
void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
// source is F32, dest is I32
// Source is F32, dest is I32:
void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
// source is I32, dest is F32
// Source is I32, dest is F32:
void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
@ -389,21 +393,22 @@ class MacroAssemblerARM : public Assembler
BufferOffset ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
// calls an Ion function, assumes that the stack is untouched (8 byte alinged)
// Calls an Ion function, assumes that the stack is untouched (8 byte
// aligned).
void ma_callIon(const Register reg);
// callso an Ion function, assuming that sp has already been decremented
// Calls an Ion function, assuming that sp has already been decremented.
void ma_callIonNoPush(const Register reg);
// calls an ion function, assuming that the stack is currently not 8 byte aligned
// Calls an ion function, assuming that the stack is currently not 8 byte
// aligned.
void ma_callIonHalfPush(const Register reg);
void ma_call(ImmPtr dest);
// Float registers can only be loaded/stored in continuous runs
// when using vstm/vldm.
// This function breaks set into continuous runs and loads/stores
// them at [rm]. rm will be modified and left in a state logically
// suitable for the next load/store.
// Returns the offset from [dm] for the logical next load/store.
// Float registers can only be loaded/stored in continuous runs when using
// vstm/vldm. This function breaks the set into continuous runs and loads/stores
// them at [rm]. rm will be modified and left in a state logically suitable
// for the next load/store. Returns the offset from [dm] for the logical
// next load/store.
int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
Register rm, DTMMode mode)
{
@ -420,9 +425,8 @@ class MacroAssemblerARM : public Assembler
private:
// Implementation for transferMultipleByRuns so we can use different
// iterators for forward/backward traversals.
// The sign argument should be 1 if we traverse forwards, -1 if we
// traverse backwards.
// iterators for forward/backward traversals. The sign argument should be 1
// if we traverse forwards, -1 if we traverse backwards.
template<typename RegisterIterator> int32_t
transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
Register rm, DTMMode mode, int32_t sign)
@ -453,17 +457,17 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// Number of bytes the stack is adjusted inside a call to C. Calls to C may
// not be nested.
uint32_t args_;
// The actual number of arguments that were passed, used to assert that
// the initial number of arguments declared was correct.
// The actual number of arguments that were passed, used to assert that the
// initial number of arguments declared was correct.
uint32_t passedArgs_;
uint32_t passedArgTypes_;
// ARM treats arguments as a vector in registers/memory, that looks like:
// { r0, r1, r2, r3, [sp], [sp,+4], [sp,+8] ... }
// usedIntSlots_ keeps track of how many of these have been used.
// It bears a passing resemblance to passedArgs_, but a single argument
// can effectively use between one and three slots depending on its size and
// alignment requirements
// usedIntSlots_ keeps track of how many of these have been used. It bears a
// passing resemblance to passedArgs_, but a single argument can effectively
// use between one and three slots depending on its size and alignment
// requirements.
uint32_t usedIntSlots_;
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
uint32_t usedFloatSlots_;
@ -472,13 +476,13 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
#endif
bool dynamicAlignment_;
// Used to work around the move resolver's lack of support for
// moving into register pairs, which the softfp ABI needs.
// Used to work around the move resolver's lack of support for moving into
// register pairs, which the softfp ABI needs.
mozilla::Array<MoveOperand, 2> floatArgsInGPR;
mozilla::Array<bool, 2> floatArgsInGPRValid;
// Compute space needed for the function call and set the properties of the
// callee. It returns the space which has to be allocated for calling the
// callee. It returns the space which has to be allocated for calling the
// function.
//
// arg Number of arguments of the function.
@ -489,9 +493,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// Extra bytes currently pushed onto the frame beyond frameDepth_. This is
// needed to compute offsets to stack slots while temporary space has been
// reserved for unexpected spills or C++ function calls. It is maintained
// by functions which track stack alignment, which for clear distinction
// use StudlyCaps (for example, Push, Pop).
// reserved for unexpected spills or C++ function calls. It is maintained by
// functions which track stack alignment, which for clear distinction use
// StudlyCaps (for example, Push, Pop).
uint32_t framePushed_;
void adjustFrame(int value) {
setFramePushed(framePushed_ + value);
@ -505,9 +509,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
public:
using MacroAssemblerARM::call;
// jumps + other functions that should be called from
// non-arm specific code...
// basically, an x86 front end on top of the ARM code.
// Jumps + other functions that should be called from non-arm specific
// code. Basically, an x86 front end on top of the ARM code.
void j(Condition code , Label *dest)
{
as_b(dest, code);
@ -537,7 +540,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
as_blx(reg);
}
void call(Label *label) {
// for now, assume that it'll be nearby?
// For now, assume that it'll be nearby?
as_bl(label, Always);
}
void call(ImmWord imm) {
@ -647,8 +650,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
CodeOffsetLabel toggledJump(Label *label);
// Emit a BLX or NOP instruction. ToggleCall can be used to patch
// this instruction.
// Emit a BLX or NOP instruction. ToggleCall can be used to patch this
// instruction.
CodeOffsetLabel toggledCall(JitCode *target, bool enabled);
CodeOffsetLabel pushWithPatch(ImmWord imm) {
@ -702,7 +705,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
return value.typeReg();
}
// higher level tag testing code
// Higher level tag testing code.
Condition testInt32(Condition cond, const ValueOperand &value);
Condition testBoolean(Condition cond, const ValueOperand &value);
Condition testDouble(Condition cond, const ValueOperand &value);
@ -716,7 +719,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
Condition testPrimitive(Condition cond, const ValueOperand &value);
// register-based tests
// Register-based tests.
Condition testInt32(Condition cond, Register tag);
Condition testBoolean(Condition cond, Register tag);
Condition testNull(Condition cond, Register tag);
@ -767,7 +770,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value,
Label *label);
// unboxing code
// Unboxing code.
void unboxNonDouble(const ValueOperand &operand, Register dest);
void unboxNonDouble(const Address &src, Register dest);
void unboxInt32(const ValueOperand &src, Register dest) { unboxNonDouble(src, dest); }
@ -789,7 +792,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_eor(Imm32(1), val.payloadReg());
}
// boxing code
// Boxing code.
void boxDouble(FloatRegister src, const ValueOperand &dest);
void boxNonDouble(JSValueType type, Register src, const ValueOperand &dest);
@ -818,7 +821,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void loadInt32OrDouble(Register base, Register index,
FloatRegister dest, int32_t shift = defaultShift);
void loadConstantDouble(double dp, FloatRegister dest);
// treat the value as a boolean, and set condition codes accordingly
// Treat the value as a boolean, and set condition codes accordingly.
Condition testInt32Truthy(bool truthy, const ValueOperand &operand);
Condition testBooleanTruthy(bool truthy, const ValueOperand &operand);
Condition testDoubleTruthy(bool truthy, FloatRegister reg);
@ -1107,7 +1110,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void storeValue(JSValueType type, Register reg, BaseIndex dest) {
// Harder cases not handled yet.
JS_ASSERT(dest.offset == 0);
ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
storeValue(type, reg, Address(ScratchRegister, 0));
}
void storeValue(ValueOperand val, const Address &dest) {
@ -1131,7 +1134,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void storeValue(const Value &val, BaseIndex dest) {
// Harder cases not handled yet.
JS_ASSERT(dest.offset == 0);
ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, OpAdd);
storeValue(val, Address(ScratchRegister, 0));
}
@ -1250,7 +1253,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void callWithExitFrame(JitCode *target, Register dynStack);
// Makes an Ion call using the only two methods that it is sane for
// indep code to make a call
// independent code to use when making a call.
void callIon(Register callee);
void callIonFromAsmJS(Register callee);
@ -1379,8 +1382,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void clampIntToUint8(Register reg) {
// look at (reg >> 8) if it is 0, then reg shouldn't be clamped
// if it is <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
// Look at (reg >> 8): if it is 0, then reg shouldn't be clamped; if it is
// < 0, then we want to clamp to 0; otherwise, we wish to clamp to 255.
as_mov(ScratchRegister, asr(reg, 8), SetCond);
ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
ma_mov(Imm32(0), reg, NoSetCond, Signed);
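For reference, the same clamp written as a host-side sketch (hypothetical helper, not part of the patch):

    #include <cstdint>

    static int32_t clampIntToUint8(int32_t reg)
    {
        int32_t high = reg >> 8;     // Arithmetic shift, like as_mov(..., asr(reg, 8)).
        if (high == 0)
            return reg;              // Already in [0, 255].
        return high < 0 ? 0 : 255;   // Negative clamps to 0, anything larger to 255.
    }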
@ -1423,7 +1426,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void setStackArg(Register reg, uint32_t arg);
void breakpoint();
// conditional breakpoint
// Conditional breakpoint.
void breakpoint(Condition cc);
void compareDouble(FloatRegister lhs, FloatRegister rhs);
@ -1446,8 +1449,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_lsl(imm, dest, dest);
}
// If source is a double, load it into dest. If source is int32,
// convert it to double. Else, branch to failure.
// If source is a double, load it into dest. If source is int32, convert it
// to double. Else, branch to failure.
void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);
void
@ -1501,8 +1504,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// Arguments must be assigned in a left-to-right order. This process may
// temporarily use more stack, in which case esp-relative addresses will be
// automatically adjusted. It is extremely important that esp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
// addresses are computed *after* setupABICall(). Furthermore, no operations
// should be emitted while setting arguments.
void passABIArg(const MoveOperand &from, MoveOp::Type type);
void passABIArg(Register reg);
void passABIArg(FloatRegister reg, MoveOp::Type type);
@ -1533,7 +1536,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
}
void computeEffectiveAddress(const BaseIndex &address, Register dest) {
ma_alu(address.base, lsl(address.index, address.scale), dest, op_add, NoSetCond);
ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, NoSetCond);
if (address.offset)
ma_add(dest, Imm32(address.offset), dest, NoSetCond);
}
@ -1545,8 +1548,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void roundf(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);
void clampCheck(Register r, Label *handleNotAnInt) {
// check explicitly for r == INT_MIN || r == INT_MAX
// this is the instruction sequence that gcc generated for this
// Check explicitly for r == INT_MIN || r == INT_MAX
// This is the instruction sequence that gcc generated for this
// operation.
ma_sub(r, Imm32(0x80000001), ScratchRegister);
ma_cmn(ScratchRegister, Imm32(3));

View file

@ -46,7 +46,7 @@ MoveEmitterARM::cycleSlot() const
return Operand(StackPointer, offset);
}
// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks
// THIS IS ALWAYS AN LDRAddr. It should not be wrapped in an operand, methinks.
Operand
MoveEmitterARM::spillSlot() const
{
@ -83,12 +83,12 @@ MoveEmitterARM::tempReg()
if (spilledReg_ != InvalidReg)
return spilledReg_;
// For now, just pick r12/ip as the eviction point. This is totally
// random, and if it ends up being bad, we can use actual heuristics later.
// r12 is actually a bad choice. it is the scratch register, which is frequently
// used for address computations, such as those found when we attempt to access
// values more than 4096 off of the stack pointer.
// instead, use lr, the LinkRegister.
// For now, just pick r12/ip as the eviction point. This is totally random,
// and if it ends up being bad, we can use actual heuristics later. r12 is
// actually a bad choice. It is the scratch register, which is frequently
// used for address computations, such as those found when we attempt to
// access values more than 4096 off of the stack pointer. Instead, use lr,
// the LinkRegister.
spilledReg_ = r14;
if (pushedAtSpill_ == -1) {
masm.Push(spilledReg_);

View file

@ -191,54 +191,54 @@ class SimInstruction {
inline int VFPMRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 0, 5); }
inline int VFPDRegValue(VFPRegPrecision pre) { return VFPGlueRegValue(pre, 12, 22); }
// Fields used in Data processing instructions
// Fields used in Data processing instructions.
inline int opcodeValue() const { return static_cast<ALUOp>(bits(24, 21)); }
inline ALUOp opcodeField() const { return static_cast<ALUOp>(bitField(24, 21)); }
inline int sValue() const { return bit(20); }
// with register
// With register.
inline int rmValue() const { return bits(3, 0); }
inline ShiftType shifttypeValue() const { return static_cast<ShiftType>(bits(6, 5)); }
inline int rsValue() const { return bits(11, 8); }
inline int shiftAmountValue() const { return bits(11, 7); }
// with immediate
// With immediate.
inline int rotateValue() const { return bits(11, 8); }
inline int immed8Value() const { return bits(7, 0); }
inline int immed4Value() const { return bits(19, 16); }
inline int immedMovwMovtValue() const { return immed4Value() << 12 | offset12Value(); }
// Fields used in Load/Store instructions
// Fields used in Load/Store instructions.
inline int PUValue() const { return bits(24, 23); }
inline int PUField() const { return bitField(24, 23); }
inline int bValue() const { return bit(22); }
inline int wValue() const { return bit(21); }
inline int lValue() const { return bit(20); }
// with register uses same fields as Data processing instructions above
// with immediate
// With register uses same fields as Data processing instructions above.
// With immediate:
inline int offset12Value() const { return bits(11, 0); }
// multiple
// Multiple.
inline int rlistValue() const { return bits(15, 0); }
// extra loads and stores
// Extra loads and stores.
inline int signValue() const { return bit(6); }
inline int hValue() const { return bit(5); }
inline int immedHValue() const { return bits(11, 8); }
inline int immedLValue() const { return bits(3, 0); }
// Fields used in Branch instructions
// Fields used in Branch instructions.
inline int linkValue() const { return bit(24); }
inline int sImmed24Value() const { return ((instructionBits() << 8) >> 8); }
// Fields used in Software interrupt instructions
// Fields used in Software interrupt instructions.
inline SoftwareInterruptCodes svcValue() const {
return static_cast<SoftwareInterruptCodes>(bits(23, 0));
}
// Test for special encodings of type 0 instructions (extra loads and stores,
// as well as multiplications).
// Test for special encodings of type 0 instructions (extra loads and
// stores, as well as multiplications).
inline bool isSpecialType0() const { return (bit(7) == 1) && (bit(4) == 1); }
// Test for miscellaneous instructions encodings of type 0 instructions.
@ -631,12 +631,13 @@ ReadLine(const char *prompt)
}
int len = strlen(line_buf);
if (len > 0 && line_buf[len - 1] == '\n') {
// Since we read a new line we are done reading the line. This
// will exit the loop after copying this buffer into the result.
// Since we read a new line we are done reading the line. This will
// exit the loop after copying this buffer into the result.
keep_going = false;
}
if (!result) {
// Allocate the initial result and make room for the terminating '\0'
// Allocate the initial result and make room for the terminating
// '\0'.
result = (char *)js_malloc(len + 1);
if (!result)
return nullptr;
@ -693,7 +694,7 @@ ArmDebugger::debug()
char arg2[ARG_SIZE + 1];
char *argv[3] = { cmd, arg1, arg2 };
// make sure to have a proper terminating character if reaching the limit
// Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
@ -735,7 +736,8 @@ ArmDebugger::debug()
sim_->set_pc(sim_->get_pc() + 4);
sim_->icount_++;
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
// Execute the one instruction we broke at with breakpoints
// disabled.
sim_->instructionDecode(reinterpret_cast<SimInstruction *>(sim_->get_pc()));
sim_->icount_++;
// Leave the debugger shell.
@ -1076,7 +1078,7 @@ CheckICache(SimulatorRuntime::ICacheMap &i_cache, SimInstruction *instr)
cache_page->cachedData(offset),
SimInstruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
@ -1180,8 +1182,8 @@ Simulator::Simulator(SimulatorRuntime *srt)
// When the generated code calls a VM function (masm.callWithABI) we need to
// call that function instead of trying to execute it with the simulator
// (because it's x86 code instead of arm code). We do that by redirecting the
// VM call to a svc (Supervisor Call) instruction that is handled by the
// (because it's x86 code instead of arm code). We do that by redirecting the VM
// call to a svc (Supervisor Call) instruction that is handled by the
// simulator. We write the original destination of the jump just at a known
// offset from the svc instruction so the simulator knows what to call.
class Redirection
@ -1275,8 +1277,8 @@ Simulator::set_register(int reg, int32_t value)
registers_[reg] = value;
}
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
// Get the register from the architecture state. This function does handle the
// special case of accessing the PC register.
int32_t
Simulator::get_register(int reg) const
{
@ -1291,8 +1293,8 @@ Simulator::get_double_from_register_pair(int reg)
{
MOZ_ASSERT(reg >= 0 && reg < num_registers && (reg % 2) == 0);
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
// Read the bits from the unsigned integer register_[] array into the double
// precision floating point value and return it.
double dm_val = 0.0;
char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
@ -1630,8 +1632,8 @@ Simulator::overRecursedWithExtra(uint32_t extra) const
return newsp <= stackLimit();
}
// Checks if the current instruction should be executed based on its
// condition bits.
// Checks if the current instruction should be executed based on its condition
// bits.
bool
Simulator::conditionallyExecute(SimInstruction *instr)
{
@ -1704,14 +1706,14 @@ Simulator::overflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addit
{
bool overflow;
if (addition) {
// operands have the same sign
// Operands have the same sign.
overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
// and operands and result have different sign
// And operands and result have different sign.
&& ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
} else {
// operands have different signs
// Operands have different signs.
overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
// and first operand and result have different signs
// And first operand and result have different signs.
&& ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
}
return overflow;
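A worked example of the rule, as a standalone host-side sketch mirroring the function above (illustrative only):

    #include <cassert>
    #include <climits>
    #include <cstdint>

    static bool overflowFrom(int32_t alu_out, int32_t left, int32_t right, bool addition)
    {
        if (addition)   // Same sign in, different sign out.
            return ((left >= 0) == (right >= 0)) && ((left >= 0) != (alu_out >= 0));
        // Subtraction: different signs in, result sign differs from the first operand.
        return ((left >= 0) != (right >= 0)) && ((left >= 0) != (alu_out >= 0));
    }

    int main()
    {
        assert(overflowFrom(INT_MIN, INT_MAX, 1, true));   // INT_MAX + 1 wraps: overflow.
        assert(!overflowFrom(2, 1, 1, true));              // 1 + 1: no overflow.
        return 0;
    }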
@ -2074,10 +2076,10 @@ typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
// Fill the volatile registers with scratch values.
//
// Some of the ABI calls assume that the float registers are not scratched, even
// though the ABI defines them as volatile - a performance optimization. These are
// all calls passing operands in integer registers, so for now the simulator does not
// scratch any float registers for these calls. Should try to narrow it further in
// future.
// though the ABI defines them as volatile - a performance optimization. These
// are all calls passing operands in integer registers, so for now the simulator
// does not scratch any float registers for these calls. Should try to narrow it
// further in future.
//
void
Simulator::scratchVolatileRegisters(bool scratchFloat)
@ -2087,8 +2089,8 @@ Simulator::scratchVolatileRegisters(bool scratchFloat)
set_register(r1, scratch_value);
set_register(r2, scratch_value);
set_register(r3, scratch_value);
set_register(r12, scratch_value); // Intra-Procedure-call scratch register
set_register(r14, scratch_value); // Link register
set_register(r12, scratch_value); // Intra-Procedure-call scratch register.
set_register(r14, scratch_value); // Link register.
if (scratchFloat) {
uint64_t scratch_value_d = 0x5a5a5a5a5a5a5a5aLU ^ uint64_t(icount_) ^ (uint64_t(icount_) << 30);
@ -2142,9 +2144,10 @@ Simulator::softwareInterrupt(SimInstruction *instr)
case Args_General2: {
Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
int64_t result = target(arg0, arg1);
// The ARM backend makes calls to __aeabi_idivmod and __aeabi_uidivmod assuming
// that the float registers are non-volatile as a performance optimization, so the
// float registers must not be scratch when calling these.
// The ARM backend makes calls to __aeabi_idivmod and
// __aeabi_uidivmod assuming that the float registers are
// non-volatile as a performance optimization, so the float
// registers must not be scratch when calling these.
bool scratchFloat = target != __aeabi_idivmod && target != __aeabi_uidivmod;
scratchVolatileRegisters(/* scratchFloat = */ scratchFloat);
setCallResult(result);
@ -2306,8 +2309,8 @@ Simulator::softwareInterrupt(SimInstruction *instr)
if (isWatchedStop(code))
increaseStopCounter(code);
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
// Stop if it is enabled, otherwise go on jumping over the stop and
// the message address.
if (isEnabledStop(code)) {
ArmDebugger dbg(this);
dbg.stop(instr);
@ -2404,8 +2407,8 @@ Simulator::printStopInfo(uint32_t code)
}
}
// Instruction types 0 and 1 are both rolled into one function because they
// only differ in the handling of the shifter_operand.
// Instruction types 0 and 1 are both rolled into one function because they only
// differ in the handling of the shifter_operand.
void
Simulator::decodeType01(SimInstruction *instr)
{
@ -2423,9 +2426,9 @@ Simulator::decodeType01(SimInstruction *instr)
int32_t rm_val = get_register(rm);
if (instr->bit(23) == 0) {
if (instr->bit(21) == 0) {
// The MUL instruction description (A 4.1.33) refers to Rd as being
// the destination for the operation, but it confusingly uses the
// Rn field to encode it.
// The MUL instruction description (A 4.1.33) refers to
// Rd as being the destination for the operation, but it
// confusingly uses the Rn field to encode it.
int rd = rn; // Remap the rn field to the Rd register.
int32_t alu_out = rm_val * rs_val;
set_register(rd, alu_out);
@ -2435,9 +2438,10 @@ Simulator::decodeType01(SimInstruction *instr)
int rd = instr->rdValue();
int32_t acc_value = get_register(rd);
if (instr->bit(22) == 0) {
// The MLA instruction description (A 4.1.28) refers to the order
// of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
// Rn field to encode the Rd register and the Rd field to encode
// The MLA instruction description (A 4.1.28) refers
// to the order of registers as "Rd, Rm, Rs,
// Rn". But confusingly it uses the Rn field to
// encode the Rd register and the Rd field to encode
// the Rn register.
int32_t mul_out = rm_val * rs_val;
int32_t result = acc_value + mul_out;
@ -2449,9 +2453,9 @@ Simulator::decodeType01(SimInstruction *instr)
}
}
} else {
// The signed/long multiply instructions use the terms RdHi and RdLo
// when referring to the target registers. They are mapped to the Rn
// and Rd fields as follows:
// The signed/long multiply instructions use the terms RdHi
// and RdLo when referring to the target registers. They are
// mapped to the Rn and Rd fields as follows:
// RdLo == Rd
// RdHi == Rn (This is confusingly stored in variable rd here
// because the mul instruction from above uses the
@ -2469,7 +2473,7 @@ Simulator::decodeType01(SimInstruction *instr)
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xffffffff);
} else {
// unsigned multiply
// Unsigned multiply.
uint64_t left_op = static_cast<uint32_t>(rm_val);
uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
@ -2485,7 +2489,7 @@ Simulator::decodeType01(SimInstruction *instr)
MOZ_CRASH(); // Not used atm.
}
} else {
// extra load/store instructions
// Extra load/store instructions.
int rd = instr->rdValue();
int rn = instr->rnValue();
int32_t rn_val = get_register(rn);
@ -2587,7 +2591,7 @@ Simulator::decodeType01(SimInstruction *instr)
}
}
} else {
// signed byte loads
// Signed byte loads.
MOZ_ASSERT(instr->hasSign());
MOZ_ASSERT(instr->hasL());
int8_t val = readB(addr);
@ -2688,7 +2692,7 @@ Simulator::decodeType01(SimInstruction *instr)
}
int32_t alu_out;
switch (instr->opcodeField()) {
case op_and:
case OpAnd:
alu_out = rn_val & shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2696,7 +2700,7 @@ Simulator::decodeType01(SimInstruction *instr)
setCFlag(shifter_carry_out);
}
break;
case op_eor:
case OpEor:
alu_out = rn_val ^ shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2704,7 +2708,7 @@ Simulator::decodeType01(SimInstruction *instr)
setCFlag(shifter_carry_out);
}
break;
case op_sub:
case OpSub:
alu_out = rn_val - shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2713,7 +2717,7 @@ Simulator::decodeType01(SimInstruction *instr)
setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, false));
}
break;
case op_rsb:
case OpRsb:
alu_out = shifter_operand - rn_val;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2722,7 +2726,7 @@ Simulator::decodeType01(SimInstruction *instr)
setVFlag(overflowFrom(alu_out, shifter_operand, rn_val, false));
}
break;
case op_add:
case OpAdd:
alu_out = rn_val + shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2731,7 +2735,7 @@ Simulator::decodeType01(SimInstruction *instr)
setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
}
break;
case op_adc:
case OpAdc:
alu_out = rn_val + shifter_operand + getCarry();
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2740,11 +2744,11 @@ Simulator::decodeType01(SimInstruction *instr)
setVFlag(overflowFrom(alu_out, rn_val, shifter_operand, true));
}
break;
case op_sbc:
case op_rsc:
case OpSbc:
case OpRsc:
MOZ_CRASH();
break;
case op_tst:
case OpTst:
if (instr->hasS()) {
alu_out = rn_val & shifter_operand;
setNZFlags(alu_out);
@ -2754,7 +2758,7 @@ Simulator::decodeType01(SimInstruction *instr)
set_register(rd, alu_out);
}
break;
case op_teq:
case OpTeq:
if (instr->hasS()) {
alu_out = rn_val ^ shifter_operand;
setNZFlags(alu_out);
@ -2765,7 +2769,7 @@ Simulator::decodeType01(SimInstruction *instr)
MOZ_CRASH();
}
break;
case op_cmp:
case OpCmp:
if (instr->hasS()) {
alu_out = rn_val - shifter_operand;
setNZFlags(alu_out);
@ -2777,7 +2781,7 @@ Simulator::decodeType01(SimInstruction *instr)
set_register(rd, alu_out);
}
break;
case op_cmn:
case OpCmn:
if (instr->hasS()) {
alu_out = rn_val + shifter_operand;
setNZFlags(alu_out);
@ -2789,7 +2793,7 @@ Simulator::decodeType01(SimInstruction *instr)
MOZ_CRASH();
}
break;
case op_orr:
case OpOrr:
alu_out = rn_val | shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2797,7 +2801,7 @@ Simulator::decodeType01(SimInstruction *instr)
setCFlag(shifter_carry_out);
}
break;
case op_mov:
case OpMov:
alu_out = shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2805,7 +2809,7 @@ Simulator::decodeType01(SimInstruction *instr)
setCFlag(shifter_carry_out);
}
break;
case op_bic:
case OpBic:
alu_out = rn_val & ~shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2813,7 +2817,7 @@ Simulator::decodeType01(SimInstruction *instr)
setCFlag(shifter_carry_out);
}
break;
case op_mvn:
case OpMvn:
alu_out = ~shifter_operand;
set_register(rd, alu_out);
if (instr->hasS()) {
@ -2942,9 +2946,9 @@ Simulator::decodeType3(SimInstruction *instr)
else // ASR
rm_val >>= shift;
// If saturation occurs, the Q flag should be set in the CPSR.
// There is no Q flag yet, and no instruction (MRS) to read the
// CPSR directly.
// If saturation occurs, the Q flag should be set in the
// CPSR. There is no Q flag yet, and no instruction (MRS)
// to read the CPSR directly.
if (rm_val > sat_val)
rm_val = sat_val;
else if (rm_val < 0)
@ -3042,7 +3046,7 @@ Simulator::decodeType3(SimInstruction *instr)
if (instr->bit(22) == 0x0 && instr->bit(20) == 0x1 &&
instr->bits(15,12) == 0x0f && instr->bits(7, 4) == 0x1) {
if (!instr->hasW()) {
// sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
// sdiv (in V8 notation matching ARM ISA format) rn = rm/rs.
int rm = instr->rmValue();
int32_t rm_val = get_register(rm);
int rs = instr->rsValue();
@ -3056,7 +3060,7 @@ Simulator::decodeType3(SimInstruction *instr)
set_register(rn, ret_val);
return;
} else {
// udiv (in V8 notation matching ARM ISA format) rn = rm/rs
// udiv (in V8 notation matching ARM ISA format) rn = rm/rs.
int rm = instr->rmValue();
uint32_t rm_val = get_register(rm);
int rs = instr->rsValue();
@ -3149,7 +3153,8 @@ Simulator::decodeType3(SimInstruction *instr)
void
Simulator::decodeType4(SimInstruction *instr)
{
MOZ_ASSERT(instr->bit(22) == 0); // Only allowed to be set in privileged mode.
// Only allowed to be set in privileged mode.
MOZ_ASSERT(instr->bit(22) == 0);
bool load = instr->hasL();
handleRList(instr, load);
}
@ -3194,7 +3199,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr)
if (instr->bit(4) == 0) {
if (instr->opc1Value() == 0x7) {
// Other data processing instructions
// Other data processing instructions.
if ((instr->opc2Value() == 0x0) && (instr->opc3Value() == 0x1)) {
// vmov register to register.
if (instr->szValue() == 0x1) {
@ -3238,7 +3243,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr)
decodeVCVTBetweenFloatingPointAndInteger(instr);
} else if ((instr->opc2Value() == 0xA) && (instr->opc3Value() == 0x3) &&
(instr->bit(8) == 1)) {
// vcvt.f64.s32 Dd, Dd, #<fbits>
// vcvt.f64.s32 Dd, Dd, #<fbits>.
int fraction_bits = 32 - ((instr->bits(3, 0) << 1) | instr->bit(5));
int fixed_value = get_sinteger_from_s_register(vd * 2);
double divide = 1 << fraction_bits;
@ -3267,7 +3272,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr)
if (instr->szValue() == 0x1) {
set_d_register_from_double(vd, instr->doubleImmedVmov());
} else {
// vmov.f32 immediate
// vmov.f32 immediate.
set_s_register_from_float(vd, instr->float32ImmedVmov());
}
} else {
@ -3333,8 +3338,8 @@ Simulator::decodeTypeVFP(SimInstruction *instr)
const double dn_val = get_double_from_d_register(vn);
const double dm_val = get_double_from_d_register(vm);
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
// Note: we do the mul and add/sub in separate steps to avoid
// getting a result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
if (is_vmls) {
set_d_register_from_double(vd,
@ -3369,7 +3374,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr)
} else if ((instr->VLValue() == 0x0) &&
(instr->VCValue() == 0x1) &&
(instr->bit(23) == 0x0)) {
// vmov (ARM core register to scalar)
// vmov (ARM core register to scalar).
int vd = instr->bits(19, 16) | (instr->bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
@ -3380,7 +3385,7 @@ Simulator::decodeTypeVFP(SimInstruction *instr)
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x1) &&
(instr->bit(23) == 0x0)) {
// vmov (scalar to ARM core register)
// vmov (scalar to ARM core register).
int vn = instr->bits(19, 16) | (instr->bit(7) << 4);
double dn_value = get_double_from_d_register(vn);
int32_t data[2];
@ -3539,8 +3544,8 @@ get_inv_op_vfp_flag(VFPRoundingMode mode, double val, bool unsigned_)
if (val != val)
return true;
// Check for overflow. This code works because 32bit integers can be
// exactly represented by ieee-754 64bit floating-point values.
// Check for overflow. This code works because 32bit integers can be exactly
// represented by ieee-754 64bit floating-point values.
switch (mode) {
case SimRN:
return unsigned_ ? (val >= (max_uint + 0.5)) ||
@ -3592,15 +3597,14 @@ Simulator::decodeVCVTBetweenFloatingPointAndInteger(SimInstruction *instr)
// We are playing with code close to the C++ standard's limits below,
// hence the very simple code and heavy checks.
//
// Note:
// C++ defines default type casting from floating point to integer as
// (close to) rounding toward zero ("fractional part discarded").
// Note: C++ defines default type casting from floating point to integer
// as (close to) rounding toward zero ("fractional part discarded").
int dst = instr->VFPDRegValue(kSinglePrecision);
int src = instr->VFPMRegValue(src_precision);
// Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
// mode or the default Round to Zero mode.
// Bit 7 in vcvt instructions indicates if we should use the FPSCR
// rounding mode or the default Round to Zero mode.
VFPRoundingMode mode = (instr->bit(7) != 1) ? FPSCR_rounding_mode_ : SimRZ;
MOZ_ASSERT(mode == SimRM || mode == SimRZ || mode == SimRN);
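
To make the comment above concrete: a plain C++ cast rounds toward zero, while bit 7 of vcvt selects the FPSCR rounding mode instead. A small sketch contrasting the three modes named in the assert (SimRZ, SimRN, SimRM), using ordinary <cmath> calls rather than the simulator's helpers:

#include <cmath>
#include <cstdio>

int main() {
    double v = -2.7;
    int truncated = static_cast<int>(v);                // Round to zero (SimRZ): -2, fraction discarded.
    int nearest = static_cast<int>(std::nearbyint(v));  // Round to nearest (SimRN, default FE mode): -3.
    int down = static_cast<int>(std::floor(v));         // Round toward minus infinity (SimRM): -3.
    std::printf("%d %d %d\n", truncated, nearest, down);
}
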
@ -3696,8 +3700,8 @@ Simulator::decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction *instr)
// We are playing with code close to the C++ standard's limits below,
// hence the very simple code and heavy checks.
//
// Note: C++ defines default type casting from floating point to integer as
// (close to) rounding toward zero ("fractional part discarded").
// Note: C++ defines default type casting from floating point to integer
// as (close to) rounding toward zero ("fractional part discarded").
int dst = instr->VFPDRegValue(precision);
@ -3711,8 +3715,8 @@ Simulator::decodeVCVTBetweenFloatingPointAndIntegerFrac(SimInstruction *instr)
// Scale value by specified number of fraction bits.
val *= mult;
// Rounding down towards zero. No need to account for the rounding error as this
// instruction always rounds down towards zero. See SimRZ below.
// Rounding down towards zero. No need to account for the rounding error
// as this instruction always rounds down towards zero. See SimRZ below.
int temp = unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
inv_op_vfp_flag_ = get_inv_op_vfp_flag(SimRZ, val, unsigned_integer);
@ -3877,7 +3881,7 @@ Simulator::decodeSpecialCondition(SimInstruction *instr)
break;
case 7:
if (instr->bits(18, 16) == 0 && instr->bits(11, 6) == 0x28 && instr->bit(4) == 1) {
// vmovl unsigned
// vmovl unsigned.
int Vd = (instr->bit(22) << 4) | instr->vdValue();
int Vm = (instr->bit(5) << 4) | instr->vmValue();
int imm3 = instr->bits(21, 19);
@ -4038,8 +4042,8 @@ Simulator::instructionDecode(SimInstruction *instr)
MOZ_CRASH();
break;
}
// If the instruction is a non taken conditional stop, we need to skip the
// inlined message address.
// If the instruction is a non taken conditional stop, we need to skip
// the inlined message address.
} else if (instr->isStop()) {
set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
}
@ -4052,8 +4056,8 @@ template<bool EnableStopSimAt>
void
Simulator::execute()
{
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
// Get the PC to simulate. Cannot use the accessor here as we need the raw
// PC value and not the one used as input to arithmetic instructions.
int program_counter = get_pc();
AsmJSActivation *activation = TlsPerThreadData.get()->asmJSActivationStackFromOwnerThread();
@ -4090,9 +4094,9 @@ Simulator::callInternal(uint8_t *entry)
// the LR the simulation stops when returning to this call point.
set_register(lr, end_sim_pc);
// Remember the values of callee-saved registers.
// The code below assumes that r9 is not used as sb (static base) in
// simulator code and therefore is regarded as a callee-saved register.
// Remember the values of callee-saved registers. The code below assumes
// that r9 is not used as sb (static base) in simulator code and therefore
// is regarded as a callee-saved register.
int32_t r4_val = get_register(r4);
int32_t r5_val = get_register(r5);
int32_t r6_val = get_register(r6);
@ -4142,7 +4146,7 @@ Simulator::callInternal(uint8_t *entry)
set_d_register(d14, &callee_saved_value_d);
set_d_register(d15, &callee_saved_value_d);
// Start the simulation
// Start the simulation.
if (Simulator::StopSimAt != -1L)
execute<true>();
else

Просмотреть файл

@ -172,7 +172,7 @@ class Simulator
// Known bad pc value to ensure that the simulator does not execute
// without being properly setup.
bad_lr = -1,
// A pc value used to signal the simulator to stop execution. Generally
// A pc value used to signal the simulator to stop execution. Generally
// the lr is set to this value on transition from native C code to
// simulated execution, so that the simulator can "return" to the native
// C code.

Просмотреть файл

@ -37,7 +37,7 @@ static const FloatRegisterSet NonVolatileFloatRegs =
static void
GenerateReturn(MacroAssembler &masm, int returnCode, SPSProfiler *prof)
{
// Restore non-volatile floating point registers
// Restore non-volatile floating point registers.
masm.transferMultipleByRuns(NonVolatileFloatRegs, IsLoad, StackPointer, IA);
// Unwind the sps mark.
@ -59,7 +59,7 @@ GenerateReturn(MacroAssembler &masm, int returnCode, SPSProfiler *prof)
// r12 isn't saved, so it shouldn't be restored.
masm.transferReg(pc);
masm.finishDataTransfer();
masm.dumpPool();
masm.flushBuffer();
}
struct EnterJITStack
@ -75,7 +75,7 @@ struct EnterJITStack
size_t hasSPSMark;
// non-volatile registers.
// Non-volatile registers.
void *r4;
void *r5;
void *r6;
@ -133,7 +133,7 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
// The 5th argument is located at [sp, 36]
masm.finishDataTransfer();
// Push the EnterJIT sps mark. "Frame pointer" = start of saved core regs.
// Push the EnterJIT sps mark. "Frame pointer" = start of saved core regs.
masm.movePtr(sp, r8);
masm.spsMarkJit(&cx->runtime()->spsProfiler, r8, r9);
@ -154,26 +154,27 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
masm.loadPtr(slot_vp, r10);
masm.unboxInt32(Address(r10, 0), r10);
// Subtract off the size of the arguments from the stack pointer, store elsewhere
// Subtract off the size of the arguments from the stack pointer, store
// elsewhere.
aasm->as_sub(r4, sp, O2RegImmShift(r1, LSL, 3)); //r4 = sp - argc*8
// Get the final position of the stack pointer into the stack pointer
// Get the final position of the stack pointer into the stack pointer.
aasm->as_sub(sp, r4, Imm8(16)); // sp' = sp - argc*8 - 16
// Get a copy of the number of args to use as a decrement counter, also
// Set the zero condition code
// Get a copy of the number of args to use as a decrement counter, also set
// the zero condition code.
aasm->as_mov(r5, O2Reg(r1), SetCond);
// Loop over arguments, copying them from an unknown buffer onto the Ion
// stack so they can be accessed from JIT'ed code.
{
Label header, footer;
// If there aren't any arguments, don't do anything
// If there aren't any arguments, don't do anything.
aasm->as_b(&footer, Assembler::Zero);
// Get the top of the loop
// Get the top of the loop.
masm.bind(&header);
aasm->as_sub(r5, r5, Imm8(1), SetCond);
// We could be more awesome, and unroll this, using a loadm
// (particularly since the offset is effectively 0)
// but that seems more error prone, and complex.
// (particularly since the offset is effectively 0) but that seems more
// error prone, and complex.
// BIG FAT WARNING: this loads both r6 and r7.
aasm->as_extdtr(IsLoad, 64, true, PostIndex, r6, EDtrAddr(r2, EDtrOffImm(8)));
aasm->as_extdtr(IsStore, 64, true, PostIndex, r6, EDtrAddr(r4, EDtrOffImm(8)));
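
A simplified sketch of the stack arithmetic in the argument-copy loop above: 8 bytes per boxed argument and 16 extra bytes, as the comments state, with a decrementing counter like r5. The buffer sizes and names are placeholders; the real code streams 8-byte pairs with post-indexed loads and stores.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
    std::vector<uint8_t> stack(256, 0);
    uint8_t *sp = stack.data() + stack.size();       // Stack grows down.
    const uint64_t args[] = { 1, 2, 3 };              // Boxed values, 8 bytes each.
    size_t argc = 3;

    uint8_t *r4 = sp - argc * 8;                      // r4 = sp - argc*8: destination of the copy.
    uint8_t *new_sp = r4 - 16;                        // sp' = sp - argc*8 - 16.

    for (size_t r5 = argc; r5 != 0; r5--)             // Decrementing counter, like the r5 loop.
        std::memcpy(r4 + (argc - r5) * 8, &args[argc - r5], 8);

    std::printf("copied %zu args, sp moved by %td bytes\n", argc, sp - new_sp);
}
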
@ -211,9 +212,9 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
masm.load32(slot_numStackValues, numStackValues);
// Write return address. On ARM, CodeLabel is only used for tableswitch,
// so we can't use it here to get the return address. Instead, we use
// pc + a fixed offset to a jump to returnLabel. The pc register holds
// pc + 8, so we add the size of 2 instructions to skip the instructions
// so we can't use it here to get the return address. Instead, we use pc
// + a fixed offset to a jump to returnLabel. The pc register holds pc +
// 8, so we add the size of 2 instructions to skip the instructions
// emitted by storePtr and jump(&skipJump).
{
AutoForbidPools afp(&masm);
@ -235,7 +236,8 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
masm.mov(sp, framePtr);
#ifdef XP_WIN
// Can't push large frames blindly on windows. Touch frame memory incrementally.
// Can't push large frames blindly on windows. Touch frame memory
// incrementally.
masm.ma_lsl(Imm32(3), numStackValues, scratch);
masm.subPtr(scratch, framePtr);
{
@ -287,7 +289,7 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
masm.jump(jitcode);
// OOM: load error value, discard return address and previous frame
// OOM: Load error value, discard return address and previous frame
// pointer and return.
masm.bind(&error);
masm.mov(framePtr, sp);
@ -309,12 +311,12 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
masm.bind(&returnLabel);
}
// The top of the stack now points to the address of the field following
// the return address because the return address is popped for the
// return, so we need to remove the size of the return address field.
// The top of the stack now points to the address of the field following the
// return address because the return address is popped for the return, so we
// need to remove the size of the return address field.
aasm->as_sub(sp, sp, Imm8(4));
// Load off of the stack the size of our local stack
// Load off of the stack the size of our local stack.
masm.loadPtr(Address(sp, IonJSFrameLayout::offsetOfDescriptor()), r5);
aasm->as_add(sp, sp, lsr(r5, FRAMESIZE_SHIFT));
@ -323,9 +325,8 @@ JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
masm.storeValue(JSReturnOperand, Address(r5, 0));
// :TODO: Optimize storeValue with:
// We're using a load-double here. In order for that to work,
// the data needs to be stored in two consecutive registers,
// make sure this is the case
// We're using a load-double here. In order for that to work, the data needs
// to be stored in two consecutive registers, make sure this is the case
// JS_ASSERT(JSReturnReg_Type.code() == JSReturnReg_Data.code()+1);
// aasm->as_extdtr(IsStore, 64, true, Offset,
// JSReturnReg_Data, EDtrAddr(r5, EDtrOffImm(0)));
@ -350,16 +351,16 @@ JitRuntime::generateInvalidator(JSContext *cx)
// See large comment in x86's JitRuntime::generateInvalidator.
MacroAssembler masm(cx);
//masm.as_bkpt();
// At this point, one of two things has happened.
// At this point, one of two things has happened:
// 1) Execution has just returned from C code, which left the stack aligned
// 2) Execution has just returned from Ion code, which left the stack unaligned.
// The old return address should not matter, but we still want the
// stack to be aligned, and there is no good reason to automatically align it with
// a call to setupUnalignedABICall.
// The old return address should not matter, but we still want the stack to
// be aligned, and there is no good reason to automatically align it with a
// call to setupUnalignedABICall.
masm.ma_and(Imm32(~7), sp, sp);
masm.startDataTransferM(IsStore, sp, DB, WriteBack);
// We don't have to push everything, but this is likely easier.
// setting regs_
// Setting regs_.
for (uint32_t i = 0; i < Registers::Total; i++)
masm.transferReg(Register::FromCode(i));
masm.finishDataTransfer();
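
The ma_and(Imm32(~7), sp, sp) in the hunk above rounds the stack pointer down to an 8-byte boundary. A tiny standalone sketch of that mask trick, with a made-up address:

#include <cstdint>
#include <cstdio>

int main() {
    uint32_t sp = 0x7fff1234;
    uint32_t aligned = sp & ~7u;                      // Round down to the previous 8-byte boundary.
    std::printf("0x%08x -> 0x%08x\n", sp, aligned);   // 0x7fff1234 -> 0x7fff1230
}
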
@ -385,10 +386,11 @@ JitRuntime::generateInvalidator(JSContext *cx)
masm.ma_ldr(Address(sp, 0), r2);
masm.ma_ldr(Address(sp, sizeOfBailoutInfo), r1);
// Remove the return address, the IonScript, the register state
// (InvaliationBailoutStack) and the space that was allocated for the return value
// (InvaliationBailoutStack) and the space that was allocated for the return
// value.
masm.ma_add(sp, Imm32(sizeof(InvalidationBailoutStack) + sizeOfRetval + sizeOfBailoutInfo), sp);
// remove the space that this frame was using before the bailout
// (computed by InvalidationBailout)
// Remove the space that this frame was using before the bailout (computed
// by InvalidationBailout)
masm.ma_add(sp, r1, sp);
// Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
@ -415,7 +417,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// Including |this|, there are (|nargs| + 1) arguments to copy.
JS_ASSERT(ArgumentsRectifierReg == r8);
// Copy number of actual arguments into r0
// Copy number of actual arguments into r0.
masm.ma_ldr(DTRAddr(sp, DtrOffImm(IonRectifierFrameLayout::offsetOfNumActualArgs())), r0);
// Load the number of |undefined|s to push into r6.
@ -441,7 +443,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// Get the topmost argument.
masm.ma_alu(r3, lsl(r8, 3), r3, op_add); // r3 <- r3 + nargs * 8
masm.ma_alu(r3, lsl(r8, 3), r3, OpAdd); // r3 <- r3 + nargs * 8
masm.ma_add(r3, Imm32(sizeof(IonRectifierFrameLayout)), r3);
// Push arguments, |nargs| + 1 times (to include |this|).
@ -495,7 +497,7 @@ JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void *
// return address
// Discard pushed arguments.
masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, op_add);
masm.ma_alu(sp, lsr(r4, FRAMESIZE_SHIFT), sp, OpAdd);
masm.ret();
Linker linker(masm);
@ -524,12 +526,12 @@ PushBailoutFrame(MacroAssembler &masm, uint32_t frameClass, Register spArg)
// bailoutFrame.snapshotOffset
// bailoutFrame.frameSize
// STEP 1a: save our register sets to the stack so Bailout() can
// read everything.
// STEP 1a: Save our register sets to the stack so Bailout() can read
// everything.
// sp % 8 == 0
masm.startDataTransferM(IsStore, sp, DB, WriteBack);
// We don't have to push everything, but this is likely easier.
// setting regs_
// Setting regs_.
for (uint32_t i = 0; i < Registers::Total; i++)
masm.transferReg(Register::FromCode(i));
masm.finishDataTransfer();
@ -539,24 +541,24 @@ PushBailoutFrame(MacroAssembler &masm, uint32_t frameClass, Register spArg)
masm.transferFloatReg(FloatRegister::FromCode(i));
masm.finishFloatTransfer();
// STEP 1b: Push both the "return address" of the function call (the
// address of the instruction after the call that we used to get
// here) as well as the callee token onto the stack. The return
// address is currently in r14. We will proceed by loading the
// callee token into a sacrificial register <= r14, then pushing
// both onto the stack
// STEP 1b: Push both the "return address" of the function call (the address
// of the instruction after the call that we used to get here) as
// well as the callee token onto the stack. The return address is
// currently in r14. We will proceed by loading the callee token
// into a sacrificial register <= r14, then pushing both onto the
// stack.
// now place the frameClass onto the stack, via a register
// Now place the frameClass onto the stack, via a register.
masm.ma_mov(Imm32(frameClass), r4);
// And onto the stack. Since the stack is full, we need to put this
// one past the end of the current stack. Sadly, the ABI says that we need
// to always point to the lowest place that has been written. the OS is
// free to do whatever it wants below sp.
// And onto the stack. Since the stack is full, we need to put this one past
// the end of the current stack. Sadly, the ABI says that we need to always
// point to the lowest place that has been written. The OS is free to do
// whatever it wants below sp.
masm.startDataTransferM(IsStore, sp, DB, WriteBack);
// set frameClassId_
// Set frameClassId_.
masm.transferReg(r4);
// Set tableOffset_; higher registers are stored at higher locations on
// the stack.
// Set tableOffset_; higher registers are stored at higher locations on the
// stack.
masm.transferReg(lr);
masm.finishDataTransfer();
@ -570,17 +572,16 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass)
// SP % 8 == 4
// STEP 1c: Call the bailout function, giving a pointer to the
// structure we just blitted onto the stack
// structure we just blitted onto the stack.
const int sizeOfBailoutInfo = sizeof(void *)*2;
masm.reserveStack(sizeOfBailoutInfo);
masm.mov(sp, r1);
masm.setupAlignedABICall(2);
// Decrement sp by another 4, so we keep alignment
// Not Anymore! pushing both the snapshotoffset as well as the
// masm.as_sub(sp, sp, Imm8(4));
// Decrement sp by another 4, so we keep alignment. Not Anymore! Pushing
// both the snapshotoffset as well as the: masm.as_sub(sp, sp, Imm8(4));
// Set the old (4-byte aligned) value of the sp as the first argument
// Set the old (4-byte aligned) value of the sp as the first argument.
masm.passABIArg(r0);
masm.passABIArg(r1);
@ -594,11 +595,11 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass)
sizeof(void *) * Registers::Total;
if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
// Make sure the bailout frame size fits into the offset for a load
// Make sure the bailout frame size fits into the offset for a load.
masm.as_dtr(IsLoad, 32, Offset,
r4, DTRAddr(sp, DtrOffImm(4)));
// used to be: offsetof(BailoutStack, frameSize_)
// this structure is no longer available to us :(
// Used to be: offsetof(BailoutStack, frameSize_)
// This structure is no longer available to us :(
// We add 12 to the bailoutFrameSize because:
// sizeof(uint32_t) for the tableOffset that was pushed onto the stack
// sizeof(uintptr_t) for the snapshotOffset;
@ -607,9 +608,14 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass)
masm.as_add(sp, sp, O2Reg(r4));
} else {
uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
masm.ma_add(Imm32(frameSize // the frame that was added when we entered the most recent function
+ sizeof(void*) // the size of the "return address" that was dumped on the stack
+ bailoutFrameSize) // everything else that was pushed on the stack
masm.ma_add(Imm32(// The frame that was added when we entered the most
// recent function.
frameSize
// The size of the "return address" that was dumped on
// the stack.
+ sizeof(void*)
// Everything else that was pushed on the stack.
+ bailoutFrameSize)
, sp);
}
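
The interleaved comments above spell out what gets popped when the bailout thunk unwinds. A rough arithmetic sketch of that sum; every number below is a placeholder standing in for Registers::Total, FloatRegisters::Total and FrameSizeClass::frameSize(), not a value taken from the real code.

#include <cstddef>
#include <cstdio>

int main() {
    const size_t numGPRs = 16, numDoubles = 16;       // Placeholder register counts.
    const size_t voidPtr = 4, dbl = 8;                // sizeof(void*), sizeof(double) on ARM32.

    size_t bailoutFrameSize = dbl * numDoubles + voidPtr * numGPRs;
    size_t frameSize = 64;                            // Placeholder FrameSizeClass size.

    size_t pop = frameSize                            // Frame added when the function was entered.
               + sizeof(void*) * 0 + voidPtr          // The "return address" dumped on the stack.
               + bailoutFrameSize;                    // Register state pushed by PushBailoutFrame.
    std::printf("sp += %zu bytes\n", pop);
}
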
@ -621,14 +627,15 @@ GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass)
static void
GenerateParallelBailoutThunk(MacroAssembler &masm, uint32_t frameClass)
{
// As GenerateBailoutThunk, except we return an error immediately. We do
// the bailout dance so that we can walk the stack and have accurate
// reporting of frame information.
// As GenerateBailoutThunk, except we return an error immediately. We do the
// bailout dance so that we can walk the stack and have accurate reporting
// of frame information.
PushBailoutFrame(masm, frameClass, r0);
// Parallel bailout is like parallel failure in that we unwind all the way
// to the entry frame. Reserve space for the frame pointer of the entry frame.
// to the entry frame. Reserve space for the frame pointer of the entry
// frame.
const int sizeOfEntryFramePointer = sizeof(uint8_t *) * 2;
masm.reserveStack(sizeOfEntryFramePointer);
masm.mov(sp, r1);
@ -786,8 +793,8 @@ JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
argDisp += sizeof(void *);
break;
case VMFunction::DoubleByValue:
// Values should be passed by reference, not by value, so we
// assert that the argument is a double-precision float.
// Values should be passed by reference, not by value, so we assert
// that the argument is a double-precision float.
JS_ASSERT(f.argPassedInFloatReg(explicitArg));
masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
argDisp += sizeof(double);
@ -935,9 +942,9 @@ JitRuntime::generateDebugTrapHandler(JSContext *cx)
masm.mov(r11, scratch1);
masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
// Enter a stub frame and call the HandleDebugTrap VM function. Ensure
// the stub frame has a nullptr ICStub pointer, since this pointer is
// marked during GC.
// Enter a stub frame and call the HandleDebugTrap VM function. Ensure the
// stub frame has a nullptr ICStub pointer, since this pointer is marked
// during GC.
masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
EmitEnterStubFrame(masm, scratch2);
@ -951,9 +958,9 @@ JitRuntime::generateDebugTrapHandler(JSContext *cx)
EmitLeaveStubFrame(masm);
// If the stub returns |true|, we have to perform a forced return
// (return from the JS frame). If the stub returns |false|, just return
// from the trap stub so that execution continues at the current pc.
// If the stub returns |true|, we have to perform a forced return (return
// from the JS frame). If the stub returns |false|, just return from the
// trap stub so that execution continues at the current pc.
Label forcedReturn;
masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
masm.mov(lr, pc);

Просмотреть файл

@ -155,7 +155,7 @@ jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
Instruction *inst1 = (Instruction *)jump_.raw();
Instruction *inst2 = inst1->next();
Assembler::updateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
Assembler::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
AutoFlushICache::flush(uintptr_t(inst1), 8);
}
@ -177,8 +177,8 @@ Assembler::executableCopy(uint8_t *buffer)
for (size_t i = 0; i < longJumps_.length(); i++) {
Instruction *inst1 = (Instruction *) ((uint32_t)buffer + longJumps_[i]);
uint32_t value = extractLuiOriValue(inst1, inst1->next());
updateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
uint32_t value = ExtractLuiOriValue(inst1, inst1->next());
UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
}
AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
@ -226,16 +226,16 @@ class RelocationIterator
};
uintptr_t
Assembler::getPointer(uint8_t *instPtr)
Assembler::GetPointer(uint8_t *instPtr)
{
Instruction *inst = (Instruction*)instPtr;
return Assembler::extractLuiOriValue(inst, inst->next());
return Assembler::ExtractLuiOriValue(inst, inst->next());
}
static JitCode *
CodeFromJump(Instruction *jump)
{
uint8_t *target = (uint8_t *)Assembler::extractLuiOriValue(jump, jump->next());
uint8_t *target = (uint8_t *)Assembler::ExtractLuiOriValue(jump, jump->next());
return JitCode::FromExecutable(target);
}
@ -255,7 +255,7 @@ TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader
while (reader.more()) {
size_t offset = reader.readUnsigned();
Instruction *inst = (Instruction*)(buffer + offset);
void *ptr = (void *)Assembler::extractLuiOriValue(inst, inst->next());
void *ptr = (void *)Assembler::ExtractLuiOriValue(inst, inst->next());
// No barrier needed since these are constants.
gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
@ -269,7 +269,7 @@ TraceDataRelocations(JSTracer *trc, MIPSBuffer *buffer, CompactBufferReader &rea
BufferOffset bo (reader.readUnsigned());
MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);
void *ptr = (void *)Assembler::extractLuiOriValue(iter.cur(), iter.next());
void *ptr = (void *)Assembler::ExtractLuiOriValue(iter.cur(), iter.next());
// No barrier needed since these are constants.
gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
@ -330,9 +330,9 @@ Assembler::processCodeLabels(uint8_t *rawCode)
}
int32_t
Assembler::extractCodeLabelOffset(uint8_t *code) {
Assembler::ExtractCodeLabelOffset(uint8_t *code) {
InstImm *inst = (InstImm *)code;
return Assembler::extractLuiOriValue(inst, inst->next());
return Assembler::ExtractLuiOriValue(inst, inst->next());
}
void
@ -342,8 +342,8 @@ Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address)
int32_t src = label->offset();
do {
Instruction *inst = (Instruction *) (rawCode + src);
uint32_t next = Assembler::extractLuiOriValue(inst, inst->next());
Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)address);
uint32_t next = Assembler::ExtractLuiOriValue(inst, inst->next());
Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
src = next;
} while (src != AbsoluteLabel::INVALID_OFFSET);
}
@ -490,12 +490,12 @@ Assembler::writeInst(uint32_t x, uint32_t *dest)
if (dest == nullptr)
return m_buffer.putInt(x);
writeInstStatic(x, dest);
WriteInstStatic(x, dest);
return BufferOffset();
}
void
Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
Assembler::WriteInstStatic(uint32_t x, uint32_t *dest)
{
MOZ_ASSERT(dest != nullptr);
*dest = x;
@ -557,21 +557,21 @@ Assembler::as_nor(Register rd, Register rs, Register rt)
BufferOffset
Assembler::as_andi(Register rd, Register rs, int32_t j)
{
MOZ_ASSERT(Imm16::isInUnsignedRange(j));
MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
}
BufferOffset
Assembler::as_ori(Register rd, Register rs, int32_t j)
{
MOZ_ASSERT(Imm16::isInUnsignedRange(j));
MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
}
BufferOffset
Assembler::as_xori(Register rd, Register rs, int32_t j)
{
MOZ_ASSERT(Imm16::isInUnsignedRange(j));
MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
}
@ -672,7 +672,7 @@ Assembler::as_addu(Register rd, Register rs, Register rt)
BufferOffset
Assembler::as_addiu(Register rd, Register rs, int32_t j)
{
MOZ_ASSERT(Imm16::isInSignedRange(j));
MOZ_ASSERT(Imm16::IsInSignedRange(j));
return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
}
@ -715,7 +715,7 @@ Assembler::as_mul(Register rd, Register rs, Register rt)
BufferOffset
Assembler::as_lui(Register rd, int32_t j)
{
MOZ_ASSERT(Imm16::isInUnsignedRange(j));
MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
}
@ -874,14 +874,14 @@ Assembler::as_sltu(Register rd, Register rs, Register rt)
BufferOffset
Assembler::as_slti(Register rd, Register rs, int32_t j)
{
MOZ_ASSERT(Imm16::isInSignedRange(j));
MOZ_ASSERT(Imm16::IsInSignedRange(j));
return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
}
BufferOffset
Assembler::as_sltiu(Register rd, Register rs, uint32_t j)
{
MOZ_ASSERT(Imm16::isInUnsignedRange(j));
MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
}
@ -943,28 +943,28 @@ Assembler::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size)
BufferOffset
Assembler::as_ld(FloatRegister fd, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::isInSignedRange(off));
MOZ_ASSERT(Imm16::IsInSignedRange(off));
return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
}
BufferOffset
Assembler::as_sd(FloatRegister fd, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::isInSignedRange(off));
MOZ_ASSERT(Imm16::IsInSignedRange(off));
return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
}
BufferOffset
Assembler::as_ls(FloatRegister fd, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::isInSignedRange(off));
MOZ_ASSERT(Imm16::IsInSignedRange(off));
return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
}
BufferOffset
Assembler::as_ss(FloatRegister fd, Register base, int32_t off)
{
MOZ_ASSERT(Imm16::isInSignedRange(off));
MOZ_ASSERT(Imm16::IsInSignedRange(off));
return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
}
@ -1253,7 +1253,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
MOZ_ASSERT(BOffImm16::isInRange(offset));
MOZ_ASSERT(BOffImm16::IsInRange(offset));
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
return;
@ -1263,13 +1263,13 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
// address after the reserved block.
if (inst[0].encode() == inst_bgezal.encode()) {
addLongJump(BufferOffset(branch));
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
// There is 1 nop after this.
return;
}
if (BOffImm16::isInRange(offset)) {
if (BOffImm16::IsInRange(offset)) {
bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
inst[0].encode() != inst_beq.encode());
@ -1287,7 +1287,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
if (inst[0].encode() == inst_beq.encode()) {
// Handle long unconditional jump.
addLongJump(BufferOffset(branch));
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
} else {
@ -1295,7 +1295,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void *)));
// No need for a "nop" here because we can clobber scratch.
addLongJump(BufferOffset(branch + sizeof(void *)));
writeLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
// There is 1 nop after this.
}
@ -1312,7 +1312,7 @@ Assembler::bind(RepatchLabel *label)
Instruction *inst1 = editSrc(b);
Instruction *inst2 = inst1->next();
updateLuiOriValue(inst1, inst2, dest.getOffset());
UpdateLuiOriValue(inst1, inst2, dest.getOffset());
}
label->bind(dest.getOffset());
}
@ -1363,32 +1363,32 @@ Assembler::as_break(uint32_t code)
}
uint32_t
Assembler::patchWrite_NearCallSize()
Assembler::PatchWrite_NearCallSize()
{
return 4 * sizeof(uint32_t);
}
void
Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
Instruction *inst = (Instruction *) start.raw();
uint8_t *dest = toCall.raw();
// Overwrite whatever instruction used to be here with a call.
// Always use long jump for two reasons:
// - Jump has to be the same size because of patchWrite_NearCallSize.
// - Jump has to be the same size because of PatchWrite_NearCallSize.
// - Return address has to be at the end of replaced block.
// Short jump wouldn't be more efficient.
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
inst[3] = InstNOP();
// Ensure everyone sees the code that was just written into memory.
AutoFlushICache::flush(uintptr_t(inst), patchWrite_NearCallSize());
AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}
uint32_t
Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1)
Assembler::ExtractLuiOriValue(Instruction *inst0, Instruction *inst1)
{
InstImm *i0 = (InstImm *) inst0;
InstImm *i1 = (InstImm *) inst1;
@ -1401,43 +1401,43 @@ Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1)
}
void
Assembler::updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value)
Assembler::UpdateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value)
{
MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
((InstImm *) inst0)->setImm16(Imm16::upper(Imm32(value)));
((InstImm *) inst1)->setImm16(Imm16::lower(Imm32(value)));
((InstImm *) inst0)->setImm16(Imm16::Upper(Imm32(value)));
((InstImm *) inst1)->setImm16(Imm16::Lower(Imm32(value)));
}
void
Assembler::writeLuiOriInstructions(Instruction *inst0, Instruction *inst1,
Assembler::WriteLuiOriInstructions(Instruction *inst0, Instruction *inst1,
Register reg, uint32_t value)
{
*inst0 = InstImm(op_lui, zero, reg, Imm16::upper(Imm32(value)));
*inst1 = InstImm(op_ori, reg, reg, Imm16::lower(Imm32(value)));
*inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
*inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
}
void
Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
PatchedImmPtr expectedValue)
{
Instruction *inst = (Instruction *) label.raw();
// Extract old Value
DebugOnly<uint32_t> value = Assembler::extractLuiOriValue(&inst[0], &inst[1]);
DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
MOZ_ASSERT(value == uint32_t(expectedValue.value));
// Replace with new value
Assembler::updateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
AutoFlushICache::flush(uintptr_t(inst), 8);
}
void
Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
{
patchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
PatchedImmPtr(expectedValue.value));
}
@ -1447,7 +1447,7 @@ Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, Imm
// be totally safe. Since that instruction will never be executed again, a
// ICache flush should not be necessary
void
Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
{
// Raw is going to be the return address.
uint32_t *raw = (uint32_t*)label.raw();
@ -1457,14 +1457,14 @@ Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
}
void
Assembler::patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm)
Assembler::PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm)
{
InstImm *inst = (InstImm *)code;
Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
}
uint8_t *
Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
Assembler::NextInstruction(uint8_t *inst_, uint32_t *count)
{
Instruction *inst = reinterpret_cast<Instruction*>(inst_);
if (count != nullptr)
@ -1578,11 +1578,11 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
AutoFlushICache::flush(uintptr_t(i2), 4);
}
void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction *inst)
{
InstImm *i0 = (InstImm *) inst;
InstImm *i1 = (InstImm *) i0->next();
// Replace with new value
Assembler::updateLuiOriValue(i0, i1, heapSize);
Assembler::UpdateLuiOriValue(i0, i1, heapSize);
}

Просмотреть файл

@ -438,9 +438,9 @@ class BOffImm16
: data ((offset - 4) >> 2 & Imm16Mask)
{
MOZ_ASSERT((offset & 0x3) == 0);
MOZ_ASSERT(isInRange(offset));
MOZ_ASSERT(IsInRange(offset));
}
static bool isInRange(int offset) {
static bool IsInRange(int offset) {
if ((offset - 4) < (INT16_MIN << 2))
return false;
if ((offset - 4) > (INT16_MAX << 2))
@ -479,9 +479,9 @@ class JOffImm26
: data ((offset - 4) >> 2 & Imm26Mask)
{
MOZ_ASSERT((offset & 0x3) == 0);
MOZ_ASSERT(isInRange(offset));
MOZ_ASSERT(IsInRange(offset));
}
static bool isInRange(int offset) {
static bool IsInRange(int offset) {
if ((offset - 4) < -536870912)
return false;
if ((offset - 4) > 536870908)
@ -518,16 +518,16 @@ class Imm16
uint32_t decodeUnsigned() {
return value;
}
static bool isInSignedRange(int32_t imm) {
static bool IsInSignedRange(int32_t imm) {
return imm >= INT16_MIN && imm <= INT16_MAX;
}
static bool isInUnsignedRange(uint32_t imm) {
static bool IsInUnsignedRange(uint32_t imm) {
return imm <= UINT16_MAX ;
}
static Imm16 lower (Imm32 imm) {
static Imm16 Lower (Imm32 imm) {
return Imm16(imm.value & 0xffff);
}
static Imm16 upper (Imm32 imm) {
static Imm16 Upper (Imm32 imm) {
return Imm16((imm.value >> 16) & 0xffff);
}
};
@ -749,7 +749,7 @@ class Assembler : public AssemblerShared
}
public:
static uintptr_t getPointer(uint8_t *);
static uintptr_t GetPointer(uint8_t *);
bool oom() const;
@ -791,7 +791,7 @@ class Assembler : public AssemblerShared
BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
// A static variant for the cases where we don't want to have an assembler
// object at all. Normally, you would use the dummy (nullptr) object.
static void writeInstStatic(uint32_t x, uint32_t *dest);
static void WriteInstStatic(uint32_t x, uint32_t *dest);
public:
BufferOffset align(int alignment);
@ -1012,37 +1012,37 @@ class Assembler : public AssemblerShared
void flushBuffer() {
}
static uint32_t patchWrite_NearCallSize();
static uint32_t nopSize() { return 4; }
static uint32_t PatchWrite_NearCallSize();
static uint32_t NopSize() { return 4; }
static uint32_t extractLuiOriValue(Instruction *inst0, Instruction *inst1);
static void updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value);
static void writeLuiOriInstructions(Instruction *inst, Instruction *inst1,
static uint32_t ExtractLuiOriValue(Instruction *inst0, Instruction *inst1);
static void UpdateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value);
static void WriteLuiOriInstructions(Instruction *inst, Instruction *inst1,
Register reg, uint32_t value);
static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
PatchedImmPtr expectedValue);
static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue);
static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm);
static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm);
static uint32_t alignDoubleArg(uint32_t offset) {
static uint32_t AlignDoubleArg(uint32_t offset) {
return (offset + 1U) &~ 1U;
}
static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
static uint8_t *NextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
static void ToggleToJmp(CodeLocationLabel inst_);
static void ToggleToCmp(CodeLocationLabel inst_);
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
void processCodeLabels(uint8_t *rawCode);
static int32_t extractCodeLabelOffset(uint8_t *code);
static int32_t ExtractCodeLabelOffset(uint8_t *code);
bool bailed() {
return m_buffer.bail();

Просмотреть файл

@ -1862,7 +1862,7 @@ CodeGeneratorMIPS::generateInvalidateEpilogue()
// Ensure that there is enough space in the buffer for the OsiPoint
// patching to occur. Otherwise, we could overwrite the invalidation
// epilogue.
for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize())
for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize())
masm.nop();
masm.bind(&invalidate_);

Просмотреть файл

@ -267,28 +267,28 @@ MacroAssemblerMIPS::ma_li(Register dest, AbsoluteLabel *label)
void
MacroAssemblerMIPS::ma_li(Register dest, Imm32 imm)
{
if (Imm16::isInSignedRange(imm.value)) {
if (Imm16::IsInSignedRange(imm.value)) {
as_addiu(dest, zero, imm.value);
} else if (Imm16::isInUnsignedRange(imm.value)) {
as_ori(dest, zero, Imm16::lower(imm).encode());
} else if (Imm16::lower(imm).encode() == 0) {
as_lui(dest, Imm16::upper(imm).encode());
} else if (Imm16::IsInUnsignedRange(imm.value)) {
as_ori(dest, zero, Imm16::Lower(imm).encode());
} else if (Imm16::Lower(imm).encode() == 0) {
as_lui(dest, Imm16::Upper(imm).encode());
} else {
as_lui(dest, Imm16::upper(imm).encode());
as_ori(dest, dest, Imm16::lower(imm).encode());
as_lui(dest, Imm16::Upper(imm).encode());
as_ori(dest, dest, Imm16::Lower(imm).encode());
}
}
// This method generates lui and ori instruction pair that can be modified by
// updateLuiOriValue, either during compilation (eg. Assembler::bind), or
// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
// during execution (eg. jit::PatchJump).
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
{
m_buffer.ensureSpace(2 * sizeof(uint32_t));
as_lui(dest, Imm16::upper(imm).encode());
as_ori(dest, dest, Imm16::lower(imm).encode());
as_lui(dest, Imm16::Upper(imm).encode());
as_ori(dest, dest, Imm16::Lower(imm).encode());
}
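
For reference, a compact sketch of the selection logic ma_li uses above when materializing a 32-bit immediate on MIPS: one addiu, one ori, one lui, or a lui/ori pair, depending on which 16-bit range the value fits. The pseudo-assembly strings are only descriptive.

#include <cstdint>
#include <cstdio>

static const char *materialize(int32_t imm) {
    if (imm >= INT16_MIN && imm <= INT16_MAX)
        return "addiu rd, $zero, imm";                // Fits a signed 16-bit immediate.
    if (static_cast<uint32_t>(imm) <= UINT16_MAX)
        return "ori rd, $zero, lo16(imm)";            // Fits an unsigned 16-bit immediate.
    if ((imm & 0xffff) == 0)
        return "lui rd, hi16(imm)";                   // Low half is zero; one lui suffices.
    return "lui rd, hi16(imm); ori rd, rd, lo16(imm)"; // General case: two instructions.
}

int main() {
    std::printf("%s\n%s\n%s\n%s\n",
                materialize(-5), materialize(0xffff),
                materialize(0x70000), materialize(0x12345678));
}
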
void
@ -392,7 +392,7 @@ MacroAssemblerMIPS::ma_and(Register rd, Imm32 imm)
void
MacroAssemblerMIPS::ma_and(Register rd, Register rs, Imm32 imm)
{
if (Imm16::isInUnsignedRange(imm.value)) {
if (Imm16::IsInUnsignedRange(imm.value)) {
as_andi(rd, rs, imm.value);
} else {
ma_li(ScratchRegister, imm);
@ -422,7 +422,7 @@ MacroAssemblerMIPS::ma_or(Register rd, Imm32 imm)
void
MacroAssemblerMIPS::ma_or(Register rd, Register rs, Imm32 imm)
{
if (Imm16::isInUnsignedRange(imm.value)) {
if (Imm16::IsInUnsignedRange(imm.value)) {
as_ori(rd, rs, imm.value);
} else {
ma_li(ScratchRegister, imm);
@ -452,7 +452,7 @@ MacroAssemblerMIPS::ma_xor(Register rd, Imm32 imm)
void
MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm)
{
if (Imm16::isInUnsignedRange(imm.value)) {
if (Imm16::IsInUnsignedRange(imm.value)) {
as_xori(rd, rs, imm.value);
} else {
ma_li(ScratchRegister, imm);
@ -466,7 +466,7 @@ MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm)
void
MacroAssemblerMIPS::ma_addu(Register rd, Register rs, Imm32 imm)
{
if (Imm16::isInSignedRange(imm.value)) {
if (Imm16::IsInSignedRange(imm.value)) {
as_addiu(rd, rs, imm.value);
} else {
ma_li(ScratchRegister, imm);
@ -507,7 +507,7 @@ MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Labe
{
// Check for signed range because of as_addiu
// Check for unsigned range because of as_xori
if (Imm16::isInSignedRange(imm.value) && Imm16::isInUnsignedRange(imm.value)) {
if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
Label goodAddition;
as_addiu(rd, rs, imm.value);
@ -536,7 +536,7 @@ MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Register rt)
void
MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Imm32 imm)
{
if (Imm16::isInSignedRange(-imm.value)) {
if (Imm16::IsInSignedRange(-imm.value)) {
as_addiu(rd, rs, -imm.value);
} else {
ma_li(ScratchRegister, imm);
@ -701,7 +701,7 @@ MacroAssemblerMIPS::ma_load(Register dest, Address address,
{
int16_t encodedOffset;
Register base;
if (!Imm16::isInSignedRange(address.offset)) {
if (!Imm16::IsInSignedRange(address.offset)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_addu(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
@ -747,7 +747,7 @@ MacroAssemblerMIPS::ma_store(Register data, Address address, LoadStoreSize size,
{
int16_t encodedOffset;
Register base;
if (!Imm16::isInSignedRange(address.offset)) {
if (!Imm16::IsInSignedRange(address.offset)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_addu(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
@ -828,7 +828,7 @@ MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
MOZ_ASSERT(address.base != ScratchRegister);
ma_li(ScratchRegister, imm);
if (Imm16::isInSignedRange(address.offset)) {
if (Imm16::IsInSignedRange(address.offset)) {
as_sw(ScratchRegister, address.base, Imm16(address.offset).encode());
} else {
MOZ_ASSERT(address.base != SecondScratchReg);
@ -939,11 +939,11 @@ MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind
if (label->bound()) {
int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
if (BOffImm16::isInRange(offset))
if (BOffImm16::IsInRange(offset))
jumpKind = ShortJump;
if (jumpKind == ShortJump) {
MOZ_ASSERT(BOffImm16::isInRange(offset));
MOZ_ASSERT(BOffImm16::IsInRange(offset));
code.setBOffImm16(BOffImm16(offset));
writeInst(code.encode());
as_nop();
@ -1358,7 +1358,7 @@ MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
void
MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
{
if (Imm16::isInSignedRange(address.offset)) {
if (Imm16::IsInSignedRange(address.offset)) {
as_ls(ft, address.base, Imm16(address.offset).encode());
} else {
MOZ_ASSERT(address.base != ScratchRegister);
@ -1375,7 +1375,7 @@ MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
// alignment.
int32_t off2 = address.offset + TAG_OFFSET;
if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
as_ls(ft, address.base, Imm16(address.offset).encode());
as_ls(getOddPair(ft), address.base, Imm16(off2).encode());
} else {
@ -1390,7 +1390,7 @@ void
MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
{
int32_t off2 = address.offset + TAG_OFFSET;
if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
as_ss(ft, address.base, Imm16(address.offset).encode());
as_ss(getOddPair(ft), address.base, Imm16(off2).encode());
} else {
@ -1411,7 +1411,7 @@ MacroAssemblerMIPS::ma_sd(FloatRegister ft, BaseIndex address)
void
MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
{
if (Imm16::isInSignedRange(address.offset)) {
if (Imm16::IsInSignedRange(address.offset)) {
as_ss(ft, address.base, Imm16(address.offset).encode());
} else {
ma_li(ScratchRegister, Imm32(address.offset));
@ -2845,7 +2845,7 @@ MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex d
// Make sure that ma_sw doesn't clobber ScratchRegister
int32_t offset = dest.offset;
if (!Imm16::isInSignedRange(offset)) {
if (!Imm16::IsInSignedRange(offset)) {
ma_li(SecondScratchReg, Imm32(offset));
as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
offset = 0;
@ -2889,7 +2889,7 @@ MacroAssemblerMIPSCompat::storeValue(const Value &val, BaseIndex dest)
// Make sure that ma_sw doesn't clobber ScratchRegister
int32_t offset = dest.offset;
if (!Imm16::isInSignedRange(offset)) {
if (!Imm16::IsInSignedRange(offset)) {
ma_li(SecondScratchReg, Imm32(offset));
as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
offset = 0;

Просмотреть файл

@ -42,7 +42,7 @@ Address
MoveEmitterMIPS::cycleSlot() const
{
int offset = masm.framePushed() - pushedAtCycle_;
MOZ_ASSERT(Imm16::isInSignedRange(offset));
MOZ_ASSERT(Imm16::IsInSignedRange(offset));
return Address(StackPointer, offset);
}

Просмотреть файл

@ -248,7 +248,7 @@ class AssemblerX86Shared : public AssemblerShared
MOZ_ASSUME_UNREACHABLE("Unknown double condition");
}
static void staticAsserts() {
static void StaticAsserts() {
// DoubleConditionBits should not interfere with x86 condition codes.
JS_STATIC_ASSERT(!((Equal | NotEqual | Above | AboveOrEqual | Below |
BelowOrEqual | Parity | NoParity) & DoubleConditionBits));
@ -282,7 +282,7 @@ class AssemblerX86Shared : public AssemblerShared
void executableCopy(void *buffer);
void processCodeLabels(uint8_t *rawCode);
static int32_t extractCodeLabelOffset(uint8_t *code) {
static int32_t ExtractCodeLabelOffset(uint8_t *code) {
return *(uintptr_t *)code;
}
void copyJumpRelocationTable(uint8_t *dest);
@ -1640,46 +1640,46 @@ class AssemblerX86Shared : public AssemblerShared
// Patching.
static size_t patchWrite_NearCallSize() {
static size_t PatchWrite_NearCallSize() {
return 5;
}
static uintptr_t getPointer(uint8_t *instPtr) {
static uintptr_t GetPointer(uint8_t *instPtr) {
uintptr_t *ptr = ((uintptr_t *) instPtr) - 1;
return *ptr;
}
// Write a relative call at the start location |dataLabel|.
// Note that this DOES NOT patch data that comes before |label|.
static void patchWrite_NearCall(CodeLocationLabel startLabel, CodeLocationLabel target) {
static void PatchWrite_NearCall(CodeLocationLabel startLabel, CodeLocationLabel target) {
uint8_t *start = startLabel.raw();
*start = 0xE8;
ptrdiff_t offset = target - startLabel - patchWrite_NearCallSize();
ptrdiff_t offset = target - startLabel - PatchWrite_NearCallSize();
JS_ASSERT(int32_t(offset) == offset);
*((int32_t *) (start + 1)) = offset;
}
static void patchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) {
static void PatchWrite_Imm32(CodeLocationLabel dataLabel, Imm32 toWrite) {
*((int32_t *) dataLabel.raw() - 1) = toWrite.value;
}
static void patchDataWithValueCheck(CodeLocationLabel data, PatchedImmPtr newData,
static void PatchDataWithValueCheck(CodeLocationLabel data, PatchedImmPtr newData,
PatchedImmPtr expectedData) {
// The pointer given is a pointer to *after* the data.
uintptr_t *ptr = ((uintptr_t *) data.raw()) - 1;
JS_ASSERT(*ptr == (uintptr_t)expectedData.value);
*ptr = (uintptr_t)newData.value;
}
static void patchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData, ImmPtr expectedData) {
patchDataWithValueCheck(data, PatchedImmPtr(newData.value), PatchedImmPtr(expectedData.value));
static void PatchDataWithValueCheck(CodeLocationLabel data, ImmPtr newData, ImmPtr expectedData) {
PatchDataWithValueCheck(data, PatchedImmPtr(newData.value), PatchedImmPtr(expectedData.value));
}
static void patchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
static void PatchInstructionImmediate(uint8_t *code, PatchedImmPtr imm) {
MOZ_ASSUME_UNREACHABLE("Unused.");
}
static uint32_t nopSize() {
static uint32_t NopSize() {
return 1;
}
static uint8_t *nextInstruction(uint8_t *cur, uint32_t *count) {
static uint8_t *NextInstruction(uint8_t *cur, uint32_t *count) {
MOZ_ASSUME_UNREACHABLE("nextInstruction NYI on x86");
}

Просмотреть файл

@ -411,13 +411,13 @@ CodeGeneratorShared::ensureOsiSpace()
//
// At points where we want to ensure that invalidation won't corrupt an
// important instruction, we make sure to pad with nops.
if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::patchWrite_NearCallSize()) {
int32_t paddingSize = Assembler::patchWrite_NearCallSize();
if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::PatchWrite_NearCallSize()) {
int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
for (int32_t i = 0; i < paddingSize; ++i)
masm.nop();
}
JS_ASSERT(masm.currentOffset() - lastOsiPointOffset_ >= Assembler::patchWrite_NearCallSize());
JS_ASSERT(masm.currentOffset() - lastOsiPointOffset_ >= Assembler::PatchWrite_NearCallSize());
lastOsiPointOffset_ = masm.currentOffset();
}
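
A sketch of the padding rule ensureOsiSpace enforces above: keep at least a near-call's worth of bytes between consecutive OSI points so that patching one cannot clobber the other. The 5-byte figure is the x86 value from PatchWrite_NearCallSize above; the offsets are made up.

#include <cstdio>

int main() {
    const int nearCallSize = 5;                       // x86 near call size.
    int lastOsiPointOffset = 100;
    int currentOffset = 102;                          // Only 2 bytes since the last OSI point.

    int padding = 0;
    if (currentOffset - lastOsiPointOffset < nearCallSize)
        padding = nearCallSize - (currentOffset - lastOsiPointOffset);

    currentOffset += padding;                         // Each nop is 1 byte on x86.
    std::printf("emitted %d nops, gap now %d\n", padding, currentOffset - lastOsiPointOffset);
}
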

Просмотреть файл

@ -2006,7 +2006,7 @@ CodeGeneratorX86Shared::generateInvalidateEpilogue()
// Ensure that there is enough space in the buffer for the OsiPoint
// patching to occur. Otherwise, we could overwrite the invalidation
// epilogue.
for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
for (size_t i = 0; i < sizeof(void *); i += Assembler::NopSize())
masm.nop();
masm.bind(&invalidate_);

Просмотреть файл

@ -13,9 +13,11 @@
namespace js {
namespace jit {
// This should theoretically reside inside of AssemblerBuffer, but that won't be nice
// AssemblerBuffer is templated, BufferOffset would be indirectly.
// A BufferOffset is the offset into a buffer, expressed in bytes of instructions.
// This should theoretically reside inside of AssemblerBuffer, but that won't be
// nice AssemblerBuffer is templated, BufferOffset would be indirectly.
//
// A BufferOffset is the offset into a buffer, expressed in bytes of
// instructions.
class BufferOffset
{
@ -27,10 +29,10 @@ class BufferOffset
int getOffset() const { return offset; }
// A BOffImm is a Branch Offset Immediate. It is an architecture-specific
// structure that holds the immediate for a pc relative branch.
// diffB takes the label for the destination of the branch, and encodes
// the immediate for the branch. This will need to be fixed up later, since
// A pool may be inserted between the branch and its destination
// structure that holds the immediate for a pc relative branch. diffB takes
// the label for the destination of the branch, and encodes the immediate
// for the branch. This will need to be fixed up later, since A pool may be
// inserted between the branch and its destination.
template <class BOffImm>
BOffImm diffB(BufferOffset other) const {
return BOffImm(offset - other.offset);
@ -84,7 +86,8 @@ template<int SliceSize, class Inst>
struct AssemblerBuffer
{
public:
AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false),
m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
protected:
typedef BufferSlice<SliceSize> Slice;
typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
@ -93,12 +96,12 @@ struct AssemblerBuffer
public:
bool m_oom;
bool m_bail;
// How much data has been added to the buffer thusfar.
// How much data has been added to the buffer thus far.
uint32_t bufferSize;
uint32_t lastInstSize;
bool isAligned(int alignment) const {
// make sure the requested alignment is a power of two.
JS_ASSERT((alignment & (alignment-1)) == 0);
// Make sure the requested alignment is a power of two.
JS_ASSERT(IsPowerOfTwo(alignment));
return !(size() & (alignment - 1));
}
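
The two bit tricks used by isAligned() above, as a standalone sketch: the IsPowerOfTwo test and the alignment test via masking with (alignment - 1).

#include <cstdint>
#include <cstdio>

static bool is_power_of_two(uint32_t x) {
    return x != 0 && (x & (x - 1)) == 0;              // Exactly one bit set.
}

static bool is_aligned(uint32_t size, uint32_t alignment) {
    // Only valid when alignment is a power of two, hence the assert in the real code.
    return (size & (alignment - 1)) == 0;
}

int main() {
    std::printf("%d %d %d\n", is_power_of_two(8), is_aligned(24, 8), is_aligned(20, 8)); // 1 1 0
}
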
virtual Slice *newSlice(LifoAlloc &a) {
@ -111,7 +114,7 @@ struct AssemblerBuffer
return tmp;
}
bool ensureSpace(int size) {
if (tail != nullptr && tail->size()+size <= SliceSize)
if (tail != nullptr && tail->size() + size <= SliceSize)
return true;
Slice *tmp = newSlice(LifoAlloc_);
if (tmp == nullptr)
@ -170,22 +173,22 @@ struct AssemblerBuffer
void fail_bail() {
m_bail = true;
}
// finger for speeding up accesses
// Finger for speeding up accesses.
Slice *finger;
unsigned int finger_offset;
Inst *getInst(BufferOffset off) {
int local_off = off.getOffset();
// don't update the structure's finger in place, so there is the option
// Don't update the structure's finger in place, so there is the option
// to not update it.
Slice *cur = nullptr;
int cur_off;
// get the offset that we'd be dealing with by walking through backwards
// Get the offset that we'd be dealing with by walking through
// backwards.
int end_off = bufferSize - local_off;
// If end_off is negative, then it is in the last chunk, and there is no
// real work to be done.
if (end_off <= 0) {
if (end_off <= 0)
return (Inst*)&tail->instructions[-end_off];
}
bool used_finger = false;
int finger_off = abs((int)(local_off - finger_offset));
if (finger_off < Min(local_off, end_off)) {
@ -194,11 +197,11 @@ struct AssemblerBuffer
cur_off = finger_offset;
used_finger = true;
} else if (local_off < end_off) {
// it is closest to the start
// It is closest to the start.
cur = head;
cur_off = 0;
} else {
// it is closest to the end
// It is closest to the end.
cur = tail;
cur_off = bufferSize;
}
@ -228,7 +231,8 @@ struct AssemblerBuffer
finger = cur;
finger_offset = cur_off;
}
// the offset within this node should not be larger than the node itself.
// The offset within this node should not be larger than the node
// itself.
JS_ASSERT(local_off < (int)cur->size());
return (Inst*)&cur->instructions[local_off];
}
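
A simplified sketch of the starting-point choice getInst() makes above: walk from whichever of head, tail, or the cached finger is closest to the requested offset. The early exit for offsets in the last slice and the slice walking itself are omitted; the numbers are placeholders.

#include <cstdio>
#include <cstdlib>

int main() {
    int bufferSize = 1000, finger_offset = 600;
    int local_off = 650;                              // Offset we want to reach.

    int from_head = local_off;                        // Forward walk from the first slice.
    int from_tail = bufferSize - local_off;           // Backward walk from the last slice.
    int from_finger = std::abs(local_off - finger_offset); // Walk from the cached finger.

    const char *start = "head";
    int best = from_head;
    if (from_tail < best) { best = from_tail; start = "tail"; }
    if (from_finger < best) { best = from_finger; start = "finger"; }
    std::printf("start at %s, distance %d\n", start, best); // start at finger, distance 50
}
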
@ -272,7 +276,7 @@ struct AssemblerBuffer
AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_ *buff) : bo(off), m_buffer(buff) {}
Inst *next() {
Inst *i = m_buffer->getInst(bo);
bo = BufferOffset(bo.getOffset()+i->size());
bo = BufferOffset(bo.getOffset() + i->size());
return cur();
};
Inst *cur() {

Просмотреть файл

@ -30,21 +30,22 @@ struct Pool
public:
const bool isBackref;
const bool canDedup;
// "other" is the backwards half of this pool, it is held in another pool structure
// "other" is the backwards half of this pool, it is held in another pool structure.
Pool *other;
uint8_t *poolData;
uint32_t numEntries;
uint32_t buffSize;
LoadOffsets loadOffsets;
// When filling pools where the the size of an immediate is larger
// than the size of an instruction, we find we're in a case where the distance between the
// next instruction and the next pool slot is increasing!
// When filling pools where the size of an immediate is larger than the
// size of an instruction, we find we're in a case where the distance
// between the next instruction and the next pool slot is increasing!
// Moreover, If we want to do fancy things like deduplicate pool entries at
// dump time, we may not know the location in a pool (and thus the limiting load)
// until very late.
// Lastly, it may be beneficial to interleave the pools. I have absolutely no idea
// how that will work, but my suspicions are that it will be difficult.
// Lastly, it may be beneficial to interleave the pools. I have absolutely
// no idea how that will work, but my suspicions are that it will be
// difficult.
BufferOffset limitingUser;
int limitingUsee;
@ -63,12 +64,12 @@ struct Pool
alignment(garbage), isBackref(garbage), canDedup(garbage), other((Pool*)garbage)
{
}
// Sometimes, when we are adding large values to a pool, the limiting use may change.
// Handle this case. nextInst is the address of the
// Sometimes, when we are adding large values to a pool, the limiting use
// may change. Handle this case. The nextInst is the address of the
void updateLimiter(BufferOffset nextInst) {
int oldRange, newRange;
if (isBackref) {
// common expressions that are not subtracted: the location of the pool, ...
// Common expressions that are not subtracted: the location of the pool, ...
oldRange = limitingUser.getOffset() - ((numEntries - limitingUsee) * immSize);
newRange = nextInst.getOffset();
} else {
@ -81,10 +82,9 @@ struct Pool
limitingUsee = numEntries;
}
}
// checkFull is called before any modifications have been made.
// It is "if we were to add this instruction and pool entry,
// would we be in an invalid state?". If it is true, then it is in fact
// time for a "pool dump".
// checkFull is called before any modifications have been made. It is "if
// we were to add this instruction and pool entry, would we be in an invalid
// state?". If it is true, then it is in fact time for a "pool dump".
// poolOffset is the distance from the end of the current section to the end of the pool.
// For the last section of the pool, this will be the size of the footer
@ -98,23 +98,22 @@ struct Pool
bool checkFullBackref(int poolOffset, int codeOffset) {
if (!limitingUser.assigned())
return false;
signed int distance =
limitingUser.getOffset() + bias
- codeOffset + poolOffset +
signed int distance = limitingUser.getOffset() + bias - codeOffset + poolOffset +
(numEntries - limitingUsee + 1) * immSize;
if (distance >= maxOffset)
return true;
return false;
}
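
Both range checks reduce to the same question: would the displacement between a load and its pool slot exceed what the instruction encoding can express? A stripped-down sketch of that core test, with made-up parameter names (the real checks above also fold in the per-entry sizes and the backwards/forwards distinction):

// 'loadOffset' is where the referencing instruction lives; 'slotOffset' is
// where its pool entry would land if the pool were dumped now.
static bool displacementTooFar(int loadOffset, int slotOffset, int bias, int maxOffset) {
    int displacement = slotOffset - loadOffset + bias;
    return displacement >= maxOffset;
}
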
// checkFull answers the question "If a pool were placed at poolOffset, would
// any reference into the pool be out of range?". It is meant to be used as instructions
// and elements are inserted, to determine if a saved perforation point needs to be used.
// checkFull answers the question "If a pool were placed at poolOffset,
// would any reference into the pool be out of range?". It is meant to be
// used as instructions and elements are inserted, to determine if a saved
// perforation point needs to be used.
bool checkFull(int poolOffset) {
// Inserting an instruction into the stream can
// push any of the pools out of range.
// Similarly, inserting into a pool can push the pool entry out of range
// Inserting an instruction into the stream can push any of the pools
// out of range. Similarly, inserting into a pool can push the pool
// entry out of range.
JS_ASSERT(!isBackref);
// Not full if there aren't any uses.
if (!limitingUser.assigned()) {
@ -128,7 +127,8 @@ struct Pool
return false;
}
// By the time this function is called, we'd damn well better know that this is going to succeed.
// By the time this function is called, we'd damn well better know that this
// is going to succeed.
uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) {
if (numEntries == buffSize) {
buffSize <<= 1;
@ -166,9 +166,9 @@ struct Pool
return true;
}
// WARNING: This will not always align values. It will only
// align to the requirement of the pool. If the pool is empty,
// there is nothing to be aligned, so it will not perform any alignment
// WARNING: This will not always align values. It will only align to the
// requirement of the pool. If the pool is empty, there is nothing to be
// aligned, so it will not perform any alignment.
uint8_t* align(uint8_t *ptr) {
return (uint8_t*)align((uint32_t)ptr);
}
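
The computation behind align() is the usual round-up-to-a-power-of-two formula; a minimal sketch, assuming (as the pool does) that the alignment is a power of two:

#include <cstdint>

static uint32_t alignUp(uint32_t value, uint32_t alignment) {
    // Round 'value' up to the next multiple of 'alignment'.
    return (value + alignment - 1) & ~(alignment - 1);
}
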
@ -316,13 +316,13 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
typedef BufferSliceTail<SliceSize, InstBaseSize> BufferSlice;
typedef AssemblerBuffer<SliceSize, Inst> Parent;
// The size of a guard instruction
// The size of a guard instruction.
const int guardSize;
// The size of the header that is put at the beginning of a full pool
// The size of the header that is put at the beginning of a full pool.
const int headerSize;
// The size of a footer that is put in a pool after it is full.
const int footerSize;
// the number of sub-pools that we can allocate into.
// The number of sub-pools that we can allocate into.
static const int numPoolKinds = 1 << poolKindBits;
Pool *pools;
@ -330,34 +330,36 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// The buffer should be aligned to this address.
const int instBufferAlign;
// the number of times we've dumped the pool.
// The number of times we've dumped the pool.
int numDumps;
struct PoolInfo {
int offset; // the number of instructions before the start of the pool
int size; // the size of the pool, including padding
int finalPos; // the end of the buffer, in bytes from the beginning of the buffer
int offset; // The number of instructions before the start of the pool.
int size; // The size of the pool, including padding.
int finalPos; // The end of the buffer, in bytes from the beginning of the buffer.
BufferSlice *slice;
};
PoolInfo *poolInfo;
// we need to keep track of how large the pools are, so we can allocate
// enough space for them later. This should include any amount of padding
// We need to keep track of how large the pools are, so we can allocate
// enough space for them later. This should include any amount of padding
// necessary to keep the pools aligned.
int poolSize;
// The Assembler should set this to true if it does not want us to dump a pool here
// The Assembler should set this to true if it does not want us to dump a
// pool here.
int canNotPlacePool;
// Are we filling up the forwards or backwards pools?
bool inBackref;
// Insert a number of NOP instructions between each requested instruction at all
// locations at which a pool can potentially spill. This is useful for checking
// that instruction locations are correctly referenced and/or followed.
// Insert a number of NOP instructions between each requested instruction at
// all locations at which a pool can potentially spill. This is useful for
// checking that instruction locations are correctly referenced and/or
// followed.
const uint32_t nopFillInst;
const uint32_t nopFill;
// Inhibit the insertion of fill NOPs in the dynamic context in which they are
// being inserted.
// Inhibit the insertion of fill NOPs in the dynamic context in which they
// are being inserted.
bool inhibitNops;
// Cache the last place we saw an opportunity to dump the pool
// Cache the last place we saw an opportunity to dump the pool.
BufferOffset perforation;
BufferSlice *perforatedNode;
public:
@ -413,7 +415,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
void executableCopy(uint8_t *dest_) {
if (this->oom())
return;
// TODO: only do this when the pool actually has a value in it
// TODO: only do this when the pool actually has a value in it.
flushPool();
for (int idx = 0; idx < numPoolKinds; idx++) {
JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0);
@ -429,7 +431,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
for (unsigned int idx = 0; idx <cur->size()/InstBaseSize;
idx++, curInstOffset += InstBaseSize) {
// Is the current instruction a branch?
if (cur->isBranch[idx >> 3] & (1<<(idx&7))) {
if (cur->isBranch[idx >> 3] & (1 << (idx & 7))) {
// It's a branch. fix up the branchiness!
patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset));
}
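
The isBranch bookkeeping is a plain bit vector: bit (idx & 7) of byte (idx >> 3) records whether instruction idx was marked as a branch. A small sketch of the test/set pair that indexing implies (helper names are illustrative, not the buffer's API):

#include <cstdint>

static bool testBranchBit(const uint8_t *bits, unsigned idx) {
    return (bits[idx >> 3] & (1 << (idx & 7))) != 0;
}

static void setBranchBit(uint8_t *bits, unsigned idx) {
    bits[idx >> 3] |= 1 << (idx & 7);
}
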
@ -437,11 +439,11 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
dest+=cur->size()/InstBaseSize;
if (cur->data != nullptr) {
// have the repatcher move on to the next pool
// Have the repatcher move on to the next pool.
curIndex ++;
// loop over all of the pools, copying them into place.
// Loop over all of the pools, copying them into place.
uint8_t *poolDest = (uint8_t*)dest;
Asm::writePoolHeader(poolDest, cur->data, cur->isNatural);
Asm::WritePoolHeader(poolDest, cur->data, cur->isNatural);
poolDest += headerSize;
for (int idx = 0; idx < numPoolKinds; idx++) {
Pool *curPool = &cur->data[idx];
@ -450,18 +452,20 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
poolDest += curPool->immSize * curPool->numEntries;
}
// now go over the whole list backwards, and copy in the reverse portions
for (int idx = numPoolKinds-1; idx >= 0; idx--) {
// Now go over the whole list backwards, and copy in the reverse
// portions.
for (int idx = numPoolKinds - 1; idx >= 0; idx--) {
Pool *curPool = cur->data[idx].other;
// align the pool.
poolDest = curPool->align(poolDest);
memcpy(poolDest, curPool->poolData, curPool->immSize * curPool->numEntries);
poolDest += curPool->immSize * curPool->numEntries;
}
// write a footer in place
Asm::writePoolFooter(poolDest, cur->data, cur->isNatural);
// Write a footer in place.
Asm::WritePoolFooter(poolDest, cur->data, cur->isNatural);
poolDest += footerSize;
// at this point, poolDest had better still be aligned to a chunk boundary.
// At this point, poolDest had better still be aligned to a
// chunk boundary.
dest = (Chunk*) poolDest;
}
}
@ -472,8 +476,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
if (nopFill > 0 && !inhibitNops && !canNotPlacePool) {
inhibitNops = true;
// Fill using a branch-nop rather than a NOP so this can
// be distinguished and skipped.
// Fill using a branch-nop rather than a NOP so this can be
// distinguished and skipped.
for (int i = 0; i < nopFill; i++)
putInt(nopFillInst);
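
The inhibitNops flag is what keeps this from recursing: the filler instructions themselves must not trigger further fill. A rough standalone sketch of the same guard, using a toy emitter rather than the real class:

#include <cstdint>
#include <vector>

struct ToyEmitter {
    std::vector<uint32_t> stream;
    int nopFill = 0;
    bool inhibitNops = false;
    bool canNotPlacePool = false;

    void insertNopFill(uint32_t fillInst) {
        if (nopFill <= 0 || inhibitNops || canNotPlacePool)
            return;
        inhibitNops = true;                 // the fill itself must not re-enter
        for (int i = 0; i < nopFill; i++)
            stream.push_back(fillInst);
        inhibitNops = false;
    }
};
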
@ -497,26 +501,26 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
spewEntry(data, p->immSize);
IonSpewFin(IonSpew_Pools);
}
// insert the pool value
// Insert the pool value.
if (inBackref)
token = insertEntryBackwards(instSize, inst, p, data);
else
token = insertEntryForwards(instSize, inst, p, data);
// now to get an instruction to write
// Now to get an instruction to write.
PoolEntry retPE;
if (p != nullptr) {
if (this->oom())
return BufferOffset();
int poolId = p - pools;
IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size());
Asm::insertTokenIntoTag(instSize, inst, token);
Asm::InsertTokenIntoTag(instSize, inst, token);
JS_ASSERT(poolId < (1 << poolKindBits));
JS_ASSERT(poolId >= 0);
// Figure out the offset within like-kinded pool entries
// Figure out the offset within like-kinded pool entries.
retPE = PoolEntry(entryCount[poolId], poolId);
entryCount[poolId]++;
}
// Now inst is a valid thing to insert into the instruction stream
// Now inst is a valid thing to insert into the instruction stream.
if (pe != nullptr)
*pe = retPE;
if (markAsBranch)
@ -525,7 +529,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
// unlike the forward case, inserting an instruction without inserting
// Unlike the forward case, inserting an instruction without inserting
// anything into a pool after a pool has been placed, we don't affect
// anything relevant, so we can skip this check entirely!
@ -535,18 +539,17 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// Instead, assume that we always add the maximum.
int poolOffset = footerSize;
Pool *cur, *tmp;
// NOTE: we want to process the pools from last to first.
// Since the last pool is pools[0].other, and the first pool
// is pools[numPoolKinds-1], we actually want to process this
// forwards.
// NOTE: we want to process the pools from last to first. Since the last
// pool is pools[0].other, and the first pool is pools[numPoolKinds-1],
// we actually want to process this forwards.
for (cur = pools; cur < &pools[numPoolKinds]; cur++) {
// fetch the pool for the backwards half.
// Fetch the pool for the backwards half.
tmp = cur->other;
if (p == cur)
tmp->updateLimiter(this->nextOffset());
if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
// uh-oh, the backwards pool is full. Time to finalize it, and
// Uh-oh, the backwards pool is full. Time to finalize it, and
// switch to a new forward pool.
if (p != nullptr)
IonSpew(IonSpew_Pools, "[%d]Inserting pool entry caused a spill", id);
@ -558,8 +561,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return uint32_t(-1);
return this->insertEntryForwards(instSize, inst, p, data);
}
// when moving back to front, calculating the alignment is hard, just be
// conservative with it.
// When moving back to front, calculating the alignment is hard,
// just be conservative with it.
poolOffset += tmp->immSize * tmp->numEntries + tmp->getAlignment();
if (p == tmp) {
poolOffset += tmp->immSize;
@ -568,31 +571,34 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return p->numEntries + p->other->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
}
// Simultaneously insert an instSized instruction into the stream,
// and an entry into the pool. There are many things that can happen.
// Simultaneously insert an instSized instruction into the stream, and an
// entry into the pool. There are many things that can happen.
// 1) the insertion goes as planned
// 2) inserting an instruction pushes a previous pool-reference out of range, forcing a dump
// 2a) there isn't a reasonable save point in the instruction stream. We need to save room for
// a guard instruction to branch over the pool.
// 2) inserting an instruction pushes a previous pool-reference out of
// range, forcing a dump
// 2a) there isn't a reasonable save point in the instruction stream. We
// need to save room for a guard instruction to branch over the pool.
int insertEntryForwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
// Advance the "current offset" by an inst, so everyone knows what their offset should be.
// Advance the "current offset" by an inst, so everyone knows what their
// offset should be.
uint32_t nextOffset = this->size() + instSize;
uint32_t poolOffset = nextOffset;
Pool *tmp;
// If we need a guard instruction, reserve space for that.
if (!perforatedNode)
poolOffset += guardSize;
// Also, take into account the size of the header that will be placed *after*
// the guard instruction
// Also, take into account the size of the header that will be placed
// *after* the guard instruction.
poolOffset += headerSize;
// Perform the necessary range checks.
for (tmp = pools; tmp < &pools[numPoolKinds]; tmp++) {
// The pool may wish for a particular alignment, Let's give it one.
// The pool may wish for a particular alignment. Let's give it one.
JS_ASSERT((tmp->getAlignment() & (tmp->getAlignment() - 1)) == 0);
// The pool only needs said alignment *if* there are any entries in the pool
// WARNING: the pool needs said alignment if there are going to be entries in
// the pool after this entry has been inserted
// The pool only needs said alignment *if* there are any entries in
// the pool. WARNING: the pool needs said alignment if there are
// going to be entries in the pool after this entry has been
// inserted.
if (p == tmp)
poolOffset = tmp->forceAlign(poolOffset);
else
@ -604,7 +610,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
p->updateLimiter(BufferOffset(nextOffset));
}
if (tmp->checkFull(poolOffset)) {
// uh-oh. DUMP DUMP DUMP
// uh-oh. DUMP DUMP DUMP.
if (p != nullptr)
IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
else
@ -613,7 +619,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
this->dumpPool();
return this->insertEntryBackwards(instSize, inst, p, data);
}
// include the size of this pool in the running total
// Include the size of this pool in the running total.
if (p == tmp) {
nextOffset += tmp->immSize;
}
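
The accounting in the forward path can be read as: the hypothetical pool starts after the instruction being emitted, after a guard branch if no perforation point has been reserved yet, and after the pool header; only then are the per-pool alignments and range checks applied. A sketch of just that starting-offset computation, with illustrative parameter names:

static int hypotheticalPoolStart(int bufferSize, int instSize,
                                 bool havePerforation, int guardSize, int headerSize) {
    int offset = bufferSize + instSize;   // after the instruction being emitted
    if (!havePerforation)
        offset += guardSize;              // room for a branch over the pool
    return offset + headerSize;           // the header precedes the entries
}
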
@ -628,16 +634,16 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value,
nullptr, nullptr, nullptr, markAsBranch);
}
// Mark the current section as an area where we can
// later go to dump a pool
// Mark the current section as an area where we can later go to dump a pool.
void perforate() {
// If we're filling the backrefrences, we don't want to start looking for a new dumpsite.
// If we're filling the backreferences, we don't want to start looking
// for a new dumpsite.
if (inBackref)
return;
if (canNotPlacePool)
return;
// If there is nothing in the pool, then it is strictly disadvantageous
// to attempt to place a pool here
// to attempt to place a pool here.
bool empty = true;
for (int i = 0; i < numPoolKinds; i++) {
if (pools[i].numEntries != 0) {
@ -653,13 +659,13 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
IonSpew(IonSpew_Pools, "[%d] Adding a perforation at offset %d", id, perforation.getOffset());
}
// After a pool is finished, no more elements may be added to it. During this phase, we
// will know the exact offsets to the pool entries, and those values should be written into
// the given instructions.
// After a pool is finished, no more elements may be added to it. During
// this phase, we will know the exact offsets to the pool entries, and those
// values should be written into the given instructions.
PoolInfo getPoolData() const {
int prevOffset = getInfo(numDumps-1).offset;
int prevEnd = getInfo(numDumps-1).finalPos;
// calculate the offset of the start of this pool;
// Calculate the offset of the start of this pool.
int perfOffset = perforation.assigned() ?
perforation.getOffset() :
this->nextOffset().getOffset() + this->guardSize;
@ -682,7 +688,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
finOffset=pools[poolIdx].align(finOffset);
finOffset+=pools[poolIdx].numEntries * pools[poolIdx].immSize;
}
// And compute the necessary adjustments for the second half of the pool.
// And compute the necessary adjustments for the second half of the
// pool.
for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
finOffset=pools[poolIdx].other->align(finOffset);
finOffset+=pools[poolIdx].other->numEntries * pools[poolIdx].other->immSize;
@ -698,26 +705,28 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return ret;
}
void finishPool() {
// This function should only be called while the backwards half of the pool
// is being filled in. The backwards half of the pool is always in a state
// where it is sane. Everything that needs to be done here is for "sanity's sake".
// The per-buffer pools need to be reset, and we need to record the size of the pool.
// This function should only be called while the backwards half of the
// pool is being filled in. The backwards half of the pool is always in
// a state where it is sane. Everything that needs to be done here is
// for "sanity's sake". The per-buffer pools need to be reset, and we
// need to record the size of the pool.
IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps);
JS_ASSERT(inBackref);
PoolInfo newPoolInfo = getPoolData();
if (newPoolInfo.size == 0) {
// The code below also creates a new pool, but that is not necessary, since
// the pools have not been modified at all.
// The code below also creates a new pool, but that is not
// necessary, since the pools have not been modified at all.
new (&perforation) BufferOffset();
perforatedNode = nullptr;
inBackref = false;
IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id);
// Bail out early, since we don't want to even pretend these pools exist.
// Bail out early, since we don't want to even pretend these pools
// exist.
return;
}
JS_ASSERT(perforatedNode != nullptr);
if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) {
// need to resize.
if (numDumps >= (1 << logBasePoolInfo) && (numDumps & (numDumps - 1)) == 0) {
// Need to resize.
PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
if (tmp == nullptr) {
this->fail_oom();
@ -728,71 +737,78 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
// In order to figure out how to fix up the loads for the second half of the pool
// we need to find where the bits of the pool that have been implemented end.
// In order to figure out how to fix up the loads for the second half of
// the pool we need to find where the bits of the pool that have been
// implemented end.
int poolOffset = perforation.getOffset();
int magicAlign = getInfo(numDumps-1).finalPos - getInfo(numDumps-1).offset;
poolOffset += magicAlign;
poolOffset += headerSize;
for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
poolOffset=pools[poolIdx].align(poolOffset);
poolOffset+=pools[poolIdx].numEntries * pools[poolIdx].immSize;
poolOffset = pools[poolIdx].align(poolOffset);
poolOffset += pools[poolIdx].numEntries * pools[poolIdx].immSize;
}
mozilla::Array<LoadOffsets, 1 << poolKindBits> outcasts;
mozilla::Array<uint8_t *, 1 << poolKindBits> outcastEntries;
// All of the pool loads referred to by this code are going to
// need fixing up here.
// All of the pool loads referred to by this code are going to need
// fixing up here.
int skippedBytes = 0;
for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
Pool *p = pools[poolIdx].other;
JS_ASSERT(p != nullptr);
unsigned int idx = p->numEntries-1;
// Allocate space for tracking information that needs to be propagated to the next pool
// as well as space for quickly updating the pool entries in the current pool to remove
// the entries that don't actually fit. I probably should change this over to a vector
// Allocate space for tracking information that needs to be
// propagated to the next pool as well as space for quickly updating
// the pool entries in the current pool to remove the entries that
// don't actually fit. I probably should change this over to a
// vector
outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()];
bool *preservedEntries = new bool[p->numEntries];
// Hacks on top of Hacks!
// the patching code takes in the address of the instruction to be patched,
// and the "address" of the element in the pool that we want to load.
// However, since the code isn't actually in an array, we need to lie about
// the address that the pool is in. Furthermore, since the offsets are
// technically from the beginning of the FORWARD reference section, we have
// to lie to ourselves about where this pool starts in order to make sure
// the distance into the pool is interpreted correctly.
// There is a more elegant way to fix this that will need to be implemented
// eventually. We will want to provide the fixup function with a method to
// convert from a 'token' into a pool offset.
// The patching code takes in the address of the instruction to be
// patched, and the "address" of the element in the pool that we
// want to load. However, since the code isn't actually in an array,
// we need to lie about the address that the pool is
// in. Furthermore, since the offsets are technically from the
// beginning of the FORWARD reference section, we have to lie to
// ourselves about where this pool starts in order to make sure the
// distance into the pool is interpreted correctly. There is a more
// elegant way to fix this that will need to be implemented
// eventually. We will want to provide the fixup function with a
// method to convert from a 'token' into a pool offset.
poolOffset = p->align(poolOffset);
int numSkips = 0;
int fakePoolOffset = poolOffset - pools[poolIdx].numEntries * pools[poolIdx].immSize;
for (BufferOffset *iter = p->loadOffsets.end()-1;
iter != p->loadOffsets.begin()-1; --iter, --idx)
for (BufferOffset *iter = p->loadOffsets.end() - 1;
iter != p->loadOffsets.begin() - 1; --iter, --idx)
{
IonSpew(IonSpew_Pools, "[%d] Linking entry %d in pool %d", id, idx+ pools[poolIdx].numEntries, poolIdx);
JS_ASSERT(iter->getOffset() >= perforation.getOffset());
// Everything here is known, we can safely do the necessary substitutions
Inst * inst = this->getInst(*iter);
// Manually compute the offset, including a possible bias.
// Also take into account the whole size of the pool that is being placed.
// Everything here is known, we can safely do the necessary
// substitutions.
Inst *inst = this->getInst(*iter);
// Manually compute the offset, including a possible bias. Also
// take into account the whole size of the pool that is being
// placed.
int codeOffset = fakePoolOffset - iter->getOffset() - newPoolInfo.size + numSkips * p->immSize - skippedBytes;
// That is, patchConstantPoolLoad wants to be handed the address of the
// pool entry that is being loaded. We need to do a non-trivial amount
// of math here, since the pool that we've made does not actually reside there
// in memory.
// That is, PatchConstantPoolLoad wants to be handed the address
// of the pool entry that is being loaded. We need to do a
// non-trivial amount of math here, since the pool that we've
// made does not actually reside there in memory.
IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
if (!Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) {
// NOTE: if removing this entry happens to change the alignment of the next
// block, chances are you will have a bad time.
// ADDENDUM: this CANNOT happen on ARM, because the only elements that
// fall into this case are doubles loaded via vfp, but they will also be
// the last pool, which means it cannot affect the alignment of any other
// Sub Pools.
if (!Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign)) {
// NOTE: if removing this entry happens to change the
// alignment of the next block, chances are you will have a
// bad time.
// ADDENDUM: this CANNOT happen on ARM, because the only
// elements that fall into this case are doubles loaded via
// vfp, but they will also be the last pool, which means it
// cannot affect the alignment of any other Sub Pools.
IonSpew(IonSpew_Pools, "[%d]***Offset was still out of range!***", id, codeOffset - magicAlign);
IonSpew(IonSpew_Pools, "[%d] Too complicated; bailing", id);
this->fail_bail();
// only free up to the current offset
// Only free up to the current offset.
for (int pi = poolIdx; pi < numPoolKinds; pi++)
delete[] outcastEntries[pi];
delete[] preservedEntries;
@ -801,7 +817,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
preservedEntries[idx] = true;
}
}
// remove the elements of the pool that should not be there (YAY, MEMCPY)
// Remove the elements of the pool that should not be there (YAY,
// MEMCPY).
unsigned int idxDest = 0;
// If no elements were skipped, no expensive copy is necessary.
if (numSkips != 0) {
@ -821,7 +838,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
delete[] preservedEntries;
preservedEntries = nullptr;
}
// bind the current pool to the perforation point.
// Bind the current pool to the perforation point.
Pool **tmp = &perforatedNode->data;
*tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
if (tmp == nullptr) {
@ -830,8 +847,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
delete[] outcastEntries[pi];
return;
}
// The above operations may have changed the size of pools!
// recalibrate the size of the pool.
// The above operations may have changed the size of pools! Recalibrate
// the size of the pool.
newPoolInfo = getPoolData();
poolInfo[numDumps] = newPoolInfo;
poolSize += poolInfo[numDumps].size;
@ -839,7 +856,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
memcpy(*tmp, pools, sizeof(Pool) * numPoolKinds);
// reset everything to the state that it was in when we started
// Reset everything to the state that it was in when we started.
for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
if (!pools[poolIdx].reset(this->LifoAlloc_)) {
this->fail_oom();
@ -856,35 +873,38 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
// has been allocated, it is time to populate the new forward pool with
// any entries that couldn't fit in the backwards pool.
for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
// Technically, the innermost pool will never have this issue, but it is easier
// to just handle this case.
// Since the pool entry was filled back-to-front, and in the next buffer, the elements
// should be front-to-back, this insertion also needs to proceed backwards
// Technically, the innermost pool will never have this issue, but
// it is easier to just handle this case.
// Since the pool entry was filled back-to-front, and in the next
// buffer, the elements should be front-to-back, this insertion also
// needs to proceed backwards
int idx = outcasts[poolIdx].length();
for (BufferOffset *iter = outcasts[poolIdx].end()-1;
iter != outcasts[poolIdx].begin()-1;
for (BufferOffset *iter = outcasts[poolIdx].end() - 1;
iter != outcasts[poolIdx].begin() - 1;
--iter, --idx) {
pools[poolIdx].updateLimiter(*iter);
Inst *inst = this->getInst(*iter);
Asm::insertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end()-1-iter);
pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx*pools[poolIdx].immSize], *iter, this->LifoAlloc_);
Asm::InsertTokenIntoTag(pools[poolIdx].instSize, (uint8_t*)inst, outcasts[poolIdx].end() - 1 - iter);
pools[poolIdx].insertEntry(&outcastEntries[poolIdx][idx * pools[poolIdx].immSize], *iter, this->LifoAlloc_);
}
delete[] outcastEntries[poolIdx];
}
// this (*2) is not technically kosher, but I want to get this bug fixed.
// It should actually be guardSize + the size of the instruction that we're attempting
// to insert. Unfortunately that vaue is never passed in. On ARM, these instructions
// are always 4 bytes, so guardSize is legit to use.
// This (*2) is not technically kosher, but I want to get this bug
// fixed. It should actually be guardSize + the size of the instruction
// that we're attempting to insert. Unfortunately that value is never
// passed in. On ARM, these instructions are always 4 bytes, so
// guardSize is legit to use.
poolOffset = this->size() + guardSize * 2;
poolOffset += headerSize;
for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
// There can still be an awkward situation where the element that triggered the
// initial dump didn't fit into the pool backwards, and now, still does not fit into
// this pool. Now it is necessary to go and dump this pool (note: this is almost
// certainly being called from dumpPool()).
// There can still be an awkward situation where the element that
// triggered the initial dump didn't fit into the pool backwards,
// and now, still does not fit into this pool. Now it is necessary
// to go and dump this pool (note: this is almost certainly being
// called from dumpPool()).
poolOffset = pools[poolIdx].align(poolOffset);
if (pools[poolIdx].checkFull(poolOffset)) {
// ONCE AGAIN, UH-OH, TIME TO BAIL
// ONCE AGAIN, UH-OH, TIME TO BAIL.
dumpPool();
break;
}
@ -907,13 +927,14 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
if (!perforation.assigned()) {
JS_ASSERT(!canNotPlacePool);
IonSpew(IonSpew_Pools, "[%d] No Perforation point selected, generating a new one", id);
// There isn't a perforation here, we need to dump the pool with a guard.
// There isn't a perforation here, we need to dump the pool with a
// guard.
BufferOffset branch = this->nextOffset();
bool shouldMarkAsBranch = this->isNextBranch();
this->markNextAsBranch();
this->putBlob(guardSize, nullptr);
BufferOffset afterPool = this->nextOffset();
Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
Asm::WritePoolGuard(branch, this->getInst(branch), afterPool);
markGuard();
perforatedNode->isNatural = false;
if (shouldMarkAsBranch)
@ -929,46 +950,50 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
mozilla::DebugOnly<bool> beforePool = true;
Pool *p = &pools[poolIdx];
// Any entries that happened to be after the place we put our pool will need to be
// switched from the forward-referenced pool to the backward-refrenced pool.
// Any entries that happened to be after the place we put our pool
// will need to be switched from the forward-referenced pool to the
// backward-referenced pool.
int idx = 0;
for (BufferOffset *iter = p->loadOffsets.begin();
iter != p->loadOffsets.end(); ++iter, ++idx)
{
if (iter->getOffset() >= perforation.getOffset()) {
IonSpew(IonSpew_Pools, "[%d] Pushing entry %d in pool %d into the backwards section.", id, idx, poolIdx);
// insert this into the rear part of the pool.
// Insert this into the rear part of the pool.
int offset = idx * p->immSize;
p->other->insertEntry(&p->poolData[offset], BufferOffset(*iter), this->LifoAlloc_);
// update the limiting entry for this pool.
// Update the limiting entry for this pool.
p->other->updateLimiter(*iter);
// Update the current pool to report fewer entries. They are now in the
// backwards section.
// Update the current pool to report fewer entries. They
// are now in the backwards section.
p->numEntries--;
beforePool = false;
} else {
JS_ASSERT(beforePool);
// align the pool offset to the alignment of this pool
// it already only aligns when the pool has data in it, but we want to not
// align when all entries will end up in the backwards half of the pool
// Align the pool offset to the alignment of this pool. It
// already only aligns when the pool has data in it, but we
// want to not align when all entries will end up in the
// backwards half of the pool.
poolOffset = p->align(poolOffset);
IonSpew(IonSpew_Pools, "[%d] Entry %d in pool %d is before the pool.", id, idx, poolIdx);
// Everything here is known, we can safely do the necessary substitutions
Inst * inst = this->getInst(*iter);
// We need to manually compute the offset, including a possible bias.
// Everything here is known, we can safely do the necessary
// substitutions.
Inst *inst = this->getInst(*iter);
// We need to manually compute the offset, including a
// possible bias.
int codeOffset = poolOffset - iter->getOffset();
// That is, patchConstantPoolLoad wants to be handed the address of the
// pool entry that is being loaded. We need to do a non-trivial amount
// of math here, since the pool that we've made does not actually reside there
// in memory.
// That is, PatchConstantPoolLoad wants to be handed the
// address of the pool entry that is being loaded. We need
// to do a non-trivial amount of math here, since the pool
// that we've made does not actually reside there in memory.
IonSpew(IonSpew_Pools, "[%d] Fixing offset to %d", id, codeOffset - magicAlign);
Asm::patchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign);
Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset - magicAlign);
}
}
// Some number of entries have been positively identified as being
// in this section of the pool. Before processing the next pool,
// update the offset from the beginning of the buffer
// update the offset from the beginning of the buffer.
poolOffset += p->numEntries * p->immSize;
}
poolOffset = footerSize;
@ -976,8 +1001,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
Pool *tmp = pools[poolIdx].other;
if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
// GNAAAH. While we rotated elements into the back half, one of them filled up
// Now, dumping the back half is necessary...
// GNAAAH. While we rotated elements into the back half, one of
// them filled up. Now, dumping the back half is necessary...
finishPool();
break;
}
@ -1014,10 +1039,10 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
// Can't assert anything here, since the first pool may be after the target.
}
Asm::retargetNearBranch(i, offset, false);
Asm::RetargetNearBranch(i, offset, false);
}
// Mark the next instruction as a valid guard. This means we can place a pool here.
// Mark the next instruction as a valid guard. This means we can place a pool here.
void markGuard() {
// If we are in a no pool zone then there is no point in dogearing
// this branch as a place to go back to
@ -1032,21 +1057,22 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
void enterNoPool() {
insertNopFill();
if (!canNotPlacePool && !perforation.assigned()) {
// Embarassing mode: The Assembler requests the start of a no pool section
// and there have been no valid places that a pool could be dumped thusfar.
// If a pool were to fill up before this no-pool section ends, we need to go back
// in the stream and enter a pool guard after the fact. This is feasable, but
// for now, it is easier to just allocate a junk instruction, default it to a nop, and
// finally, if the pool *is* needed, patch the nop to apool guard.
// What the assembler requests:
// Embarrassing mode: The Assembler requests the start of a no pool
// section and there have been no valid places that a pool could be
// dumped thus far. If a pool were to fill up before this no-pool
// section ends, we need to go back in the stream and enter a pool
// guard after the fact. This is feasible, but for now, it is easier
// to just allocate a junk instruction, default it to a nop, and
// finally, if the pool *is* needed, patch the nop to a pool
// guard. What the assembler requests:
// #request no-pool zone
// push pc
// blx r12
// #end no-pool zone
// however, if we would need to insert a pool, and there is no perforation point...
// so, actual generated code:
// however, if we would need to insert a pool, and there is no
// perforation point... so, actual generated code:
// b next; <= perforation point
// next:
@ -1058,7 +1084,7 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
this->markNextAsBranch();
this->putBlob(guardSize, nullptr);
BufferOffset afterPool = this->nextOffset();
Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
Asm::WritePoolGuard(branch, this->getInst(branch), afterPool);
markGuard();
if (perforatedNode != nullptr)
perforatedNode->isNatural = false;
@ -1075,9 +1101,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
return &pools[idx];
}
void markNextAsBranch() {
// If the previous thing inserted was the last instruction of
// the node, then whoops, we want to mark the first instruction of
// the next node.
// If the previous thing inserted was the last instruction of the node,
// then whoops, we want to mark the first instruction of the next node.
this->ensureSpace(InstBaseSize);
JS_ASSERT(*this->getTail() != nullptr);
(*this->getTail())->markNextAsBranch();
@ -1100,8 +1125,8 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
int cur = 0;
while(cur < numDumps && poolInfo[cur].offset <= offset)
cur++;
// poolInfo[curDumpsite] is now larger than the offset
// either this is the first one, or the previous is the last one we care about
// poolInfo[curDumpsite] is now larger than the offset either this is
// the first one, or the previous is the last one we care about.
if (cur == 0)
return 0;
return poolInfo[cur-1].finalPos - poolInfo[cur-1].offset;
@ -1126,9 +1151,9 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
}
if (poolNum != nullptr)
*poolNum = idx;
// If this offset is contained in any finished pool, forward or backwards, p now
// points to that pool, if it is not in any pool (should be in the currently building pool)
// then p is nullptr.
// If this offset is contained in any finished pool, forward or
// backwards, p now points to that pool, if it is not in any pool
// (should be in the currently building pool) then p is nullptr.
if (p == nullptr) {
p = &pools[poolKind];
if (offset >= p->getPoolSize()) {
@ -1163,16 +1188,15 @@ struct AssemblerBufferWithConstantPool : public AssemblerBuffer<SliceSize, Inst>
Pool *poolGroup = pi->slice->data;
uint32_t start = pi->finalPos - pi->size + headerSize;
/// The order of the pools is:
// A B C C_Rev B_Rev A_Rev, so in the initial pass,
// go through the pools forwards, and in the second pass
// go through them in reverse order.
// A B C C_Rev B_Rev A_Rev, so in the initial pass, go through the pools
// forwards, and in the second pass go through them in reverse order.
for (int idx = 0; idx < numPoolKinds; idx++) {
if (&poolGroup[idx] == realPool) {
return start + offset;
}
start = poolGroup[idx].addPoolSize(start);
}
for (int idx = numPoolKinds-1; idx >= 0; idx--) {
for (int idx = numPoolKinds - 1; idx >= 0; idx--) {
if (poolGroup[idx].other == realPool) {
return start + offset;
}
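
Putting the ordering comment together with the copy loops in executableCopy: a dumped pool region is laid out as header, the forward half of each kind in order, the backward halves in reverse order, and finally the footer. A toy computation of the region size under that layout (sizes assumed pre-aligned for simplicity; names are illustrative):

#include <cstdint>

static uint32_t poolRegionSize(const uint32_t fwdSizes[], const uint32_t revSizes[],
                               int numKinds, uint32_t headerSize, uint32_t footerSize) {
    uint32_t total = headerSize;
    for (int i = 0; i < numKinds; i++)        // A, B, C ...
        total += fwdSizes[i];
    for (int i = numKinds - 1; i >= 0; i--)   // ... C_Rev, B_Rev, A_Rev
        total += revSizes[i];
    return total + footerSize;
}
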

View file

@ -985,7 +985,7 @@ Chunk::releaseArena(ArenaHeader *aheader)
JS_ASSERT(rt->gc.bytesAllocated() >= ArenaSize);
JS_ASSERT(zone->gcBytes >= ArenaSize);
if (rt->gc.isBackgroundSweeping())
zone->gcBytesAfterGC -= ArenaSize;
zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
rt->gc.updateBytesAllocated(-ArenaSize);
zone->gcBytes -= ArenaSize;
@ -1695,8 +1695,7 @@ GCRuntime::onTooMuchMalloc()
}
size_t
GCRuntime::computeTriggerBytes(double growthFactor, size_t lastBytes,
JSGCInvocationKind gckind) const
GCRuntime::computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind)
{
size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, allocThreshold);
double trigger = double(base) * growthFactor;
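
With illustrative numbers only: if allocThreshold is 30 MB, lastBytes is 40 MB and the growth factor is 3.0, a non-shrinking GC computes a trigger of max(40, 30) * 3.0 = 120 MB, while a GC_SHRINK collection uses lastBytes directly. A small stand-alone version of that computation:

#include <algorithm>
#include <cstddef>

static size_t triggerBytes(size_t lastBytes, size_t allocThreshold,
                           double growthFactor, bool shrink) {
    size_t base = shrink ? lastBytes : std::max(lastBytes, allocThreshold);
    return size_t(double(base) * growthFactor);
}
// e.g. triggerBytes(40u << 20, 30u << 20, 3.0, false) yields 120 MB.
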
@ -1704,7 +1703,7 @@ GCRuntime::computeTriggerBytes(double growthFactor, size_t lastBytes,
}
double
GCRuntime::computeHeapGrowthFactor(size_t lastBytes) const
GCRuntime::computeHeapGrowthFactor(size_t lastBytes)
{
/*
* The heap growth factor depends on the heap size after a GC and the GC frequency.
@ -1746,11 +1745,22 @@ GCRuntime::computeHeapGrowthFactor(size_t lastBytes) const
void
Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
{
const GCRuntime &gc = runtimeFromAnyThread()->gc;
GCRuntime &gc = runtimeFromMainThread()->gc;
gcHeapGrowthFactor = gc.computeHeapGrowthFactor(lastBytes);
gcTriggerBytes = gc.computeTriggerBytes(gcHeapGrowthFactor, lastBytes, gckind);
}
void
Zone::reduceGCTriggerBytes(size_t amount)
{
JS_ASSERT(amount > 0);
JS_ASSERT(gcTriggerBytes >= amount);
GCRuntime &gc = runtimeFromAnyThread()->gc;
if (gcTriggerBytes - amount < gc.allocationThreshold() * gcHeapGrowthFactor)
return;
gcTriggerBytes -= amount;
}
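
The guard in reduceGCTriggerBytes keeps the trigger from dropping below the allocation threshold scaled by the heap growth factor. A stand-in sketch with the same shape, using an illustrative type rather than the real Zone:

#include <cstddef>

struct ZoneTriggerSketch {
    size_t gcTriggerBytes;
    double gcHeapGrowthFactor;
    size_t allocationThreshold;

    void reduce(size_t amount) {
        // Skip the reduction entirely if it would take the trigger below the floor.
        if (gcTriggerBytes - amount < allocationThreshold * gcHeapGrowthFactor)
            return;
        gcTriggerBytes -= amount;
    }
};
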
Allocator::Allocator(Zone *zone)
: zone_(zone)
{}
@ -2580,15 +2590,6 @@ GCRuntime::sweepBackgroundThings(bool onBackgroundThread)
}
}
if (onBackgroundThread) {
/*
* Update zone triggers a second time now we have completely finished
* sweeping these zones.
*/
for (Zone *zone = sweepingZones; zone; zone = zone->gcNextGraphNode)
zone->setGCLastBytes(zone->gcBytesAfterGC, lastKind);
}
sweepingZones = nullptr;
}
@ -4386,7 +4387,7 @@ GCRuntime::sweepPhase(SliceBudget &sliceBudget)
}
void
GCRuntime::endSweepPhase(bool lastGC)
GCRuntime::endSweepPhase(JSGCInvocationKind gckind, bool lastGC)
{
gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
FreeOp fop(rt, sweepOnBackgroundThread);
@ -4459,7 +4460,7 @@ GCRuntime::endSweepPhase(bool lastGC)
* Expire needs to unlock it for other callers.
*/
AutoLockGC lock(rt);
expireChunksAndArenas(lastKind == GC_SHRINK);
expireChunksAndArenas(gckind == GC_SHRINK);
}
}
@ -4502,10 +4503,9 @@ GCRuntime::endSweepPhase(bool lastGC)
lastGCTime + highFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > currentTime;
for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
zone->setGCLastBytes(zone->gcBytes, lastKind);
zone->setGCLastBytes(zone->gcBytes, gckind);
if (zone->isCollecting()) {
JS_ASSERT(zone->isGCFinished());
zone->gcBytesAfterGC = zone->gcBytes;
zone->setGCState(Zone::NoGC);
}
@ -4787,8 +4787,6 @@ GCRuntime::incrementalCollectSlice(int64_t budget,
JS_ASSERT_IF(incrementalState != NO_INCREMENTAL, isIncremental);
isIncremental = budget != SliceBudget::Unlimited;
lastKind = gckind;
if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
/*
* Yields between slices occurs at predetermined points in these modes;
@ -4876,7 +4874,7 @@ GCRuntime::incrementalCollectSlice(int64_t budget,
if (!finished)
break;
endSweepPhase(lastGC);
endSweepPhase(gckind, lastGC);
if (sweepOnBackgroundThread)
helperState.startBackgroundSweep(gckind == GC_SHRINK);

View file

@ -0,0 +1,17 @@
// |reftest| skip-if(!this.hasOwnProperty("SIMD"))
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
*/
var BUGNUMBER = 1023145;
var summary = "Use the original getPrototypeOf in self-hosted code";
delete Object.prototype.__proto__;
var int32x4 = SIMD.int32x4;
var ar = int32x4.array(1);
var array = new ar([int32x4(1, 2, 3, 4)]);
if (typeof reportCompare === "function")
reportCompare(true, true);

View file

@ -12,4 +12,5 @@ b2g = true
skip = false
[test_touchcaret.py]
disabled = "Bug 1035172"
[test_selectioncarets.py]

View file

@ -738,7 +738,8 @@ RenderFrameParent::RenderFrameParent(nsFrameLoader* aFrameLoader,
}
}
if (gfxPlatform::UsesOffMainThreadCompositing()) {
if (gfxPlatform::UsesOffMainThreadCompositing() &&
XRE_GetProcessType() == GeckoProcessType_Default) {
// Our remote frame will push layers updates to the compositor,
// and we'll keep an indirect reference to that tree.
*aId = mLayersId = CompositorParent::AllocateLayerTreeId();

View file

@ -389,6 +389,13 @@ class TreeMetadataEmitter(LoggingMixin):
self._final_libs.append((sandbox['OBJDIR'], libname, final_lib))
passthru.variables['FORCE_STATIC_LIB'] = True
soname = sandbox.get('SONAME')
if soname:
if not sandbox.get('FORCE_SHARED_LIB'):
raise SandboxValidationError('SONAME applicable only for shared libraries')
else:
passthru.variables['SONAME'] = soname
# While there are multiple test manifests, the behavior is very similar
# across them. We enforce this by having common handling of all
# manifests and outputting a single class type with the differences

View file

@ -417,6 +417,14 @@ VARIABLES = {
``BIN_SUFFIX``, the name will remain unchanged.
""", 'binaries'),
'SONAME': (unicode, unicode,
"""The soname of the shared object currently being linked.
soname is the "logical name" of a shared object, often used to provide
version backwards compatibility. This variable makes sense only for
shared objects, and is supported only on some unix platforms.
""", 'binaries'),
'HOST_SIMPLE_PROGRAMS': (StrictOrderingOnAppendList, list,
"""Compile a list of host executable names.

View file

@ -73,7 +73,7 @@ void genProfileEntry(/*MODIFIED*/UnwinderThreadBuffer* utb,
// Add a pseudostack-entry start label
utb__addEntry( utb, ProfileEntry('h', 'P') );
// And the SP value, if it is non-zero
if (entry.stackAddress() != 0) {
if (entry.isCpp() && entry.stackAddress() != 0) {
utb__addEntry( utb, ProfileEntry('S', entry.stackAddress()) );
}
@ -117,7 +117,9 @@ void genProfileEntry(/*MODIFIED*/UnwinderThreadBuffer* utb,
}
} else {
utb__addEntry( utb, ProfileEntry('c', sampleLabel) );
lineno = entry.line();
if (entry.isCpp()) {
lineno = entry.line();
}
}
if (lineno != -1) {
utb__addEntry( utb, ProfileEntry('n', lineno) );

Просмотреть файл

@ -68,7 +68,7 @@ this.WebappManager = {
// Perform the install if the user allows it
if (choice == 0) {
let nativeApp = new NativeApp(data.app, jsonManifest,
WebappRT.config.app.categories,
data.app.categories,
WebappRT.config.registryDir);
let localDir;
try {

View file

@ -37,8 +37,10 @@ function loadWebapp(manifest, parameters, onLoad) {
registerCleanupFunction(function() {
// We load DOMApplicationRegistry into a local scope to avoid appearing
// to leak it.
let scope = {};
Cu.import("resource://gre/modules/Webapps.jsm", scope);
scope.DOMApplicationRegistry.uninstall(url.spec);
let { DOMApplicationRegistry } = Cu.import("resource://gre/modules/Webapps.jsm", {});
return new Promise(function(resolve, reject) {
DOMApplicationRegistry.uninstall(url.spec, resolve, reject);
});
});
}

View file

@ -4,7 +4,6 @@
include $(topsrcdir)/config/rules.mk
LDFLAGS += -Wl,-soname=$(DLL_PREFIX)mozgtk$(DLL_SUFFIX)
# If LDFLAGS contains -Wl,--as-needed, we need to add -Wl,--no-as-needed
# before the gtk libraries, otherwise the linker will drop those dependencies

View file

@ -12,4 +12,6 @@ DEFINES['GTK3_SYMBOLS'] = True
LIBRARY_NAME = 'mozgtk2'
SONAME = 'mozgtk'
FORCE_SHARED_LIB = True

View file

@ -12,4 +12,6 @@ DEFINES['GTK2_SYMBOLS'] = True
LIBRARY_NAME = 'mozgtk'
SONAME = 'mozgtk'
FORCE_SHARED_LIB = True

View file

@ -1,7 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
include $(topsrcdir)/config/rules.mk
LDFLAGS += -Wl,-soname=$(DLL_PREFIX)mozgtk$(DLL_SUFFIX)

View file

@ -13,4 +13,6 @@ for var in ('COMMON_SYMBOLS', 'GTK2_SYMBOLS', 'GTK3_SYMBOLS'):
LIBRARY_NAME = 'mozgtk_stub'
SONAME = 'mozgtk'
FORCE_SHARED_LIB = True