Merge m-c to b2g-inbound. a=merge

This commit is contained in:
Ryan VanderMeulen 2014-08-13 15:53:06 -04:00
Родитель b21a0eeebf 8543ea8030
Коммит f557ddb7de
162 изменённых файлов: 4856 добавлений и 1300 удалений

Просмотреть файл

@ -1683,8 +1683,6 @@ pref("media.gmp-manager.certs.1.commonName", "aus4.mozilla.org");
pref("media.gmp-manager.certs.2.issuerName", "CN=Thawte SSL CA,O=\"Thawte, Inc.\",C=US");
pref("media.gmp-manager.certs.2.commonName", "aus4.mozilla.org");
// Delete HTTP cache v2 data of users that didn't opt-in manually
pref("browser.cache.auto_delete_cache_version", 1);
// Play with different values of the decay time and get telemetry,
// 0 means to randomize (and persist) the experiment value in users' profiles,
// -1 means no experiment is run and we use the preferred value for frecency (6h)

Просмотреть файл

@ -92,7 +92,7 @@ header {
margin: 0 auto;
height: 30px;
background-size: contain;
background-image: url("../../content/shared/img/mozilla-logo.png");
background-image: url("../shared/img/mozilla-logo.png");
background-repeat: no-repeat;
}
@ -106,7 +106,7 @@ header {
width: 100px;
height: 100px;
margin: 1rem auto;
background-image: url("../../content/shared/img/firefox-logo.png");
background-image: url("../shared/img/firefox-logo.png");
background-size: cover;
background-repeat: no-repeat;
}

Просмотреть файл

@ -1358,8 +1358,10 @@ Toolbox.prototype = {
// Destroying the walker and inspector fronts
outstanding.push(this.destroyInspector().then(() => {
// Removing buttons
this._pickerButton.removeEventListener("command", this._togglePicker, false);
this._pickerButton = null;
if (this._pickerButton) {
this._pickerButton.removeEventListener("command", this._togglePicker, false);
this._pickerButton = null;
}
}));
// Remove the host UI

Просмотреть файл

@ -277,7 +277,9 @@ Tools.jsprofiler = {
inMenu: true,
isTargetSupported: function (target) {
return !target.isAddon;
// Hide the profiler when debugging devices pre bug 1046394,
// that don't expose profiler actor in content processes.
return !target.isAddon && (!target.isApp || target.form.profilerActor);
},
build: function (frame, target) {

Просмотреть файл

@ -93,6 +93,7 @@ skip-if = true # Bug 1047124
[browser_profiler_tree-abstract-01.js]
[browser_profiler_tree-abstract-02.js]
[browser_profiler_tree-abstract-03.js]
[browser_profiler_tree-abstract-04.js]
[browser_profiler_tree-frame-node.js]
[browser_profiler_tree-model-01.js]
[browser_profiler_tree-model-02.js]

Просмотреть файл

@ -0,0 +1,68 @@
/* Any copyright is dedicated to the Public Domain.
   http://creativecommons.org/publicdomain/zero/1.0/ */

/**
 * Tests that the treeview expander arrow doesn't react to dblclick events.
 */

let { AbstractTreeItem } = Cu.import("resource:///modules/devtools/AbstractTreeItem.jsm", {});
let { Heritage } = Cu.import("resource:///modules/devtools/ViewHelpers.jsm", {});

let test = Task.async(function*() {
  let container = document.createElement("vbox");
  gBrowser.selectedBrowser.parentNode.appendChild(container);

  // Populate the tree and test the root item...
  let treeRoot = new MyCustomTreeItem(gDataSrc, { parent: null });
  treeRoot.attachTo(container);

  // Remember the expanded state so we can verify the dblclick left it alone.
  let originalTreeRootExpanded = treeRoot.expanded;

  info("Double clicking on the root item arrow and waiting for focus event.");
  let receivedFocusEvent = treeRoot.once("focus");
  EventUtils.sendMouseEvent({ type: "dblclick" }, treeRoot.target.querySelector(".arrow"));
  yield receivedFocusEvent;

  is(treeRoot.expanded, originalTreeRootExpanded,
    "A double click on the arrow was ignored.");

  container.remove();
  finish();
});

// Minimal AbstractTreeItem subclass used as the fixture for this test.
function MyCustomTreeItem(dataSrc, properties) {
  AbstractTreeItem.call(this, properties);
  this.itemDataSrc = dataSrc;
}

MyCustomTreeItem.prototype = Heritage.extend(AbstractTreeItem.prototype, {
  _displaySelf: function(document, arrowNode) {
    let node = document.createElement("hbox");
    // Indent each nesting level. The margin must be set on the node's
    // style object; assigning `node.MozMarginStart` directly only creates
    // an inert expando property and has no layout effect.
    node.style.MozMarginStart = (this.level * 10) + "px";
    node.appendChild(arrowNode);
    node.appendChild(document.createTextNode(this.itemDataSrc.label));
    return node;
  },
  _populateSelf: function(children) {
    for (let childDataSrc of this.itemDataSrc.children) {
      children.push(new MyCustomTreeItem(childDataSrc, {
        parent: this,
        level: this.level + 1
      }));
    }
  }
});

// Static data source backing the fixture tree: root -> (foo, bar -> baz).
let gDataSrc = {
  label: "root",
  children: [{
    label: "foo",
    children: []
  }, {
    label: "bar",
    children: [{
      label: "baz",
      children: []
    }]
  }]
};

Просмотреть файл

@ -88,6 +88,12 @@ ProfilerConnection.prototype = {
if (this._target.chrome) {
this._profiler = this._target.form.profilerActor;
}
// Or when we are debugging content processes, we already have the tab
// specific one. Use it immediately.
else if (this._target.form && this._target.form.profilerActor) {
this._profiler = this._target.form.profilerActor;
yield this._registerEventNotifications();
}
// Check if we already have a grip to the `listTabs` response object
// and, if we do, use it to get to the profiler actor.
else if (this._target.root) {

Просмотреть файл

@ -427,7 +427,12 @@ AbstractTreeItem.prototype = {
* Handler for the "dblclick" event on the element displaying this tree item.
*/
_onDoubleClick: function(e) {
this._onArrowClick(e);
// Ignore dblclick on the arrow as it has already received and handled two
// click events.
if (!e.target.classList.contains("arrow")) {
this._onArrowClick(e);
}
this.focus();
},

Просмотреть файл

@ -136,7 +136,7 @@
.gcli-menu-more {
font-size: 80%;
text-align: right;
text-align: end;
-moz-padding-end: 8px;
}

Просмотреть файл

@ -50,7 +50,7 @@ h1 .info {
font-weight: bold;
color: #000;
white-space: nowrap;
text-align: right;
text-align: end;
vertical-align: top;
width: 10%;
}

Просмотреть файл

@ -136,7 +136,7 @@
.gcli-menu-more {
font-size: 80%;
text-align: right;
text-align: end;
-moz-padding-end: 8px;
}

Просмотреть файл

@ -51,7 +51,7 @@ h1 .info {
padding-right: 4px;
color: #000;
white-space: nowrap;
text-align: right;
text-align: end;
vertical-align: top;
width: 10%;
}

Просмотреть файл

@ -190,7 +190,7 @@ button {
}
.permission-table-header > div:first-child {
text-align: left;
text-align: start;
padding-left: 10px;
flex-basis: 30%;
}
@ -228,7 +228,7 @@ button {
}
.permission > div:first-child {
text-align: left;
text-align: start;
padding: 3px 10px;
flex-basis: 30%;
font-weight: bold;
@ -277,7 +277,7 @@ button {
display: flex;
padding: 15px 10px;
display: block;
text-align: left;
text-align: start;
flex-grow: 1;
}

Просмотреть файл

@ -105,7 +105,7 @@
}
.devtools-autocomplete-listbox > richlistitem > .autocomplete-count {
text-align: right;
text-align: end;
}
/* Rest of the dark and light theme */

Просмотреть файл

@ -8,7 +8,7 @@
.ruleview-rule-source {
-moz-padding-start: 5px;
text-align: right;
text-align: end;
float: right;
-moz-user-select: none;
}

Просмотреть файл

@ -136,7 +136,7 @@
.gcli-menu-more {
font-size: 80%;
text-align: right;
text-align: end;
-moz-padding-end: 8px;
}

Просмотреть файл

@ -51,7 +51,7 @@ h1 .info {
padding-right: 4px;
color: #000;
white-space: nowrap;
text-align: right;
text-align: end;
vertical-align: top;
width: 10%;
}

Просмотреть файл

@ -819,7 +819,7 @@ class Automation(object):
xrePath = None, certPath = None,
debuggerInfo = None, symbolsPath = None,
timeout = -1, maxTime = None, onLaunch = None,
webapprtChrome = False, screenshotOnFail=False, testPath=None, bisectChunk=None):
detectShutdownLeaks = False, screenshotOnFail=False, testPath=None, bisectChunk=None):
"""
Run the app, log the duration it took to execute, return the status code.
Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.

Просмотреть файл

@ -606,6 +606,8 @@ class ShutdownLeaks(object):
self._logWindow(line)
elif line[2:10] == "DOCSHELL":
self._logDocShell(line)
elif line.startswith("TEST-START | Shutdown"):
self.seenShutdown = True
elif message['action'] == 'test_start':
fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
@ -614,10 +616,11 @@ class ShutdownLeaks(object):
if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
self.tests.append(self.currentTest)
self.currentTest = None
elif message['action'] == 'suite_end':
self.seenShutdown = True
def process(self):
if not self.seenShutdown:
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | process() called before end of test suite")
for test in self._parseLeakingTests():
for url, count in self._zipLeakedWindows(test["leakedWindows"]):
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))

Просмотреть файл

@ -51,7 +51,7 @@ gyp_vars = {
# codec enable/disables:
'include_g711': 1,
'include_opus': 1,
'include_g722': 0,
'include_g722': 1,
'include_ilbc': 0,
'include_isac': 0,
'include_pcm16b': 1,

Просмотреть файл

@ -16,6 +16,7 @@ Implement HTML5 sandbox attribute for IFRAMEs - general tests
SimpleTest.expectAssertions(0, 1);
SimpleTest.waitForExplicitFinish();
SimpleTest.requestCompleteLog();
// a postMessage handler that is used by sandboxed iframes without
// 'allow-same-origin' to communicate pass/fail back to this main page.

Просмотреть файл

@ -58,7 +58,8 @@ public:
duration.value(),
framesCopied,
buffer.forget(),
aChannels));
aChannels,
aSampleRate));
// Remove the frames we just pushed into the queue and loop if there is
// more to be done.

Просмотреть файл

@ -71,10 +71,12 @@ public:
int64_t aDuration,
uint32_t aFrames,
AudioDataValue* aData,
uint32_t aChannels)
uint32_t aChannels,
uint32_t aRate)
: MediaData(AUDIO_SAMPLES, aOffset, aTime, aDuration)
, mFrames(aFrames)
, mChannels(aChannels)
, mRate(aRate)
, mAudioData(aData)
{
MOZ_COUNT_CTOR(AudioData);
@ -92,6 +94,7 @@ public:
const uint32_t mFrames;
const uint32_t mChannels;
const uint32_t mRate;
// At least one of mAudioBuffer/mAudioData must be non-null.
// mChannels channels, each with mFrames frames
nsRefPtr<SharedBuffer> mAudioBuffer;

Просмотреть файл

@ -2802,7 +2802,8 @@ MediaDecoderStateMachine::DropAudioUpToSeekTarget(AudioData* aSample)
duration.value(),
frames,
audioData.forget(),
channels));
channels,
audio->mRate));
AudioQueue().PushFront(data.forget());
return NS_OK;

Просмотреть файл

@ -260,7 +260,7 @@ AppleMP3Reader::AudioSampleCallback(UInt32 aNumBytes,
AudioData *audio = new AudioData(mDecoder->GetResource()->Tell(),
time, duration, numFrames,
reinterpret_cast<AudioDataValue *>(decoded.forget()),
mAudioChannels);
mAudioChannels, mAudioSampleRate);
mAudioQueue.Push(audio);
mCurrentAudioFrame += numFrames;

Просмотреть файл

@ -62,10 +62,9 @@ public:
AMR_AUDIO_FRAME,
UNKNOWN // FrameType not set
};
nsresult SwapInFrameData(nsTArray<uint8_t>& aData)
void SwapInFrameData(nsTArray<uint8_t>& aData)
{
mFrameData.SwapElements(aData);
return NS_OK;
}
nsresult SwapOutFrameData(nsTArray<uint8_t>& aData)
{

Просмотреть файл

@ -143,8 +143,7 @@ OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
}
rv = videoData->SwapInFrameData(buffer);
NS_ENSURE_SUCCESS(rv, rv);
videoData->SwapInFrameData(buffer);
videoData->SetTimeStamp(outTimeStampUs);
aData.AppendEncodedFrame(videoData);
}
@ -187,8 +186,7 @@ OmxAudioTrackEncoder::AppendEncodedFrames(EncodedFrameContainer& aContainer)
MOZ_ASSERT(false, "audio codec not supported");
}
audiodata->SetTimeStamp(outTimeUs);
rv = audiodata->SwapInFrameData(frameData);
NS_ENSURE_SUCCESS(rv, rv);
audiodata->SwapInFrameData(frameData);
aContainer.AppendEncodedFrame(audiodata);
}

Просмотреть файл

@ -173,7 +173,6 @@ VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
vpx_codec_iter_t iter = nullptr;
EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
nsTArray<uint8_t> frameData;
nsresult rv;
const vpx_codec_cx_pkt_t *pkt = nullptr;
while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
switch (pkt->kind) {
@ -212,8 +211,7 @@ VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
videoData->SetDuration(
(uint64_t)FramesToUsecs(pkt->data.frame.duration, mTrackRate).value());
}
rv = videoData->SwapInFrameData(frameData);
NS_ENSURE_SUCCESS(rv, rv);
videoData->SwapInFrameData(frameData);
VP8LOG("GetEncodedPartitions TimeStamp %lld Duration %lld\n",
videoData->GetTimeStamp(), videoData->GetDuration());
VP8LOG("frameType %d\n", videoData->GetFrameType());

Просмотреть файл

@ -193,7 +193,8 @@ public:
aDuration,
uint32_t(frames.value()),
samples,
mChannelCount);
mChannelCount,
mSampleRate);
}
private:

Просмотреть файл

@ -381,6 +381,10 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo,
NS_ENSURE_TRUE(mAudio.mDecoder != nullptr, NS_ERROR_FAILURE);
nsresult rv = mAudio.mDecoder->Init();
NS_ENSURE_SUCCESS(rv, rv);
// Decode one audio frame to detect potentially incorrect channels count or
// sampling rate from demuxer.
Decode(kAudio);
}
if (HasVideo()) {
@ -585,7 +589,15 @@ MP4Reader::Output(TrackType aTrack, MediaData* aSample)
switch (aTrack) {
case kAudio: {
MOZ_ASSERT(aSample->mType == MediaData::AUDIO_SAMPLES);
AudioQueue().Push(static_cast<AudioData*>(aSample));
AudioData* audioData = static_cast<AudioData*>(aSample);
AudioQueue().Push(audioData);
if (audioData->mChannels != mInfo.mAudio.mChannels ||
audioData->mRate != mInfo.mAudio.mRate) {
LOG("MP4Reader::Output change of sampling rate:%d->%d",
mInfo.mAudio.mRate, audioData->mRate);
mInfo.mAudio.mRate = audioData->mRate;
mInfo.mAudio.mChannels = audioData->mChannels;
}
break;
}
case kVideo: {

Просмотреть файл

@ -133,7 +133,10 @@ PlatformDecoderModule::Create()
#endif
#ifdef MOZ_FFMPEG
if (sFFmpegDecoderEnabled) {
return FFmpegRuntimeLinker::CreateDecoderModule();
nsAutoPtr<PlatformDecoderModule> m(FFmpegRuntimeLinker::CreateDecoderModule());
if (m) {
return m.forget();
}
}
#endif
#ifdef MOZ_APPLEMEDIA

Просмотреть файл

@ -240,7 +240,7 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
AudioBufferList decBuffer;
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = mConfig.channel_count;
decBuffer.mBuffers[0].mNumberChannels = mOutputFormat.mChannelsPerFrame;
decBuffer.mBuffers[0].mDataByteSize = decodedSize;
decBuffer.mBuffers[0].mData = decoded.get();
@ -271,7 +271,9 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
break;
}
const int rate = mConfig.samples_per_second;
const int rate = mOutputFormat.mSampleRate;
const int channels = mOutputFormat.mChannelsPerFrame;
int64_t time = FramesToUsecs(mCurrentAudioFrame, rate).value();
int64_t duration = FramesToUsecs(numFrames, rate).value();
@ -281,7 +283,7 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
AudioData *audio = new AudioData(mSamplePosition,
time, duration, numFrames,
reinterpret_cast<AudioDataValue *>(decoded.forget()),
rate);
channels, rate);
mCallback->Output(audio);
mHaveOutput = true;
@ -299,30 +301,30 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
void
AppleATDecoder::SetupDecoder()
{
AudioStreamBasicDescription inputFormat, outputFormat;
AudioStreamBasicDescription inputFormat;
// Fill in the input format description from the stream.
AppleUtils::GetProperty(mStream,
kAudioFileStreamProperty_DataFormat, &inputFormat);
// Fill in the output format manually.
PodZero(&outputFormat);
outputFormat.mFormatID = kAudioFormatLinearPCM;
outputFormat.mSampleRate = inputFormat.mSampleRate;
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
PodZero(&mOutputFormat);
mOutputFormat.mFormatID = kAudioFormatLinearPCM;
mOutputFormat.mSampleRate = inputFormat.mSampleRate;
mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
outputFormat.mBitsPerChannel = 32;
outputFormat.mFormatFlags =
mOutputFormat.mBitsPerChannel = 32;
mOutputFormat.mFormatFlags =
kLinearPCMFormatFlagIsFloat |
0;
#else
# error Unknown audio sample type
#endif
// Set up the decoder so it gives us one sample per frame
outputFormat.mFramesPerPacket = 1;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
= outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;
mOutputFormat.mFramesPerPacket = 1;
mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
= mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;
OSStatus rv = AudioConverterNew(&inputFormat, &outputFormat, &mConverter);
OSStatus rv = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
if (rv) {
LOG("Error %d constructing AudioConverter", rv);
mConverter = nullptr;

Просмотреть файл

@ -52,6 +52,7 @@ private:
uint64_t mCurrentAudioFrame;
int64_t mSamplePosition;
bool mHaveOutput;
AudioStreamBasicDescription mOutputFormat;
void SetupDecoder();
void SubmitSample(nsAutoPtr<mp4_demuxer::MP4Sample> aSample);

Просмотреть файл

@ -180,7 +180,8 @@ EMEAACDecoder::Decoded(const nsTArray<int16_t>& aPCM,
duration.value(),
numFrames,
audioData.forget(),
aChannels));
aChannels,
aRate));
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

Просмотреть файл

@ -36,9 +36,6 @@ FFmpegAACDecoder<LIBAV_VER>::Init()
static AudioDataValue*
CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumSamples)
{
// These are the only two valid AAC packet sizes.
NS_ASSERTION(aNumSamples == 960 || aNumSamples == 1024,
"Should have exactly one AAC audio packet.");
MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);
nsAutoArrayPtr<AudioDataValue> audio(
@ -93,13 +90,14 @@ FFmpegAACDecoder<LIBAV_VER>::DecodePacket(MP4Sample* aSample)
"Only one audio packet should be received at a time.");
uint32_t numChannels = mCodecContext->channels;
uint32_t samplingRate = mCodecContext->sample_rate;
nsAutoArrayPtr<AudioDataValue> audio(
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));
nsAutoPtr<AudioData> data(
new AudioData(packet.pos, aSample->composition_timestamp, aSample->duration,
mFrame->nb_samples, audio.forget(), numChannels));
mFrame->nb_samples, audio.forget(), numChannels, samplingRate));
mCallback->Output(data.forget());

Просмотреть файл

@ -99,6 +99,9 @@ FFmpegRuntimeLinker::Bind(const char* aLibName, uint32_t Version)
// Creates an FFmpeg-backed PlatformDecoderModule, linking the library first.
// Returns nullptr when linking fails, allowing the caller (see
// PlatformDecoderModule::Create) to fall back to another decoder module.
// Ownership of the returned module transfers to the caller.
/* static */ PlatformDecoderModule*
FFmpegRuntimeLinker::CreateDecoderModule()
{
// Bail out early if the FFmpeg library cannot be loaded/bound.
if (!Link()) {
return nullptr;
}
// sLib is presumably selected by a successful Link(); its factory builds
// the version-specific decoder module — TODO confirm against Link().
PlatformDecoderModule* module = sLib->Factory();
return module;
}

Просмотреть файл

@ -115,8 +115,13 @@ GonkAudioDecoderManager::CreateAudioData(int64_t aStreamOffset, AudioData **v) {
if (!duration.isValid()) {
return NS_ERROR_UNEXPECTED;
}
*v = new AudioData(aStreamOffset, timeUs, duration.value(), frames, buffer.forget(),
mAudioChannels);
*v = new AudioData(aStreamOffset,
timeUs,
duration.value(),
frames,
buffer.forget(),
mAudioChannels,
mAudioRate);
ReleaseAudioBuffer();
return NS_OK;
}

Просмотреть файл

@ -263,7 +263,8 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
duration,
numFrames,
audioData.forget(),
mAudioChannels);
mAudioChannels,
mAudioRate);
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

Просмотреть файл

@ -434,7 +434,8 @@ nsresult OggReader::DecodeVorbis(ogg_packet* aPacket) {
duration,
frames,
buffer.forget(),
channels));
channels,
mVorbisState->mInfo.rate));
mDecodedAudioFrames += frames;
@ -550,7 +551,8 @@ nsresult OggReader::DecodeOpus(ogg_packet* aPacket) {
endTime - startTime,
frames,
buffer.forget(),
channels));
channels,
mOpusState->mRate));
mDecodedAudioFrames += frames;

Просмотреть файл

@ -243,7 +243,8 @@ bool WaveReader::DecodeAudioData()
static_cast<int64_t>(readSizeTime * USECS_PER_S),
static_cast<int32_t>(frames),
sampleBuffer.forget(),
mChannels));
mChannels,
mSampleRate));
return true;
}

Просмотреть файл

@ -613,11 +613,12 @@ bool WebMReader::DecodeAudioPacket(nestegg_packet* aPacket, int64_t aOffset)
total_frames += frames;
AudioQueue().Push(new AudioData(aOffset,
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels));
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels,
rate));
mAudioFrames += frames;
if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
return false;
@ -738,11 +739,12 @@ bool WebMReader::DecodeAudioPacket(nestegg_packet* aPacket, int64_t aOffset)
return false;
};
AudioQueue().Push(new AudioData(mDecoder->GetResource()->Tell(),
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels));
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels,
rate));
mAudioFrames += frames;
#else

Просмотреть файл

@ -650,7 +650,8 @@ WMFReader::DecodeAudioData()
duration,
numFrames,
pcmSamples.forget(),
mAudioChannels));
mAudioChannels,
mAudioRate));
#ifdef LOG_SAMPLE_DECODE
DECODER_LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

Просмотреть файл

@ -203,6 +203,16 @@ URL::RevokeObjectURL(const GlobalObject& aGlobal, const nsAString& aURL)
}
}
// Chrome-only WebIDL helper (declared [Throws, ChromeOnly] in URL.webidl):
// returns the principal registered for a blob/object URL via
// nsHostObjectProtocolHandler, or null for URLs that were never registered
// (the test passes "hello world!" and expects null). aRv is unused here.
// NOTE(review): the lossy UTF-16 -> ASCII conversion assumes object URLs
// are pure ASCII — confirm against the createObjectURL implementation.
nsIPrincipal*
URL::GetPrincipalFromURL(const GlobalObject& aGlobal, const nsAString& aURL,
ErrorResult& aRv)
{
MOZ_ASSERT(nsContentUtils::IsCallerChrome());
NS_LossyConvertUTF16toASCII asciiurl(aURL);
return nsHostObjectProtocolHandler::GetDataEntryPrincipal(asciiurl);
}
void
URL::GetHref(nsString& aHref, ErrorResult& aRv) const
{

Просмотреть файл

@ -12,6 +12,7 @@
#include "nsString.h"
class nsIDOMBlob;
class nsIPrincipal;
class nsISupports;
class nsIURI;
@ -68,6 +69,9 @@ public:
ErrorResult& aError);
static void RevokeObjectURL(const GlobalObject& aGlobal,
const nsAString& aURL);
static nsIPrincipal* GetPrincipalFromURL(const GlobalObject& aGlobal,
const nsAString& aURL,
ErrorResult& aError);
void GetHref(nsString& aHref, ErrorResult& aRv) const;

Просмотреть файл

@ -2676,9 +2676,15 @@ nsGlobalWindow::SetNewDocument(nsIDocument* aDocument,
if (!aState) {
if (reUseInnerWindow) {
if (newInnerWindow->mDoc != aDocument) {
newInnerWindow->mDoc = aDocument;
// The storage objects contain the URL of the window. We have to
// recreate them when the innerWindow is reused.
newInnerWindow->mLocalStorage = nullptr;
newInnerWindow->mSessionStorage = nullptr;
if (newInnerWindow->IsDOMBinding()) {
WindowBinding::ClearCachedDocumentValue(cx, newInnerWindow);
} else {
@ -11460,11 +11466,15 @@ nsGlobalWindow::Observe(nsISupports* aSubject, const char* aTopic,
// Clone the storage event included in the observer notification. We want
// to dispatch clones rather than the original event.
ErrorResult error;
nsRefPtr<StorageEvent> newEvent =
CloneStorageEvent(fireMozStorageChanged ?
NS_LITERAL_STRING("MozStorageChanged") :
NS_LITERAL_STRING("storage"),
event);
event, error);
if (error.Failed()) {
return error.ErrorCode();
}
newEvent->SetTrusted(true);
@ -11560,7 +11570,8 @@ nsGlobalWindow::Observe(nsISupports* aSubject, const char* aTopic,
already_AddRefed<StorageEvent>
nsGlobalWindow::CloneStorageEvent(const nsAString& aType,
const nsRefPtr<StorageEvent>& aEvent)
const nsRefPtr<StorageEvent>& aEvent,
ErrorResult& aRv)
{
MOZ_ASSERT(IsInnerWindow());
@ -11572,7 +11583,26 @@ nsGlobalWindow::CloneStorageEvent(const nsAString& aType,
aEvent->GetOldValue(dict.mOldValue);
aEvent->GetNewValue(dict.mNewValue);
aEvent->GetUrl(dict.mUrl);
dict.mStorageArea = aEvent->GetStorageArea();
nsRefPtr<DOMStorage> storageArea = aEvent->GetStorageArea();
MOZ_ASSERT(storageArea);
nsRefPtr<DOMStorage> storage;
if (storageArea->GetType() == DOMStorage::LocalStorage) {
storage = GetLocalStorage(aRv);
} else {
MOZ_ASSERT(storageArea->GetType() == DOMStorage::SessionStorage);
storage = GetSessionStorage(aRv);
}
if (aRv.Failed() || !storage) {
return nullptr;
}
MOZ_ASSERT(storage);
MOZ_ASSERT(storage->IsForkOf(storageArea));
dict.mStorageArea = storage;
nsRefPtr<StorageEvent> event = StorageEvent::Constructor(this, aType, dict);
return event.forget();

Просмотреть файл

@ -1386,7 +1386,8 @@ protected:
// Inner windows only.
already_AddRefed<mozilla::dom::StorageEvent>
CloneStorageEvent(const nsAString& aType,
const nsRefPtr<mozilla::dom::StorageEvent>& aEvent);
const nsRefPtr<mozilla::dom::StorageEvent>& aEvent,
mozilla::ErrorResult& aRv);
// Outer windows only.
nsDOMWindowList* GetWindowList();

Просмотреть файл

@ -13,6 +13,10 @@ this.checkFromJSM = function checkFromJSM(ok, is) {
var url = URL.createObjectURL(blob);
ok(url, "URL is created!");
var p = URL.getPrincipalFromURL(url);
ok(p, "Principal exists.");
ok(p instanceof Components.interfaces.nsIPrincipal, "Principal is a nsIPrincipal");
URL.revokeObjectURL(url);
ok(true, "URL is revoked");
}

Просмотреть файл

@ -24,6 +24,7 @@ skip-if = buildapp == 'mulet'
[test_bug989665.html]
[test_bug999456.html]
[test_bug1022229.html]
[test_bug1043106.html]
[test_clearTimeoutIntervalNoArg.html]
[test_consoleEmptyStack.html]
[test_constructor-assignment.html]

Просмотреть файл

@ -0,0 +1,43 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1043106
-->
<head>
  <meta charset="utf-8">
  <title>Test for Bug 1043106</title>
  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1043106">Mozilla Bug 1043106</a>
<iframe id="iframe"></iframe>
<script type="application/javascript">

// The storage object (localStorage or sessionStorage) whose "storage" event
// the currently running sub-test expects to receive.
var storage;

window.addEventListener("storage", function (event) {
  // Bug fix: the original called ok(a, b, msg); SimpleTest's ok() only
  // checks that its first argument is truthy, so nothing was compared.
  // is() performs the intended equality check.
  is(event.storageArea, storage, "The storageArea is correct");
  runTests();
}, false);

var tests = [ { key: 'localStorage', storage: localStorage },
              { key: 'sessionStorage', storage: sessionStorage } ];

// Runs the next sub-test: points the iframe at a data: URL that writes into
// the corresponding storage object, which fires a "storage" event on us.
function runTests() {
  if (!tests.length) {
    SimpleTest.finish();
    return;
  }

  var t = tests.shift();
  storage = t.storage;

  var ifr = document.getElementById("iframe");
  ifr.src = "data:text/html,<script>" + t.key + ".setItem(\"a\",\"b\");</" + "script>";
}

SimpleTest.waitForExplicitFinish();
runTests();

</script>
</body>
</html>

Просмотреть файл

@ -319,5 +319,19 @@
url.hostname = "2001::1";
is(url.hostname, "localhost", "Setting bad hostname fails");
</script>
<script>
var blob = new Blob(['a']);
var url = URL.createObjectURL(blob);
ok(!('getPrincipalFromURL' in URL), "URL.getPrincipalFromURL is not exposed");
var fakeP = SpecialPowers.wrap(URL).getPrincipalFromURL("hello world!");
ok(!fakeP, "Principal doesn't exists.");
var p = SpecialPowers.wrap(URL).getPrincipalFromURL(url);
ok(p, "Principal exists.");
ok(p.URI, "Principal.URI exists.");
is(p.URI.spec, window.location.href, "Principal.URI is correct");
</script>
</body>
</html>

Просмотреть файл

@ -7,6 +7,7 @@
#include "ContentEventHandler.h"
#include "IMEContentObserver.h"
#include "mozilla/AsyncEventDispatcher.h"
#include "mozilla/AutoRestore.h"
#include "mozilla/EventStateManager.h"
#include "mozilla/IMEStateManager.h"
#include "mozilla/TextComposition.h"
@ -87,6 +88,7 @@ IMEContentObserver::IMEContentObserver()
, mIsSelectionChangeEventPending(false)
, mSelectionChangeCausedOnlyByComposition(false)
, mIsPositionChangeEventPending(false)
, mIsFlushingPendingNotifications(false)
{
#ifdef DEBUG
TestMergingTextChangeData();
@ -429,12 +431,14 @@ class TextChangeEvent : public nsRunnable
{
public:
TextChangeEvent(IMEContentObserver* aDispatcher,
const IMEContentObserver::TextChangeData& aData)
IMEContentObserver::TextChangeData& aData)
: mDispatcher(aDispatcher)
, mData(aData)
{
MOZ_ASSERT(mDispatcher);
MOZ_ASSERT(mData.mStored);
// Reset mStored because this now consumes the data.
aData.mStored = false;
}
NS_IMETHOD Run()
@ -962,27 +966,73 @@ IMEContentObserver::MaybeNotifyIMEOfPositionChange()
FlushMergeableNotifications();
}
// Runnable that re-invokes IMEContentObserver::FlushMergeableNotifications()
// asynchronously. Dispatched when a flush produced new pending notifications
// that could not be delivered within the same (possibly nested) flush.
class AsyncMergeableNotificationsFlusher : public nsRunnable
{
public:
AsyncMergeableNotificationsFlusher(IMEContentObserver* aIMEContentObserver)
: mIMEContentObserver(aIMEContentObserver)
{
MOZ_ASSERT(mIMEContentObserver);
}
NS_IMETHOD Run()
{
mIMEContentObserver->FlushMergeableNotifications();
return NS_OK;
}
private:
// Strong reference keeps the observer alive until the deferred flush runs.
nsRefPtr<IMEContentObserver> mIMEContentObserver;
};
void
IMEContentObserver::FlushMergeableNotifications()
{
// If we're in handling an edit action, this method will be called later.
// If this is already detached from the widget, this doesn't need to notify
// anything.
if (mIsEditorInTransaction || !mWidget) {
return;
}
// Sending a notification may cause a nested call of this method. For
// example, a notified listener may dispatch a query content event, which
// flushes layout and may in turn generate another layout change
// notification.
if (mIsFlushingPendingNotifications) {
// So, if this is already called, this should do nothing.
return;
}
AutoRestore<bool> flusing(mIsFlushingPendingNotifications);
mIsFlushingPendingNotifications = true;
// NOTE: Reset each pending flag because sending notification may cause
// another change.
if (mTextChangeData.mStored) {
nsContentUtils::AddScriptRunner(new TextChangeEvent(this, mTextChangeData));
mTextChangeData.mStored = false;
}
if (mIsSelectionChangeEventPending) {
mIsSelectionChangeEventPending = false;
nsContentUtils::AddScriptRunner(
new SelectionChangeEvent(this, mSelectionChangeCausedOnlyByComposition));
mIsSelectionChangeEventPending = false;
}
if (mIsPositionChangeEventPending) {
nsContentUtils::AddScriptRunner(new PositionChangeEvent(this));
mIsPositionChangeEventPending = false;
nsContentUtils::AddScriptRunner(new PositionChangeEvent(this));
}
// If notifications may cause new change, we should notify them now.
if (mTextChangeData.mStored ||
mIsSelectionChangeEventPending ||
mIsPositionChangeEventPending) {
nsRefPtr<AsyncMergeableNotificationsFlusher> asyncFlusher =
new AsyncMergeableNotificationsFlusher(this);
NS_DispatchToCurrentThread(asyncFlusher);
}
}

Просмотреть файл

@ -38,6 +38,8 @@ class IMEContentObserver MOZ_FINAL : public nsISelectionListener
, public nsSupportsWeakReference
, public nsIEditorObserver
{
friend class AsyncMergeableNotificationsFlusher;
public:
IMEContentObserver();
@ -219,6 +221,7 @@ private:
bool mIsSelectionChangeEventPending;
bool mSelectionChangeCausedOnlyByComposition;
bool mIsPositionChangeEventPending;
bool mIsFlushingPendingNotifications;
};
} // namespace mozilla

Просмотреть файл

@ -126,6 +126,12 @@ public:
bool IsPrivate() const { return mIsPrivate; }
bool IsSessionOnly() const { return mIsSessionOnly; }
// Returns true if this object and aOther share the same underlying storage
// cache (mCache), i.e. they are distinct wrappers ("forks") over the same
// storage area. aOther must be non-null.
bool IsForkOf(const DOMStorage* aOther) const
{
MOZ_ASSERT(aOther);
return mCache == aOther->mCache;
}
private:
~DOMStorage();

Просмотреть файл

@ -38,3 +38,9 @@ partial interface URL {
[Throws]
static DOMString? createObjectURL(MediaSource source, optional objectURLOptions options);
};
// mozilla extensions
partial interface URL {
[Throws, ChromeOnly]
static Principal getPrincipalFromURL(DOMString blobURL);
};

Просмотреть файл

@ -893,6 +893,16 @@ URL::RevokeObjectURL(const GlobalObject& aGlobal, const nsAString& aUrl)
}
}
// static
// Worker-thread stub for the chrome-only URL.getPrincipalFromURL WebIDL
// method: always throws NS_ERROR_NOT_IMPLEMENTED and returns null. The
// working implementation exists only in the main-thread URL class.
nsIPrincipal*
URL::GetPrincipalFromURL(const GlobalObject& aGlobal, const nsAString& aUrl,
ErrorResult& aRv)
{
// This method is not implemented in workers.
aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
return nullptr;
}
void
URL::URLSearchParamsUpdated(URLSearchParams* aSearchParams)
{

Просмотреть файл

@ -13,6 +13,8 @@
#include "mozilla/dom/BindingDeclarations.h"
#include "mozilla/dom/URLSearchParams.h"
class nsIPrincipal;
namespace mozilla {
namespace dom {
struct objectURLOptions;
@ -67,6 +69,10 @@ public:
static void
RevokeObjectURL(const GlobalObject& aGlobal, const nsAString& aUrl);
static nsIPrincipal* GetPrincipalFromURL(const GlobalObject& aGlobal,
const nsAString& aURL,
ErrorResult& aError);
void GetHref(nsString& aHref, ErrorResult& aRv) const;
void SetHref(const nsAString& aHref, ErrorResult& aRv);

Просмотреть файл

@ -1,380 +0,0 @@
/*
* Copyright 2012 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBlitRow.h"
#include "SkColorPriv.h"
#include "SkDither.h"
#include "SkMathPriv.h"
#include "SkUtils.h"
#include "SkUtilsArm.h"
// Define USE_NEON_CODE to indicate that we need to build NEON routines
#define USE_NEON_CODE (!SK_ARM_NEON_IS_NONE)
// Define USE_ARM_CODE to indicate that we need to build ARM routines
#define USE_ARM_CODE (!SK_ARM_NEON_IS_ALWAYS)
#if USE_NEON_CODE
#include "SkBlitRow_opts_arm_neon.h"
#endif
#if USE_ARM_CODE
static void S32A_D565_Opaque(uint16_t* SK_RESTRICT dst,
const SkPMColor* SK_RESTRICT src, int count,
U8CPU alpha, int /*x*/, int /*y*/) {
SkASSERT(255 == alpha);
asm volatile (
"1: \n\t"
"ldr r3, [%[src]], #4 \n\t"
"cmp r3, #0xff000000 \n\t"
"blo 2f \n\t"
"and r4, r3, #0x0000f8 \n\t"
"and r5, r3, #0x00fc00 \n\t"
"and r6, r3, #0xf80000 \n\t"
"pld [r1, #32] \n\t"
"lsl r3, r4, #8 \n\t"
"orr r3, r3, r5, lsr #5 \n\t"
"orr r3, r3, r6, lsr #19 \n\t"
"subs %[count], %[count], #1 \n\t"
"strh r3, [%[dst]], #2 \n\t"
"bne 1b \n\t"
"b 4f \n\t"
"2: \n\t"
"lsrs r7, r3, #24 \n\t"
"beq 3f \n\t"
"ldrh r4, [%[dst]] \n\t"
"rsb r7, r7, #255 \n\t"
"and r6, r4, #0x001f \n\t"
#if SK_ARM_ARCH == 6
"lsl r5, r4, #21 \n\t"
"lsr r5, r5, #26 \n\t"
#else
"ubfx r5, r4, #5, #6 \n\t"
#endif
"pld [r0, #16] \n\t"
"lsr r4, r4, #11 \n\t"
#ifdef SK_ARM_HAS_EDSP
"smulbb r6, r6, r7 \n\t"
"smulbb r5, r5, r7 \n\t"
"smulbb r4, r4, r7 \n\t"
#else
"mul r6, r6, r7 \n\t"
"mul r5, r5, r7 \n\t"
"mul r4, r4, r7 \n\t"
#endif
"uxtb r7, r3, ROR #16 \n\t"
"uxtb ip, r3, ROR #8 \n\t"
"and r3, r3, #0xff \n\t"
"add r6, r6, #16 \n\t"
"add r5, r5, #32 \n\t"
"add r4, r4, #16 \n\t"
"add r6, r6, r6, lsr #5 \n\t"
"add r5, r5, r5, lsr #6 \n\t"
"add r4, r4, r4, lsr #5 \n\t"
"add r6, r7, r6, lsr #5 \n\t"
"add r5, ip, r5, lsr #6 \n\t"
"add r4, r3, r4, lsr #5 \n\t"
"lsr r6, r6, #3 \n\t"
"and r5, r5, #0xfc \n\t"
"and r4, r4, #0xf8 \n\t"
"orr r6, r6, r5, lsl #3 \n\t"
"orr r4, r6, r4, lsl #8 \n\t"
"strh r4, [%[dst]], #2 \n\t"
"pld [r1, #32] \n\t"
"subs %[count], %[count], #1 \n\t"
"bne 1b \n\t"
"b 4f \n\t"
"3: \n\t"
"subs %[count], %[count], #1 \n\t"
"add %[dst], %[dst], #2 \n\t"
"bne 1b \n\t"
"4: \n\t"
: [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
:
: "memory", "cc", "r3", "r4", "r5", "r6", "r7", "ip"
);
}
static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
const SkPMColor* SK_RESTRICT src,
int count, U8CPU alpha) {
SkASSERT(255 == alpha);
asm volatile (
"cmp %[count], #0 \n\t" /* comparing count with 0 */
"beq 3f \n\t" /* if zero exit */
"mov ip, #0xff \n\t" /* load the 0xff mask in ip */
"orr ip, ip, ip, lsl #16 \n\t" /* convert it to 0xff00ff in ip */
"cmp %[count], #2 \n\t" /* compare count with 2 */
"blt 2f \n\t" /* if less than 2 -> single loop */
/* Double Loop */
"1: \n\t" /* <double loop> */
"ldm %[src]!, {r5,r6} \n\t" /* load the src(s) at r5-r6 */
"ldm %[dst], {r7,r8} \n\t" /* loading dst(s) into r7-r8 */
"lsr r4, r5, #24 \n\t" /* extracting the alpha from source and storing it to r4 */
/* ----------- */
"and r9, ip, r7 \n\t" /* r9 = br masked by ip */
"rsb r4, r4, #256 \n\t" /* subtracting the alpha from 256 -> r4=scale */
"and r10, ip, r7, lsr #8 \n\t" /* r10 = ag masked by ip */
"mul r9, r9, r4 \n\t" /* br = br * scale */
"mul r10, r10, r4 \n\t" /* ag = ag * scale */
"and r9, ip, r9, lsr #8 \n\t" /* lsr br by 8 and mask it */
"and r10, r10, ip, lsl #8 \n\t" /* mask ag with reverse mask */
"lsr r4, r6, #24 \n\t" /* extracting the alpha from source and storing it to r4 */
"orr r7, r9, r10 \n\t" /* br | ag*/
"add r7, r5, r7 \n\t" /* dst = src + calc dest(r7) */
"rsb r4, r4, #256 \n\t" /* subtracting the alpha from 255 -> r4=scale */
/* ----------- */
"and r9, ip, r8 \n\t" /* r9 = br masked by ip */
"and r10, ip, r8, lsr #8 \n\t" /* r10 = ag masked by ip */
"mul r9, r9, r4 \n\t" /* br = br * scale */
"sub %[count], %[count], #2 \n\t"
"mul r10, r10, r4 \n\t" /* ag = ag * scale */
"and r9, ip, r9, lsr #8 \n\t" /* lsr br by 8 and mask it */
"and r10, r10, ip, lsl #8 \n\t" /* mask ag with reverse mask */
"cmp %[count], #1 \n\t" /* comparing count with 1 */
"orr r8, r9, r10 \n\t" /* br | ag */
"add r8, r6, r8 \n\t" /* dst = src + calc dest(r8) */
/* ----------------- */
"stm %[dst]!, {r7,r8} \n\t" /* *dst = r7, increment dst by two (each times 4) */
/* ----------------- */
"bgt 1b \n\t" /* if greater than 1 -> reloop */
"blt 3f \n\t" /* if less than 1 -> exit */
/* Single Loop */
"2: \n\t" /* <single loop> */
"ldr r5, [%[src]], #4 \n\t" /* load the src pointer into r5 r5=src */
"ldr r7, [%[dst]] \n\t" /* loading dst into r7 */
"lsr r4, r5, #24 \n\t" /* extracting the alpha from source and storing it to r4 */
/* ----------- */
"and r9, ip, r7 \n\t" /* r9 = br masked by ip */
"rsb r4, r4, #256 \n\t" /* subtracting the alpha from 256 -> r4=scale */
"and r10, ip, r7, lsr #8 \n\t" /* r10 = ag masked by ip */
"mul r9, r9, r4 \n\t" /* br = br * scale */
"mul r10, r10, r4 \n\t" /* ag = ag * scale */
"and r9, ip, r9, lsr #8 \n\t" /* lsr br by 8 and mask it */
"and r10, r10, ip, lsl #8 \n\t" /* mask ag */
"orr r7, r9, r10 \n\t" /* br | ag */
"add r7, r5, r7 \n\t" /* *dst = src + calc dest(r7) */
/* ----------------- */
"str r7, [%[dst]], #4 \n\t" /* *dst = r7, increment dst by one (times 4) */
/* ----------------- */
"3: \n\t" /* <exit> */
: [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
:
: "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "ip", "memory"
);
}
/*
* ARM asm version of S32A_Blend_BlitRow32
*/
void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
const SkPMColor* SK_RESTRICT src,
int count, U8CPU alpha) {
asm volatile (
"cmp %[count], #0 \n\t" /* comparing count with 0 */
"beq 3f \n\t" /* if zero exit */
"mov r12, #0xff \n\t" /* load the 0xff mask in r12 */
"orr r12, r12, r12, lsl #16 \n\t" /* convert it to 0xff00ff in r12 */
/* src1,2_scale */
"add %[alpha], %[alpha], #1 \n\t" /* loading %[alpha]=src_scale=alpha+1 */
"cmp %[count], #2 \n\t" /* comparing count with 2 */
"blt 2f \n\t" /* if less than 2 -> single loop */
/* Double Loop */
"1: \n\t" /* <double loop> */
"ldm %[src]!, {r5, r6} \n\t" /* loading src pointers into r5 and r6 */
"ldm %[dst], {r7, r8} \n\t" /* loading dst pointers into r7 and r8 */
/* dst1_scale and dst2_scale*/
"lsr r9, r5, #24 \n\t" /* src >> 24 */
"lsr r10, r6, #24 \n\t" /* src >> 24 */
#ifdef SK_ARM_HAS_EDSP
"smulbb r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
"smulbb r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
#else
"mul r9, r9, %[alpha] \n\t" /* r9 = SkMulS16 r9 with src_scale */
"mul r10, r10, %[alpha] \n\t" /* r10 = SkMulS16 r10 with src_scale */
#endif
"lsr r9, r9, #8 \n\t" /* r9 >> 8 */
"lsr r10, r10, #8 \n\t" /* r10 >> 8 */
"rsb r9, r9, #256 \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
"rsb r10, r10, #256 \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */
/* ---------------------- */
/* src1, src1_scale */
"and r11, r12, r5, lsr #8 \n\t" /* ag = r11 = r5 masked by r12 lsr by #8 */
"and r4, r12, r5 \n\t" /* rb = r4 = r5 masked by r12 */
"mul r11, r11, %[alpha] \n\t" /* ag = r11 times src_scale */
"mul r4, r4, %[alpha] \n\t" /* rb = r4 times src_scale */
"and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
"and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */
"orr r5, r11, r4 \n\t" /* r5 = (src1, src_scale) */
/* dst1, dst1_scale */
"and r11, r12, r7, lsr #8 \n\t" /* ag = r11 = r7 masked by r12 lsr by #8 */
"and r4, r12, r7 \n\t" /* rb = r4 = r7 masked by r12 */
"mul r11, r11, r9 \n\t" /* ag = r11 times dst_scale (r9) */
"mul r4, r4, r9 \n\t" /* rb = r4 times dst_scale (r9) */
"and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
"and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */
"orr r9, r11, r4 \n\t" /* r9 = (dst1, dst_scale) */
/* ---------------------- */
"add r9, r5, r9 \n\t" /* *dst = src plus dst both scaled */
/* ---------------------- */
/* ====================== */
/* src2, src2_scale */
"and r11, r12, r6, lsr #8 \n\t" /* ag = r11 = r6 masked by r12 lsr by #8 */
"and r4, r12, r6 \n\t" /* rb = r4 = r6 masked by r12 */
"mul r11, r11, %[alpha] \n\t" /* ag = r11 times src_scale */
"mul r4, r4, %[alpha] \n\t" /* rb = r4 times src_scale */
"and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
"and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */
"orr r6, r11, r4 \n\t" /* r6 = (src2, src_scale) */
/* dst2, dst2_scale */
"and r11, r12, r8, lsr #8 \n\t" /* ag = r11 = r8 masked by r12 lsr by #8 */
"and r4, r12, r8 \n\t" /* rb = r4 = r8 masked by r12 */
"mul r11, r11, r10 \n\t" /* ag = r11 times dst_scale (r10) */
"mul r4, r4, r10 \n\t" /* rb = r4 times dst_scale (r6) */
"and r11, r11, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
"and r4, r12, r4, lsr #8 \n\t" /* rb masked by mask (r12) */
"orr r10, r11, r4 \n\t" /* r10 = (dst2, dst_scale) */
"sub %[count], %[count], #2 \n\t" /* decrease count by 2 */
/* ---------------------- */
"add r10, r6, r10 \n\t" /* *dst = src plus dst both scaled */
/* ---------------------- */
"cmp %[count], #1 \n\t" /* compare count with 1 */
/* ----------------- */
"stm %[dst]!, {r9, r10} \n\t" /* copy r9 and r10 to r7 and r8 respectively */
/* ----------------- */
"bgt 1b \n\t" /* if %[count] greater than 1 reloop */
"blt 3f \n\t" /* if %[count] less than 1 exit */
/* else get into the single loop */
/* Single Loop */
"2: \n\t" /* <single loop> */
"ldr r5, [%[src]], #4 \n\t" /* loading src pointer into r5: r5=src */
"ldr r7, [%[dst]] \n\t" /* loading dst pointer into r7: r7=dst */
"lsr r6, r5, #24 \n\t" /* src >> 24 */
"and r8, r12, r5, lsr #8 \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
#ifdef SK_ARM_HAS_EDSP
"smulbb r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
#else
"mul r6, r6, %[alpha] \n\t" /* r6 = SkMulS16 with src_scale */
#endif
"and r9, r12, r5 \n\t" /* rb = r9 = r5 masked by r12 */
"lsr r6, r6, #8 \n\t" /* r6 >> 8 */
"mul r8, r8, %[alpha] \n\t" /* ag = r8 times scale */
"rsb r6, r6, #256 \n\t" /* r6 = 255 - r6 + 1 */
/* src, src_scale */
"mul r9, r9, %[alpha] \n\t" /* rb = r9 times scale */
"and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
"and r9, r12, r9, lsr #8 \n\t" /* rb masked by mask (r12) */
"orr r10, r8, r9 \n\t" /* r10 = (scr, src_scale) */
/* dst, dst_scale */
"and r8, r12, r7, lsr #8 \n\t" /* ag = r8 = r7 masked by r12 lsr by #8 */
"and r9, r12, r7 \n\t" /* rb = r9 = r7 masked by r12 */
"mul r8, r8, r6 \n\t" /* ag = r8 times scale (r6) */
"mul r9, r9, r6 \n\t" /* rb = r9 times scale (r6) */
"and r8, r8, r12, lsl #8 \n\t" /* ag masked by reverse mask (r12) */
"and r9, r12, r9, lsr #8 \n\t" /* rb masked by mask (r12) */
"orr r7, r8, r9 \n\t" /* r7 = (dst, dst_scale) */
"add r10, r7, r10 \n\t" /* *dst = src plus dst both scaled */
/* ----------------- */
"str r10, [%[dst]], #4 \n\t" /* *dst = r10, postincrement dst by one (times 4) */
/* ----------------- */
"3: \n\t" /* <exit> */
: [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count), [alpha] "+r" (alpha)
:
: "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "memory"
);
}
///////////////////////////////////////////////////////////////////////////////
static const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm[] = {
// no dither
// NOTE: For the functions below, we don't have a special version
// that assumes that each source pixel is opaque. But our S32A is
// still faster than the default, so use it.
S32A_D565_Opaque, // S32_D565_Opaque
NULL, // S32_D565_Blend
S32A_D565_Opaque, // S32A_D565_Opaque
NULL, // S32A_D565_Blend
// dither
NULL, // S32_D565_Opaque_Dither
NULL, // S32_D565_Blend_Dither
NULL, // S32A_D565_Opaque_Dither
NULL, // S32A_D565_Blend_Dither
};
static const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm[] = {
NULL, // S32_Opaque,
NULL, // S32_Blend,
S32A_Opaque_BlitRow32_arm, // S32A_Opaque,
S32A_Blend_BlitRow32_arm // S32A_Blend
};
#endif // USE_ARM_CODE
SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) {
return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_procs_arm)[flags];
}
SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
return SK_ARM_NEON_WRAP(sk_blitrow_platform_32_procs_arm)[flags];
}
///////////////////////////////////////////////////////////////////////////////
#define Color32_arm NULL
SkBlitRow::ColorProc SkBlitRow::PlatformColorProc() {
return SK_ARM_NEON_WRAP(Color32_arm);
}
SkBlitRow::ColorRectProc PlatformColorRectProcFactory() {
return NULL;
}

Просмотреть файл

@ -1,175 +0,0 @@
/*
* Copyright 2012 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkPathOpsCubic.h"
#include "SkPathOpsLine.h"
#include "SkPathOpsQuad.h"
// Sources
// computer-aided design - volume 22 number 9 november 1990 pp 538 - 549
// online at http://cagd.cs.byu.edu/~tom/papers/bezclip.pdf
// This turns a line segment into a parameterized line, of the form
// ax + by + c = 0
// When a^2 + b^2 == 1, the line is normalized.
// The distance to the line for (x, y) is d(x,y) = ax + by + c
//
// Note that the distances below are not necessarily normalized. To get the true
// distance, it's necessary to either call normalize() after xxxEndPoints(), or
// divide the result of xxxDistance() by sqrt(normalSquared())
class SkLineParameters {
public:
bool cubicEndPoints(const SkDCubic& pts) {
int endIndex = 1;
cubicEndPoints(pts, 0, endIndex);
if (dy() != 0) {
return true;
}
if (dx() == 0) {
cubicEndPoints(pts, 0, ++endIndex);
SkASSERT(endIndex == 2);
if (dy() != 0) {
return true;
}
if (dx() == 0) {
cubicEndPoints(pts, 0, ++endIndex); // line
SkASSERT(endIndex == 3);
return false;
}
}
// FIXME: after switching to round sort, remove bumping fA
if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
return true;
}
// if cubic tangent is on x axis, look at next control point to break tie
// control point may be approximate, so it must move significantly to account for error
if (NotAlmostEqualUlps(pts[0].fY, pts[++endIndex].fY)) {
if (pts[0].fY > pts[endIndex].fY) {
fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
}
return true;
}
if (endIndex == 3) {
return true;
}
SkASSERT(endIndex == 2);
if (pts[0].fY > pts[3].fY) {
fA = DBL_EPSILON; // push it from 0 to slightly negative (y() returns -a)
}
return true;
}
void cubicEndPoints(const SkDCubic& pts, int s, int e) {
fA = pts[s].fY - pts[e].fY;
fB = pts[e].fX - pts[s].fX;
fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
}
double cubicPart(const SkDCubic& part) {
cubicEndPoints(part);
if (part[0] == part[1] || ((const SkDLine& ) part[0]).nearRay(part[2])) {
return pointDistance(part[3]);
}
return pointDistance(part[2]);
}
void lineEndPoints(const SkDLine& pts) {
fA = pts[0].fY - pts[1].fY;
fB = pts[1].fX - pts[0].fX;
fC = pts[0].fX * pts[1].fY - pts[1].fX * pts[0].fY;
}
bool quadEndPoints(const SkDQuad& pts) {
quadEndPoints(pts, 0, 1);
if (dy() != 0) {
return true;
}
if (dx() == 0) {
quadEndPoints(pts, 0, 2);
return false;
}
if (dx() < 0) { // only worry about y bias when breaking cw/ccw tie
return true;
}
// FIXME: after switching to round sort, remove this
if (pts[0].fY > pts[2].fY) {
fA = DBL_EPSILON;
}
return true;
}
void quadEndPoints(const SkDQuad& pts, int s, int e) {
fA = pts[s].fY - pts[e].fY;
fB = pts[e].fX - pts[s].fX;
fC = pts[s].fX * pts[e].fY - pts[e].fX * pts[s].fY;
}
double quadPart(const SkDQuad& part) {
quadEndPoints(part);
return pointDistance(part[2]);
}
double normalSquared() const {
return fA * fA + fB * fB;
}
bool normalize() {
double normal = sqrt(normalSquared());
if (approximately_zero(normal)) {
fA = fB = fC = 0;
return false;
}
double reciprocal = 1 / normal;
fA *= reciprocal;
fB *= reciprocal;
fC *= reciprocal;
return true;
}
void cubicDistanceY(const SkDCubic& pts, SkDCubic& distance) const {
double oneThird = 1 / 3.0;
for (int index = 0; index < 4; ++index) {
distance[index].fX = index * oneThird;
distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
}
}
void quadDistanceY(const SkDQuad& pts, SkDQuad& distance) const {
double oneHalf = 1 / 2.0;
for (int index = 0; index < 3; ++index) {
distance[index].fX = index * oneHalf;
distance[index].fY = fA * pts[index].fX + fB * pts[index].fY + fC;
}
}
double controlPtDistance(const SkDCubic& pts, int index) const {
SkASSERT(index == 1 || index == 2);
return fA * pts[index].fX + fB * pts[index].fY + fC;
}
double controlPtDistance(const SkDQuad& pts) const {
return fA * pts[1].fX + fB * pts[1].fY + fC;
}
double pointDistance(const SkDPoint& pt) const {
return fA * pt.fX + fB * pt.fY + fC;
}
double dx() const {
return fB;
}
double dy() const {
return -fA;
}
private:
double fA;
double fB;
double fC;
};

Просмотреть файл

@ -1,13 +0,0 @@
--- src/pathops/SkLineParameters.h
+++ src/pathops/SkLineParameters.h
@@ -168,8 +172,10 @@
return -a;
}
private:
double a;
double b;
double c;
};
+
+#endif

Просмотреть файл

@ -39,13 +39,8 @@
#include "jit/none/BaseMacroAssembler-none.h"
namespace JSC { typedef MacroAssemblerNone MacroAssembler; }
#elif WTF_CPU_ARM_THUMB2
#include "assembler/assembler/MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssembler; }
#elif WTF_CPU_ARM_TRADITIONAL
#include "assembler/assembler/MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssembler; }
#elif JS_CODEGEN_ARM
// Merged with the jit backend support.
#elif WTF_CPU_MIPS
#include "assembler/assembler/MacroAssemblerMIPS.h"

Просмотреть файл

@ -1,100 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
#include "assembler/assembler/MacroAssemblerARM.h"
#if (WTF_OS_LINUX || WTF_OS_ANDROID) && !defined(JS_ARM_SIMULATOR)
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>
#include <stdio.h>
// lame check for kernel version
// see bug 586550
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#include <asm/procinfo.h>
#else
#include <asm/hwcap.h>
#endif
#endif
namespace JSC {
static bool isVFPPresent()
{
#ifdef JS_ARM_SIMULATOR
return true;
#else
#if WTF_OS_LINUX
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd > 0) {
Elf32_auxv_t aux;
while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
if (aux.a_type == AT_HWCAP) {
close(fd);
return aux.a_un.a_val & HWCAP_VFP;
}
}
close(fd);
}
#endif
#if defined(__GNUC__) && defined(__VFP_FP__)
return true;
#endif
#ifdef WTF_OS_ANDROID
FILE *fp = fopen("/proc/cpuinfo", "r");
if (!fp)
return false;
char buf[1024];
fread(buf, sizeof(char), sizeof(buf), fp);
fclose(fp);
if (strstr(buf, "vfp"))
return true;
#endif
return false;
#endif // JS_ARM_SIMULATOR
}
const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
}
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

Просмотреть файл

@ -1,51 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2008 Apple Inc.
* Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_MacroAssemblerARM_h
#define assembler_assembler_MacroAssemblerARM_h
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
namespace JSC {
class MacroAssemblerARM {
public:
static bool supportsFloatingPoint() { return s_isVFPPresent; }
static const bool s_isVFPPresent;
};
}
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#endif /* assembler_assembler_MacroAssemblerARM_h */

Просмотреть файл

@ -1,49 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_MacroAssemblerARMv7_h
#define assembler_assembler_MacroAssemblerARMv7_h
#include "assembler/wtf/Platform.h"
#if ENABLE(ASSEMBLER)
namespace JSC {
class MacroAssemblerARMv7 {
public:
static bool supportsFloatingPoint() { return true; }
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_MacroAssemblerARMv7_h */

Просмотреть файл

@ -298,12 +298,16 @@ private:
OP2_UCOMISD_VsdWsd = 0x2E,
OP2_MOVMSKPD_EdVd = 0x50,
OP2_ADDSD_VsdWsd = 0x58,
OP2_ADDPS_VpsWps = 0x58,
OP2_MULSD_VsdWsd = 0x59,
OP2_MULPS_VpsWps = 0x59,
OP2_CVTSS2SD_VsdEd = 0x5A,
OP2_CVTSD2SS_VsdEd = 0x5A,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_SUBPS_VpsWps = 0x5C,
OP2_MINSD_VsdWsd = 0x5D,
OP2_DIVSD_VsdWsd = 0x5E,
OP2_DIVPS_VpsWps = 0x5E,
OP2_MAXSD_VsdWsd = 0x5F,
OP2_SQRTSD_VsdWsd = 0x51,
OP2_SQRTSS_VssWss = 0x51,
@ -328,7 +332,10 @@ private:
OP2_MOVZX_GvEw = 0xB7,
OP2_XADD_EvGv = 0xC1,
OP2_PEXTRW_GdUdIb = 0xC5,
OP2_SHUFPS_VpsWpsIb = 0xC6
OP2_SHUFPS_VpsWpsIb = 0xC6,
OP2_PXORDQ_VdqWdq = 0xEF,
OP2_PSUBD_VdqWdq = 0xFA,
OP2_PADDD_VdqWdq = 0xFE
} TwoByteOpcodeID;
typedef enum {
@ -661,6 +668,124 @@ public:
m_formatter.twoByteOp(OP2_XADD_EvGv, srcdest, base, index, scale, offset);
}
void paddd_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("paddd %s, %s", nameFPReg(src), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PADDD_VdqWdq, (RegisterID)dst, (RegisterID)src);
}
void paddd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("paddd %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PADDD_VdqWdq, (RegisterID)dst, base, offset);
}
void paddd_mr(const void* address, XMMRegisterID dst)
{
spew("paddd %p, %s",
address, nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PADDD_VdqWdq, (RegisterID)dst, address);
}
void psubd_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("psubd %s, %s", nameFPReg(src), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PSUBD_VdqWdq, (RegisterID)dst, (RegisterID)src);
}
void psubd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("psubd %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PSUBD_VdqWdq, (RegisterID)dst, base, offset);
}
void psubd_mr(const void* address, XMMRegisterID dst)
{
spew("psubd %p, %s",
address, nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PSUBD_VdqWdq, (RegisterID)dst, address);
}
void addps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("addps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_ADDPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
void addps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("addps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_ADDPS_VpsWps, (RegisterID)dst, base, offset);
}
void addps_mr(const void* address, XMMRegisterID dst)
{
spew("addps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_ADDPS_VpsWps, (RegisterID)dst, address);
}
void subps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("subps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_SUBPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
void subps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("subps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_SUBPS_VpsWps, (RegisterID)dst, base, offset);
}
void subps_mr(const void* address, XMMRegisterID dst)
{
spew("subps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_SUBPS_VpsWps, (RegisterID)dst, address);
}
void mulps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("mulps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_MULPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
void mulps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("mulps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_MULPS_VpsWps, (RegisterID)dst, base, offset);
}
void mulps_mr(const void* address, XMMRegisterID dst)
{
spew("mulps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_MULPS_VpsWps, (RegisterID)dst, address);
}
void divps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("divps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_DIVPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
void divps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("divps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_DIVPS_VpsWps, (RegisterID)dst, base, offset);
}
void divps_mr(const void* address, XMMRegisterID dst)
{
spew("divps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_DIVPS_VpsWps, (RegisterID)dst, address);
}
void andl_rr(RegisterID src, RegisterID dst)
{
spew("andl %s, %s",
@ -2590,6 +2715,14 @@ public:
m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
}
void pxor_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("pxor %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PXORDQ_VdqWdq, (RegisterID)dst, (RegisterID)src);
}
void pshufd_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst)
{
JS_ASSERT(mask < 256);
@ -2932,6 +3065,40 @@ public:
m_formatter.twoByteOp(OP2_MOVAPD_VsdWsd, (RegisterID)dst, (RegisterID)src);
}
#ifdef WTF_CPU_X86_64
JmpSrc movaps_ripr(XMMRegisterID dst)
{
spew("movaps ?(%%rip), %s",
nameFPReg(dst));
m_formatter.twoByteRipOp(OP2_MOVAPS_VsdWsd, (RegisterID)dst, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movdqa_ripr(XMMRegisterID dst)
{
spew("movdqa ?(%%rip), %s",
nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteRipOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, 0);
return JmpSrc(m_formatter.size());
}
#else
void movaps_mr(const void* address, XMMRegisterID dst)
{
spew("movaps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_MOVAPS_VsdWsd, (RegisterID)dst, address);
}
void movdqa_mr(const void* address, XMMRegisterID dst)
{
spew("movdqa %p, %s",
address, nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, address);
}
#endif // WTF_CPU_X86_64
void movdqu_rm(XMMRegisterID src, int offset, RegisterID base)
{
spew("movdqu %s, %s0x%x(%s)",
@ -3346,6 +3513,19 @@ public:
m_formatter.floatConstant(f);
}
void int32x4Constant(const int32_t s[4])
{
spew(".int32x4 (%d %d %d %d)", s[0], s[1], s[2], s[3]);
MOZ_ASSERT(m_formatter.isAligned(16));
m_formatter.int32x4Constant(s);
}
void float32x4Constant(const float f[4])
{
spew(".float32x4 (%f %f %f %f)", f[0], f[1], f[2], f[3]);
MOZ_ASSERT(m_formatter.isAligned(16));
m_formatter.float32x4Constant(f);
}
void int64Constant(int64_t i)
{
spew(".quad %lld", (long long)i);
@ -4030,12 +4210,30 @@ private:
m_buffer.putIntUnchecked(u.u32);
}
void int32x4Constant(const int32_t s[4])
{
for (size_t i = 0; i < 4; ++i)
int32Constant(s[i]);
}
void float32x4Constant(const float s[4])
{
for (size_t i = 0; i < 4; ++i)
floatConstant(s[i]);
}
void int64Constant(int64_t i)
{
m_buffer.ensureSpace(sizeof(int64_t));
m_buffer.putInt64Unchecked(i);
}
void int32Constant(int32_t i)
{
m_buffer.ensureSpace(sizeof(int32_t));
m_buffer.putIntUnchecked(i);
}
// Administrative methods:
size_t size() const { return m_buffer.size(); }

Просмотреть файл

@ -417,7 +417,7 @@ TypedObjectMemory(HandleValue v)
template<typename V>
JSObject *
js::Create(JSContext *cx, typename V::Elem *data)
js::CreateSimd(JSContext *cx, typename V::Elem *data)
{
typedef typename V::Elem Elem;
Rooted<TypeDescr*> typeDescr(cx, &V::GetTypeDescr(*cx->global()));
@ -433,8 +433,8 @@ js::Create(JSContext *cx, typename V::Elem *data)
return result;
}
template JSObject *js::Create<Float32x4>(JSContext *cx, Float32x4::Elem *data);
template JSObject *js::Create<Int32x4>(JSContext *cx, Int32x4::Elem *data);
template JSObject *js::CreateSimd<Float32x4>(JSContext *cx, Float32x4::Elem *data);
template JSObject *js::CreateSimd<Int32x4>(JSContext *cx, Int32x4::Elem *data);
namespace js {
template<typename T>
@ -608,7 +608,7 @@ CoercedFunc(JSContext *cx, unsigned argc, Value *vp)
}
RetElem *coercedResult = reinterpret_cast<RetElem *>(result);
RootedObject obj(cx, Create<Out>(cx, coercedResult));
RootedObject obj(cx, CreateSimd<Out>(cx, coercedResult));
if (!obj)
return false;
@ -653,7 +653,7 @@ FuncWith(JSContext *cx, unsigned argc, Value *vp)
result[i] = OpWith<Elem>::apply(i, withAsBool, val[i]);
}
RootedObject obj(cx, Create<V>(cx, result));
RootedObject obj(cx, CreateSimd<V>(cx, result));
if (!obj)
return false;
@ -712,7 +712,7 @@ FuncShuffle(JSContext *cx, unsigned argc, Value *vp)
result[i] = val2[(maskArg >> (i * SELECT_SHIFT)) & SELECT_MASK];
}
RootedObject obj(cx, Create<V>(cx, result));
RootedObject obj(cx, CreateSimd<V>(cx, result));
if (!obj)
return false;
@ -740,7 +740,7 @@ Int32x4BinaryScalar(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < 4; i++)
result[i] = Op::apply(val[i], bits);
RootedObject obj(cx, Create<Int32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Int32x4>(cx, result));
if (!obj)
return false;
@ -764,7 +764,7 @@ FuncConvert(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Vret::lanes; i++)
result[i] = RetElem(val[i]);
RootedObject obj(cx, Create<Vret>(cx, result));
RootedObject obj(cx, CreateSimd<Vret>(cx, result));
if (!obj)
return false;
@ -783,7 +783,7 @@ FuncConvertBits(JSContext *cx, unsigned argc, Value *vp)
return ErrorBadArgs(cx);
RetElem *val = TypedObjectMemory<RetElem *>(args[0]);
RootedObject obj(cx, Create<Vret>(cx, val));
RootedObject obj(cx, CreateSimd<Vret>(cx, val));
if (!obj)
return false;
@ -805,7 +805,7 @@ FuncZero(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Vret::lanes; i++)
result[i] = RetElem(0);
RootedObject obj(cx, Create<Vret>(cx, result));
RootedObject obj(cx, CreateSimd<Vret>(cx, result));
if (!obj)
return false;
@ -831,7 +831,7 @@ FuncSplat(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Vret::lanes; i++)
result[i] = arg;
RootedObject obj(cx, Create<Vret>(cx, result));
RootedObject obj(cx, CreateSimd<Vret>(cx, result));
if (!obj)
return false;
@ -854,7 +854,7 @@ Int32x4Bool(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Int32x4::lanes; i++)
result[i] = args[i].toBoolean() ? 0xFFFFFFFF : 0x0;
RootedObject obj(cx, Create<Int32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Int32x4>(cx, result));
if (!obj)
return false;
@ -882,7 +882,7 @@ Float32x4Clamp(JSContext *cx, unsigned argc, Value *vp)
result[i] = result[i] > upperLimit[i] ? upperLimit[i] : result[i];
}
RootedObject obj(cx, Create<Float32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Float32x4>(cx, result));
if (!obj)
return false;
@ -917,7 +917,7 @@ Int32x4Select(JSContext *cx, unsigned argc, Value *vp)
orInt[i] = Or<int32_t>::apply(tr[i], fr[i]);
float *result = reinterpret_cast<float *>(orInt);
RootedObject obj(cx, Create<Float32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Float32x4>(cx, result));
if (!obj)
return false;

Просмотреть файл

@ -164,7 +164,7 @@ struct Int32x4 {
};
template<typename V>
JSObject *Create(JSContext *cx, typename V::Elem *data);
JSObject *CreateSimd(JSContext *cx, typename V::Elem *data);
#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands, Flags, MIRId) \
extern bool \

Просмотреть файл

@ -274,7 +274,8 @@ class GCRuntime
bool triggerGC(JS::gcreason::Reason reason);
bool triggerZoneGC(Zone *zone, JS::gcreason::Reason reason);
void maybeGC(Zone *zone);
bool maybeGC(Zone *zone);
void maybePeriodicFullGC();
void minorGC(JS::gcreason::Reason reason);
void minorGC(JSContext *cx, JS::gcreason::Reason reason);
void gcIfNeeded(JSContext *cx);
@ -487,7 +488,7 @@ class GCRuntime
void getNextZoneGroup();
void endMarkingZoneGroup();
void beginSweepingZoneGroup();
bool releaseObservedTypes();
bool shouldReleaseObservedTypes();
void endSweepingZoneGroup();
bool sweepPhase(SliceBudget &sliceBudget);
void endSweepPhase(JSGCInvocationKind gckind, bool lastGC);
@ -567,7 +568,6 @@ class GCRuntime
bool chunkAllocationSinceLastGC;
int64_t nextFullGCTime;
int64_t lastGCTime;
int64_t jitReleaseTime;
JSGCMode mode;
@ -589,6 +589,12 @@ class GCRuntime
*/
volatile uintptr_t isNeeded;
/* Incremented at the start of every major GC. */
uint64_t majorGCNumber;
/* The major GC number at which to release observed type information. */
uint64_t jitReleaseNumber;
/* Incremented on every GC slice. */
uint64_t number;
@ -624,6 +630,9 @@ class GCRuntime
/* Whether any sweeping will take place in the separate GC helper thread. */
bool sweepOnBackgroundThread;
/* Whether observed type information is being released in the current GC. */
bool releaseObservedTypes;
/* Whether any black->gray edges were found during marking. */
bool foundBlackGrayEdges;

Просмотреть файл

@ -0,0 +1,83 @@
setJitCompilerOption("baseline.usecount.trigger", 10);
setJitCompilerOption("ion.usecount.trigger", 20);
function join_check() {
var lengthWasCalled = false;
var obj = {"0": "", "1": ""};
Object.defineProperty(obj, "length", {
get : function(){ lengthWasCalled = true; return 2; },
enumerable : true,
configurable : true
});
var res = Array.prototype.join.call(obj, { toString: function () {
if (lengthWasCalled)
return "good";
else
return "bad";
}})
assertEq(res, "good");
}
function split(i) {
var x = (i + "->" + i).split("->");
assertEq(x[0], "" + i);
return i;
}
function join(i) {
var x = [i, i].join("->");
assertEq(x, i + "->" + i);
return i;
}
function split_join(i) {
var x = (i + "-" + i).split("-").join("->");
assertEq(x, i + "->" + i);
return i;
}
function split_join_2(i) {
var x = (i + "-" + i).split("-");
x.push("" + i);
var res = x.join("->");
assertEq(res, i + "->" + i + "->" + i);
return i;
}
function resumeHere() { bailout(); }
function split_join_3(i) {
var x = (i + "-" + i).split("-");
resumeHere();
var res = x.join("->");
assertEq(res, i + "->" + i);
return i;
}
function trip(i) {
if (i == 99)
assertEq(myjoin.arguments[1][0], "" + i)
}
function myjoin(i, x) {
trip(i);
return x.join("->");
}
function split_join_4(i) {
var x = (i + "-" + i).split("-");
var res = myjoin(i, x);
assertEq(res, i + "->" + i);
return i;
}
for (var i = 0; i < 100; ++i) {
join_check(i);
split(i);
join(i);
split_join(i);
split_join_2(i);
split_join_3(i);
split_join_4(i);
}

Просмотреть файл

@ -13,6 +13,7 @@
#include "jit/IonAnalysis.h"
#include "jit/IonLinker.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
#endif
@ -244,6 +245,21 @@ BaselineCompiler::compile()
if (script->compartment()->debugMode())
baselineScript->setDebugMode();
// Register a native => bytecode mapping entry for this script if needed.
if (cx->runtime()->jitRuntime()->isNativeToBytecodeMapEnabled(cx->runtime())) {
IonSpew(IonSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%d (%p)",
script->filename(), script->lineno(), baselineScript);
JitcodeGlobalEntry::BaselineEntry entry;
entry.init(code->raw(), code->raw() + code->instructionsSize(), script);
JitcodeGlobalTable *globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(entry))
return Method_Error;
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
script->setBaselineScript(cx, baselineScript);
return Method_Compiled;

Просмотреть файл

@ -9,6 +9,8 @@
#include "mozilla/DebugOnly.h"
#include "jit/IonLinker.h"
#include "jit/JitcodeMap.h"
#include "jit/PerfSpewer.h"
#include "jit/IonFrames-inl.h"
@ -659,6 +661,11 @@ jit::RecompileOnStackBaselineScriptsForDebugMode(JSContext *cx, JSCompartment *c
MinorGC(cx->runtime(), JS::gcreason::EVICT_NURSERY);
#endif
// When the profiler is enabled, we need to suppress sampling from here until
// the end of the function, since the basline jit scripts are in a state of
// flux.
AutoSuppressProfilerSampling suppressProfilerSampling(cx);
// Try to recompile all the scripts. If we encounter an error, we need to
// roll back as if none of the compilations happened, so that we don't
// crash.

Просмотреть файл

@ -446,6 +446,7 @@ BaselineScript::Destroy(FreeOp *fop, BaselineScript *script)
*/
JS_ASSERT(fop->runtime()->gc.nursery.isEmpty());
#endif
fop->delete_(script);
}
@ -690,6 +691,27 @@ BaselineScript::nativeCodeForPC(JSScript *script, jsbytecode *pc, PCMappingSlotI
jsbytecode *
BaselineScript::pcForReturnOffset(JSScript *script, uint32_t nativeOffset)
{
return pcForNativeOffset(script, nativeOffset, true);
}
jsbytecode *
BaselineScript::pcForReturnAddress(JSScript *script, uint8_t *nativeAddress)
{
JS_ASSERT(script->baselineScript() == this);
JS_ASSERT(nativeAddress >= method_->raw());
JS_ASSERT(nativeAddress < method_->raw() + method_->instructionsSize());
return pcForReturnOffset(script, uint32_t(nativeAddress - method_->raw()));
}
jsbytecode *
BaselineScript::pcForNativeOffset(JSScript *script, uint32_t nativeOffset)
{
return pcForNativeOffset(script, nativeOffset, false);
}
jsbytecode *
BaselineScript::pcForNativeOffset(JSScript *script, uint32_t nativeOffset, bool isReturn)
{
JS_ASSERT(script->baselineScript() == this);
JS_ASSERT(nativeOffset < method_->instructionsSize());
@ -707,14 +729,19 @@ BaselineScript::pcForReturnOffset(JSScript *script, uint32_t nativeOffset)
i--;
PCMappingIndexEntry &entry = pcMappingIndexEntry(i);
JS_ASSERT(nativeOffset >= entry.nativeOffset);
JS_ASSERT_IF(isReturn, nativeOffset >= entry.nativeOffset);
CompactBufferReader reader(pcMappingReader(i));
jsbytecode *curPC = script->offsetToPC(entry.pcOffset);
uint32_t curNativeOffset = entry.nativeOffset;
JS_ASSERT(script->containsPC(curPC));
JS_ASSERT(curNativeOffset <= nativeOffset);
JS_ASSERT_IF(isReturn, nativeOffset >= curNativeOffset);
// In the raw native-lookup case, the native code address can occur
// before the start of ops. Associate those with bytecode offset 0.
if (!isReturn && (curNativeOffset > nativeOffset))
return script->code();
while (true) {
// If the high bit is set, the native offset relative to the
@ -723,22 +750,28 @@ BaselineScript::pcForReturnOffset(JSScript *script, uint32_t nativeOffset)
if (b & 0x80)
curNativeOffset += reader.readUnsigned();
if (curNativeOffset == nativeOffset)
if (isReturn ? (nativeOffset == curNativeOffset) : (nativeOffset <= curNativeOffset))
return curPC;
// If this is a raw native lookup (not jsop return addresses), then
// the native address may lie in-between the last delta-entry in
// a pcMappingIndexEntry, and the next pcMappingIndexEntry.
if (!isReturn && !reader.more())
return curPC;
curPC += GetBytecodeLength(curPC);
}
MOZ_ASSUME_UNREACHABLE("Invalid pc");
MOZ_ASSUME_UNREACHABLE("Bad baseline jitcode address");
}
jsbytecode *
BaselineScript::pcForReturnAddress(JSScript *script, uint8_t *nativeAddress)
BaselineScript::pcForNativeAddress(JSScript *script, uint8_t *nativeAddress)
{
JS_ASSERT(script->baselineScript() == this);
JS_ASSERT(nativeAddress >= method_->raw());
JS_ASSERT(nativeAddress < method_->raw() + method_->instructionsSize());
return pcForReturnOffset(script, uint32_t(nativeAddress - method_->raw()));
return pcForNativeOffset(script, uint32_t(nativeAddress - method_->raw()));
}
void

Просмотреть файл

@ -315,9 +315,17 @@ struct BaselineScript
void copyPCMappingEntries(const CompactBufferWriter &entries);
uint8_t *nativeCodeForPC(JSScript *script, jsbytecode *pc, PCMappingSlotInfo *slotInfo = nullptr);
jsbytecode *pcForReturnOffset(JSScript *script, uint32_t nativeOffset);
jsbytecode *pcForReturnAddress(JSScript *script, uint8_t *nativeAddress);
jsbytecode *pcForNativeAddress(JSScript *script, uint8_t *nativeAddress);
jsbytecode *pcForNativeOffset(JSScript *script, uint32_t nativeOffset);
private:
jsbytecode *pcForNativeOffset(JSScript *script, uint32_t nativeOffset, bool isReturn);
public:
// Toggle debug traps (used for breakpoints and step mode) in the script.
// If |pc| is nullptr, toggle traps for all ops in the script. Else, only
// toggle traps at |pc|.

Просмотреть файл

@ -26,6 +26,7 @@
#include "jit/IonLinker.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/Lowering.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
@ -120,7 +121,7 @@ CodeGeneratorShared::addCache(LInstruction *lir, size_t cacheIndex)
cache->setIdempotent();
OutOfLineUpdateCache *ool = new(alloc()) OutOfLineUpdateCache(lir, cacheIndex);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
// OOL-specific state depends on the type of cache.
@ -184,7 +185,7 @@ CodeGenerator::visitValueToInt32(LValueToInt32 *lir)
Label fails;
if (lir->mode() == LValueToInt32::TRUNCATE) {
OutOfLineCode *oolDouble = oolTruncateDouble(temp, output);
OutOfLineCode *oolDouble = oolTruncateDouble(temp, output, lir->mir());
if (!oolDouble)
return false;
@ -691,7 +692,7 @@ CodeGenerator::visitTestOAndBranch(LTestOAndBranch *lir)
"Objects which can't emulate undefined should have been constant-folded");
OutOfLineTestObject *ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Label *truthy = getJumpLabelForBranch(lir->ifTruthy());
@ -714,7 +715,7 @@ CodeGenerator::visitTestVAndBranch(LTestVAndBranch *lir)
// object.
if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType_Object)) {
ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
}
@ -1317,7 +1318,7 @@ bool
CodeGenerator::visitInterruptCheckImplicit(LInterruptCheckImplicit *lir)
{
OutOfLineInterruptCheckImplicit *ool = new(alloc()) OutOfLineInterruptCheckImplicit(current, lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
lir->setOolEntry(ool->entry());
@ -1408,6 +1409,15 @@ CodeGenerator::visitStart(LStart *lir)
return true;
}
bool
CodeGenerator::visitPcOffset(LPcOffset *lir)
{
if (!addNativeToBytecodeEntry(lir->mir()->trackedSite()))
return false;
return true;
}
bool
CodeGenerator::visitReturn(LReturn *lir)
{
@ -2043,7 +2053,7 @@ CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO *lir)
{
#ifdef JSGC_GENERATIONAL
OutOfLineCallPostWriteBarrier *ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register temp = ToTempRegisterOrInvalid(lir->temp());
@ -2069,7 +2079,7 @@ CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV *lir)
{
#ifdef JSGC_GENERATIONAL
OutOfLineCallPostWriteBarrier *ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register temp = ToTempRegisterOrInvalid(lir->temp());
@ -2749,7 +2759,6 @@ CodeGenerator::visitArraySplice(LArraySplice *lir)
return callVM(ArraySpliceDenseInfo, lir);
}
bool
CodeGenerator::visitBail(LBail *lir)
{
@ -2985,7 +2994,7 @@ CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed *lir)
const void *limitAddr = GetIonContext()->runtime->addressOfJitStackLimit();
CheckOverRecursedFailure *ool = new(alloc()) CheckOverRecursedFailure(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
// Conditional forward (unlikely) branch to failure.
@ -3072,7 +3081,7 @@ CodeGenerator::visitCheckOverRecursedPar(LCheckOverRecursedPar *lir)
// Conditional forward (unlikely) branch to failure.
CheckOverRecursedFailure *ool = new(alloc()) CheckOverRecursedFailure(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
masm.branchPtr(Assembler::BelowOrEqual, StackPointer, tempReg, ool->entry());
@ -3405,6 +3414,14 @@ CodeGenerator::generateBody()
resetOsiPointRegs(iter->safepoint());
#endif
if (iter->mirRaw()) {
// Only add instructions that have a tracked inline script tree.
if (iter->mirRaw()->trackedSite().hasTree()) {
if (!addNativeToBytecodeEntry(iter->mirRaw()->trackedSite()))
return false;
}
}
if (!iter->accept(this))
return false;
@ -3540,7 +3557,7 @@ CodeGenerator::visitNewArray(LNewArray *lir)
return visitNewArrayCallVM(lir);
OutOfLineNewArray *ool = new(alloc()) OutOfLineNewArray(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry());
@ -3710,7 +3727,7 @@ CodeGenerator::visitNewObject(LNewObject *lir)
return visitNewObjectVMCall(lir);
OutOfLineNewObject *ool = new(alloc()) OutOfLineNewObject(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
bool initFixedSlots = ShouldInitFixedSlots(lir, templateObject);
@ -3923,6 +3940,9 @@ bool
CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
Register tempReg1, Register tempReg2, JSObject *templateObj)
{
JS_ASSERT(lir->mirRaw());
JS_ASSERT(lir->mirRaw()->isInstruction());
gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
#ifdef JSGC_FJGENERATIONAL
OutOfLineCode *ool = oolCallVM(NewGCThingParInfo, lir,
@ -3931,7 +3951,7 @@ CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Regist
return false;
#else
OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
if (!ool || !addOutOfLineCode(ool))
if (!ool || !addOutOfLineCode(ool, lir->mirRaw()->toInstruction()))
return false;
#endif
@ -4826,7 +4846,7 @@ CodeGenerator::visitIsNullOrLikeUndefined(LIsNullOrLikeUndefined *lir)
Label *notNullOrLikeUndefined;
if (lir->mir()->operandMightEmulateUndefined()) {
ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
nullOrLikeUndefined = ool->label1();
notNullOrLikeUndefined = ool->label2();
@ -4910,7 +4930,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranch(LIsNullOrLikeUndefinedAndBran
OutOfLineTestObject *ool = nullptr;
if (lir->cmpMir()->operandMightEmulateUndefined()) {
ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->cmpMir()))
return false;
}
@ -4959,7 +4979,7 @@ CodeGenerator::visitEmulatesUndefined(LEmulatesUndefined *lir)
MOZ_ASSERT(op == JSOP_EQ || op == JSOP_NE, "Strict equality should have been folded");
OutOfLineTestObjectWithLabels *ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Label *emulatesUndefined = ool->label1();
@ -4993,7 +5013,7 @@ CodeGenerator::visitEmulatesUndefinedAndBranch(LEmulatesUndefinedAndBranch *lir)
MOZ_ASSERT(op == JSOP_EQ || op == JSOP_NE, "Strict equality should have been folded");
OutOfLineTestObject *ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->cmpMir()))
return false;
Label *equal;
@ -5497,7 +5517,7 @@ CodeGenerator::visitNotO(LNotO *lir)
"This should be constant-folded if the object can't emulate undefined.");
OutOfLineTestObjectWithLabels *ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Label *ifEmulatesUndefined = ool->label1();
@ -5536,7 +5556,7 @@ CodeGenerator::visitNotV(LNotV *lir)
// object.
if (lir->mir()->operandMightEmulateUndefined() && operand->mightBeType(MIRType_Object)) {
ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
ifTruthy = ool->label1();
ifFalsy = ool->label2();
@ -5758,7 +5778,7 @@ bool
CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT *lir)
{
OutOfLineStoreElementHole *ool = new(alloc()) OutOfLineStoreElementHole(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register elements = ToRegister(lir->elements());
@ -5783,7 +5803,7 @@ bool
CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV *lir)
{
OutOfLineStoreElementHole *ool = new(alloc()) OutOfLineStoreElementHole(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register elements = ToRegister(lir->elements());
@ -6110,6 +6130,18 @@ CodeGenerator::visitArrayConcat(LArrayConcat *lir)
return callVM(ArrayConcatDenseInfo, lir);
}
typedef JSString *(*ArrayJoinFn)(JSContext *, HandleObject, HandleString);
static const VMFunction ArrayJoinInfo = FunctionInfo<ArrayJoinFn>(jit::ArrayJoin);
bool
CodeGenerator::visitArrayJoin(LArrayJoin *lir)
{
pushArg(ToRegister(lir->separator()));
pushArg(ToRegister(lir->array()));
return callVM(ArrayJoinInfo, lir);
}
typedef JSObject *(*GetIteratorObjectFn)(JSContext *, HandleObject, uint32_t);
static const VMFunction GetIteratorObjectInfo = FunctionInfo<GetIteratorObjectFn>(GetIteratorObject);
@ -6564,6 +6596,13 @@ CodeGenerator::generate()
gen->info().script()->filename(),
gen->info().script()->lineno());
// Initialize native code table with an entry to the start of
// top-level script.
InlineScriptTree *tree = gen->info().inlineScriptTree();
jsbytecode *startPC = tree->script()->code();
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!snapshots_.init())
return false;
@ -6618,22 +6657,74 @@ CodeGenerator::generate()
if (!generatePrologue())
return false;
// Reset native => bytecode map table with top-level script and startPc.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!generateBody())
return false;
// Reset native => bytecode map table with top-level script and startPc.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!generateEpilogue())
return false;
// Reset native => bytecode map table with top-level script and startPc.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!generateInvalidateEpilogue())
return false;
#if defined(JS_ION_PERF)
// Note the end of the inline code and start of the OOL code.
perfSpewer_.noteEndInlineCode(masm);
#endif
// native => bytecode entries for OOL code will be added
// by CodeGeneratorShared::generateOutOfLineCode
if (!generateOutOfLineCode())
return false;
// Add terminal entry.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
// Dump Native to bytecode entries to spew.
dumpNativeToBytecodeEntries();
return !masm.oom();
}
struct AutoDiscardIonCode
{
JSContext *cx;
types::RecompileInfo *recompileInfo;
IonScript *ionScript;
bool keep;
AutoDiscardIonCode(JSContext *cx, types::RecompileInfo *recompileInfo)
: cx(cx), recompileInfo(recompileInfo), ionScript(nullptr), keep(false) {}
~AutoDiscardIonCode() {
if (keep)
return;
// Use js_free instead of IonScript::Destroy: the cache list and
// backedge list are still uninitialized.
if (ionScript)
js_free(ionScript);
recompileInfo->compilerOutput(cx->zone()->types)->invalidate();
}
void keepIonCode() {
keep = true;
}
};
bool
CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
{
@ -6678,6 +6769,8 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
if (executionMode == ParallelExecution)
AddPossibleCallees(cx, graph.mir(), callTargets);
AutoDiscardIonCode discardIonCode(cx, &recompileInfo);
IonScript *ionScript =
IonScript::New(cx, recompileInfo,
graph.totalSlotCount(), scriptFrameSize,
@ -6687,10 +6780,9 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
cacheList_.length(), runtimeData_.length(),
safepoints_.size(), callTargets.length(),
patchableBackedges_.length(), optimizationLevel);
if (!ionScript) {
recompileInfo.compilerOutput(cx->zone()->types)->invalidate();
if (!ionScript)
return false;
}
discardIonCode.ionScript = ionScript;
// Lock the runtime against interrupt callbacks during the link.
// We don't want an interrupt request to protect the code for the script
@ -6714,23 +6806,38 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
JitCode *code = (executionMode == SequentialExecution)
? linker.newCodeForIonScript(cx)
: linker.newCode<CanGC>(cx, ION_CODE);
if (!code) {
// Use js_free instead of IonScript::Destroy: the cache list and
// backedge list are still uninitialized.
js_free(ionScript);
recompileInfo.compilerOutput(cx->zone()->types)->invalidate();
if (!code)
return false;
// Encode native to bytecode map if profiling is enabled.
if (isNativeToBytecodeMapEnabled()) {
// Generate native-to-bytecode main table.
if (!generateCompactNativeToBytecodeMap(cx, code))
return false;
uint8_t *ionTableAddr = ((uint8_t *) nativeToBytecodeMap_) + nativeToBytecodeTableOffset_;
JitcodeIonTable *ionTable = (JitcodeIonTable *) ionTableAddr;
// Construct the IonEntry that will go into the global table.
JitcodeGlobalEntry::IonEntry entry;
if (!ionTable->makeIonEntry(cx, code, nativeToBytecodeScriptListLength_,
nativeToBytecodeScriptList_, entry))
{
return false;
}
// Add entry to the global table.
JitcodeGlobalTable *globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(entry)) {
// Memory may have been allocated for the entry.
entry.destroy();
return false;
}
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
ionScript->setMethod(code);
ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
// If SPS is enabled, mark IonScript as having been instrumented with SPS
if (sps_.enabled())
ionScript->setHasSPSInstrumentation();
SetIonScript(script, executionMode, ionScript);
if (cx->runtime()->spsProfiler.enabled()) {
const char *filename = script->filename();
if (filename == nullptr)
@ -6744,6 +6851,15 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
js_free(buf);
}
ionScript->setMethod(code);
ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
// If SPS is enabled, mark IonScript as having been instrumented with SPS
if (sps_.enabled())
ionScript->setHasSPSInstrumentation();
SetIonScript(script, executionMode, ionScript);
// In parallel execution mode, when we first compile a script, we
// don't know that its potential callees are compiled, so set a
// flag warning that the callees may not be fully compiled.
@ -6844,6 +6960,9 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
if (IonScriptCounts *counts = extractScriptCounts())
script->addIonCounts(counts);
// Make sure that AutoDiscardIonCode does not free the relevant info.
discardIonCode.keepIonCode();
return true;
}
@ -6875,7 +6994,7 @@ CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint *lir)
// Out-of-line path to convert int32 to double or bailout
// if this instruction is fallible.
OutOfLineUnboxFloatingPoint *ool = new(alloc()) OutOfLineUnboxFloatingPoint(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
FloatRegister resultReg = ToFloatRegister(result);
@ -7673,7 +7792,7 @@ CodeGenerator::visitTypeOfV(LTypeOfV *lir)
// The input may be a callable object (result is "function") or may
// emulate undefined (result is "undefined"). Use an OOL path.
ool = new(alloc()) OutOfLineTypeOfV(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
masm.branchTestObject(Assembler::Equal, tag, ool->entry());

Просмотреть файл

@ -68,6 +68,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitParameter(LParameter *lir);
bool visitCallee(LCallee *lir);
bool visitStart(LStart *lir);
bool visitPcOffset(LPcOffset *lir);
bool visitReturn(LReturn *ret);
bool visitDefVar(LDefVar *lir);
bool visitDefFun(LDefFun *lir);
@ -251,6 +252,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitArrayPushV(LArrayPushV *lir);
bool visitArrayPushT(LArrayPushT *lir);
bool visitArrayConcat(LArrayConcat *lir);
bool visitArrayJoin(LArrayJoin *lir);
bool visitLoadTypedArrayElement(LLoadTypedArrayElement *lir);
bool visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole *lir);
bool visitStoreTypedArrayElement(LStoreTypedArrayElement *lir);

Просмотреть файл

@ -68,6 +68,11 @@ class CompactBufferReader
uint32_t b1 = readByte();
return b0 | (b1 << 8);
}
uint32_t readNativeEndianUint32_t() {
// Must be at 4-byte boundary
JS_ASSERT(uintptr_t(buffer_) % sizeof(uint32_t) == 0);
return *reinterpret_cast<const uint32_t *>(buffer_);
}
uint32_t readUnsigned() {
return readVariableLength();
}
@ -93,6 +98,10 @@ class CompactBufferReader
MOZ_ASSERT(start < end_);
MOZ_ASSERT(buffer_ < end_);
}
const uint8_t *currentPosition() const {
return buffer_;
}
};
class CompactBufferWriter
@ -140,6 +149,15 @@ class CompactBufferWriter
writeByte(value & 0xFF);
writeByte(value >> 8);
}
void writeNativeEndianUint32_t(uint32_t value) {
// Must be at 4-byte boundary
JS_ASSERT(length() % sizeof(uint32_t) == 0);
writeFixedUint32_t(0);
if (oom())
return;
uint8_t *endPtr = buffer() + length();
reinterpret_cast<uint32_t *>(endPtr)[-1] = value;
}
size_t length() const {
return buffer_.length();
}

Просмотреть файл

@ -83,6 +83,9 @@ class InlineScriptTree {
bool isOutermostCaller() const {
return caller_ == nullptr;
}
bool hasCaller() const {
return caller_ != nullptr;
}
InlineScriptTree *outermostCaller() {
if (isOutermostCaller())
return this;
@ -97,12 +100,27 @@ class InlineScriptTree {
return script_;
}
InlineScriptTree *children() const {
bool hasChildren() const {
return children_ != nullptr;
}
InlineScriptTree *firstChild() const {
JS_ASSERT(hasChildren());
return children_;
}
bool hasNextCallee() const {
return nextCallee_ != nullptr;
}
InlineScriptTree *nextCallee() const {
JS_ASSERT(hasNextCallee());
return nextCallee_;
}
unsigned depth() const {
if (isOutermostCaller())
return 1;
return 1 + caller_->depth();
}
};
class BytecodeSite {
@ -119,7 +137,14 @@ class BytecodeSite {
BytecodeSite(InlineScriptTree *tree, jsbytecode *pc)
: tree_(tree), pc_(pc)
{}
{
JS_ASSERT(tree_ != nullptr);
JS_ASSERT(pc_ != nullptr);
}
bool hasTree() const {
return tree_ != nullptr;
}
InlineScriptTree *tree() const {
return tree_;
@ -128,6 +153,10 @@ class BytecodeSite {
jsbytecode *pc() const {
return pc_;
}
JSScript *script() const {
return tree_ ? tree_->script() : nullptr;
}
};

Просмотреть файл

@ -27,6 +27,7 @@
#include "jit/IonBuilder.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/JitCommon.h"
#include "jit/JitCompartment.h"
#include "jit/LICM.h"
@ -165,7 +166,8 @@ JitRuntime::JitRuntime()
functionWrappers_(nullptr),
osrTempData_(nullptr),
ionCodeProtected_(false),
ionReturnOverride_(MagicValue(JS_ARG_POISON))
ionReturnOverride_(MagicValue(JS_ARG_POISON)),
jitcodeGlobalTable_(nullptr)
{
}
@ -177,6 +179,10 @@ JitRuntime::~JitRuntime()
// Note: The interrupt lock is not taken here, as JitRuntime is only
// destroyed along with its containing JSRuntime.
js_delete(ionAlloc_);
// By this point, the jitcode global table should be empty.
JS_ASSERT_IF(jitcodeGlobalTable_, jitcodeGlobalTable_->empty());
js_delete(jitcodeGlobalTable_);
}
bool
@ -289,6 +295,10 @@ JitRuntime::initialize(JSContext *cx)
return false;
}
jitcodeGlobalTable_ = cx->new_<JitcodeGlobalTable>();
if (!jitcodeGlobalTable_)
return false;
return true;
}
@ -761,6 +771,12 @@ JitCode::finalize(FreeOp *fop)
// to read the contents of the pool we are releasing references in.
JS_ASSERT(fop->runtime()->currentThreadOwnsInterruptLock());
// If this jitcode has a bytecode map, de-register it.
if (hasBytecodeMap_) {
JS_ASSERT(fop->runtime()->jitRuntime()->hasJitcodeGlobalTable());
fop->runtime()->jitRuntime()->getJitcodeGlobalTable()->removeEntry(raw());
}
// Buffer can be freed at any time hereafter. Catch use-after-free bugs.
// Don't do this if the Ion code is protected, as the signal handler will
// deadlock trying to reacquire the interrupt lock.

Просмотреть файл

@ -1277,6 +1277,9 @@ IonBuilder::traverseBytecode()
}
#endif
if (isNativeToBytecodeMapEnabled())
current->add(MPcOffset::New(alloc()));
// Nothing in inspectOpcode() is allowed to advance the pc.
JSOp op = JSOp(*pc);
if (!inspectOpcode(op))

Просмотреть файл

@ -675,6 +675,7 @@ class IonBuilder : public MIRGenerator
InliningStatus inlineArrayPopShift(CallInfo &callInfo, MArrayPopShift::Mode mode);
InliningStatus inlineArrayPush(CallInfo &callInfo);
InliningStatus inlineArrayConcat(CallInfo &callInfo);
InliningStatus inlineArrayJoin(CallInfo &callInfo);
InliningStatus inlineArraySplice(CallInfo &callInfo);
// Math natives.

Просмотреть файл

@ -16,6 +16,7 @@
#include "jit/Ion.h"
#include "jit/IonLinker.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/Lowering.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
@ -429,6 +430,22 @@ IonCache::linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &a
attachStub(masm, attacher, code);
// Add entry to native => bytecode mapping for this stub if needed.
if (cx->runtime()->jitRuntime()->isNativeToBytecodeMapEnabled(cx->runtime())) {
JitcodeGlobalEntry::IonCacheEntry entry;
entry.init(code->raw(), code->raw() + code->instructionsSize(), rejoinAddress());
// Add entry to the global table.
JitcodeGlobalTable *globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(entry)) {
entry.destroy();
return false;
}
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
return true;
}

Просмотреть файл

@ -213,6 +213,9 @@ class IonCache
profilerLeavePc_ = pc;
}
// Get the address at which IC rejoins the mainline jitcode.
virtual void *rejoinAddress() = 0;
virtual void emitInitialJump(MacroAssembler &masm, AddCacheState &addState) = 0;
virtual void bindInitialJump(MacroAssembler &masm, AddCacheState &addState) = 0;
virtual void updateBaseAddress(JitCode *code, MacroAssembler &masm);
@ -398,6 +401,10 @@ class RepatchIonCache : public IonCache
// Update the labels once the code is finalized.
void updateBaseAddress(JitCode *code, MacroAssembler &masm);
virtual void *rejoinAddress() MOZ_OVERRIDE {
return rejoinLabel().raw();
}
};
//
@ -496,6 +503,10 @@ class DispatchIonCache : public IonCache
// Fix up the first stub pointer once the code is finalized.
void updateBaseAddress(JitCode *code, MacroAssembler &masm);
virtual void *rejoinAddress() MOZ_OVERRIDE {
return rejoinLabel_.raw();
}
};
// Define the cache kind and pre-declare data structures used for calling inline

Просмотреть файл

@ -44,6 +44,8 @@ class JitCode : public gc::BarrieredCell<JitCode>
uint8_t kind_ : 3; // jit::CodeKind, for the memory reporters.
bool invalidated_ : 1; // Whether the code object has been invalidated.
// This is necessary to prevent GC tracing.
bool hasBytecodeMap_ : 1; // Whether the code object has been registered with
// native=>bytecode mapping tables.
#if JS_BITS_PER_WORD == 32
// Ensure JitCode is gc::Cell aligned.
@ -66,7 +68,8 @@ class JitCode : public gc::BarrieredCell<JitCode>
preBarrierTableBytes_(0),
headerSize_(headerSize),
kind_(kind),
invalidated_(false)
invalidated_(false),
hasBytecodeMap_(false)
{
MOZ_ASSERT(CodeKind(kind_) == kind);
MOZ_ASSERT(headerSize_ == headerSize);
@ -89,6 +92,9 @@ class JitCode : public gc::BarrieredCell<JitCode>
uint8_t *raw() const {
return code_;
}
uint8_t *rawEnd() const {
return code_ + insnSize_;
}
size_t instructionsSize() const {
return insnSize_;
}
@ -98,6 +104,10 @@ class JitCode : public gc::BarrieredCell<JitCode>
invalidated_ = true;
}
void setHasBytecodeMap() {
hasBytecodeMap_ = true;
}
void togglePreBarriers(bool enabled);
// If this JitCode object has been, effectively, corrupted due to

Просмотреть файл

@ -19,6 +19,7 @@
#include "jit/Ion.h"
#include "jit/IonMacroAssembler.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/JitCompartment.h"
#include "jit/ParallelFunctions.h"
#include "jit/PcScriptCache.h"
@ -112,6 +113,7 @@ JitFrameIterator::JitFrameIterator(IonJSFrameLayout *fp, ExecutionMode mode)
mode_(mode),
kind_(Kind_FrameIterator)
{
verifyReturnAddressUsingNativeToBytecodeMap();
}
IonBailoutIterator *
@ -328,6 +330,9 @@ JitFrameIterator::operator++()
type_ = JitFrame_BaselineStub;
returnAddressToFp_ = current()->returnAddress();
current_ = prev;
verifyReturnAddressUsingNativeToBytecodeMap();
return *this;
}
@ -2229,6 +2234,80 @@ JitFrameIterator::dump() const
fputc('\n', stderr);
}
#ifdef DEBUG
// DEBUG-only sanity check: for every frame the iterator visits, look up the
// frame's return address in the global native=>bytecode table and verify that
// the mapped (script, pc) stack agrees with what InlineFrameIterator computes
// from the actual frame.  Returns true when verification passes or is skipped;
// false only if callStackAtAddr() itself fails (OOM appending locations).
bool
JitFrameIterator::verifyReturnAddressUsingNativeToBytecodeMap()
{
    JS_ASSERT(returnAddressToFp_ != nullptr);

    // Only handle Ion frames for now.
    // NOTE(review): despite this comment, the condition also admits
    // JitFrame_BaselineJS frames — confirm which is intended.
    if (type_ != JitFrame_IonJS && type_ != JitFrame_BaselineJS)
        return true;

    JSRuntime *rt = js::TlsPerThreadData.get()->runtimeIfOnOwnerThread();

    // Don't verify on non-main-thread.
    if (!rt)
        return true;

    // Don't verify if sampling is being suppressed.
    if (!rt->isProfilerSamplingEnabled())
        return true;

    if (rt->isHeapMinorCollecting())
        return true;

    JitRuntime *jitrt = rt->jitRuntime();

    // Look up and print bytecode info for the native address.  An address with
    // no table entry is not an error: not all jitcode is registered.
    JitcodeGlobalEntry entry;
    if (!jitrt->getJitcodeGlobalTable()->lookup(returnAddressToFp_, &entry))
        return true;

    IonSpew(IonSpew_Profiling, "Found nativeToBytecode entry for %p: %p - %p",
            returnAddressToFp_, entry.nativeStartAddr(), entry.nativeEndAddr());

    // Decode the full inline-frame stack implied by the return address.
    JitcodeGlobalEntry::BytecodeLocationVector location;
    uint32_t depth = UINT32_MAX;
    if (!entry.callStackAtAddr(rt, returnAddressToFp_, location, &depth))
        return false;
    JS_ASSERT(depth > 0 && depth != UINT32_MAX);
    JS_ASSERT(location.length() == depth);

    IonSpew(IonSpew_Profiling, "Found bytecode location of depth %d:", depth);
    for (size_t i = 0; i < location.length(); i++) {
        IonSpew(IonSpew_Profiling, " %s:%d - %d",
                location[i].script->filename(), location[i].script->lineno(),
                (int) (location[i].pc - location[i].script->code()));
    }

    if (type_ == JitFrame_IonJS) {
        // Create an InlineFrameIterator here and verify the mapped info against the iterator info.
        InlineFrameIterator inlineFrames(GetJSContextFromJitCode(), this);
        for (size_t idx = 0; idx < location.length(); idx++) {
            JS_ASSERT(idx < location.length());
            JS_ASSERT_IF(idx < location.length() - 1, inlineFrames.more());

            IonSpew(IonSpew_Profiling, "Match %d: ION %s:%d(%d) vs N2B %s:%d(%d)",
                    (int)idx,
                    inlineFrames.script()->filename(),
                    inlineFrames.script()->lineno(),
                    inlineFrames.pc() - inlineFrames.script()->code(),
                    location[idx].script->filename(),
                    location[idx].script->lineno(),
                    location[idx].pc - location[idx].script->code());

            // The scripts must match frame-for-frame; pcs are only spewed,
            // not asserted, here.
            JS_ASSERT(inlineFrames.script() == location[idx].script);

            if (inlineFrames.more())
                ++inlineFrames;
        }
    }

    return true;
}
#endif // DEBUG
IonJSFrameLayout *
InvalidationBailoutStack::fp() const
{

Просмотреть файл

@ -250,6 +250,7 @@ jit::CheckLogging()
" range Range Analysis\n"
" unroll Loop unrolling\n"
" logs C1 and JSON visualization logging\n"
" profiling Profiling-related information\n"
" all Everything\n"
"\n"
" bl-aborts Baseline compiler abort messages\n"
@ -304,6 +305,8 @@ jit::CheckLogging()
EnableChannel(IonSpew_CacheFlush);
if (ContainsFlag(env, "logs"))
EnableIonDebugLogging();
if (ContainsFlag(env, "profiling"))
EnableChannel(IonSpew_Profiling);
if (ContainsFlag(env, "all"))
LoggingBits = uint32_t(-1);

Просмотреть файл

@ -56,6 +56,8 @@ namespace jit {
_(Safepoints) \
/* Debug info about Pools*/ \
_(Pools) \
/* Profiling-related information */ \
_(Profiling) \
/* Debug info about the I$ */ \
_(CacheFlush) \
\

Просмотреть файл

@ -7,6 +7,7 @@
#ifndef jit_IonTypes_h
#define jit_IonTypes_h
#include "mozilla/HashFunctions.h"
#include "mozilla/TypedEnum.h"
#include "jstypes.h"
@ -232,6 +233,111 @@ static const uint32_t VECTOR_SCALE_BITS = 2;
static const uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
// A compile-time constant 4-lane SIMD value: either 4 x int32 or 4 x float
// lanes.  The class doubles as its own hash policy (Lookup/hash/match), so it
// can be used directly as a hash-table key for constant pooling.
class SimdConstant {
  public:
    enum Type {
        Int32x4,
        Float32x4,
        Undefined = -1
    };

  private:
    Type type_;
    union {
        int32_t i32x4[4];
        float f32x4[4];
    } u;

    // True once one of the fill helpers has run; a SimdConstant must not be
    // observed while still Undefined.
    bool defined() const {
        return type_ != Undefined;
    }

    void fillInt32x4(int32_t x, int32_t y, int32_t z, int32_t w)
    {
        type_ = Int32x4;
        u.i32x4[0] = x;
        u.i32x4[1] = y;
        u.i32x4[2] = z;
        u.i32x4[3] = w;
    }

    void fillFloat32x4(float x, float y, float z, float w)
    {
        type_ = Float32x4;
        u.f32x4[0] = x;
        u.f32x4[1] = y;
        u.f32x4[2] = z;
        u.f32x4[3] = w;
    }

  public:
    // Doesn't have a default constructor, as it would prevent it from being
    // included in unions.

    static SimdConstant CreateX4(int32_t x, int32_t y, int32_t z, int32_t w) {
        SimdConstant cst;
        cst.fillInt32x4(x, y, z, w);
        return cst;
    }
    static SimdConstant CreateX4(int32_t *array) {
        SimdConstant cst;
        cst.fillInt32x4(array[0], array[1], array[2], array[3]);
        return cst;
    }
    static SimdConstant CreateX4(float x, float y, float z, float w) {
        SimdConstant cst;
        cst.fillFloat32x4(x, y, z, w);
        return cst;
    }
    static SimdConstant CreateX4(float *array) {
        SimdConstant cst;
        cst.fillFloat32x4(array[0], array[1], array[2], array[3]);
        return cst;
    }

    // Number of lanes: 4 for both supported kinds.
    uint32_t length() const {
        JS_ASSERT(defined());
        switch(type_) {
          case Int32x4:
          case Float32x4:
            return 4;
          case Undefined:
            break;
        }
        MOZ_CRASH("Unexpected SIMD kind");
    }

    Type type() const {
        JS_ASSERT(defined());
        return type_;
    }

    const int32_t *asInt32x4() const {
        JS_ASSERT(defined() && type_ == Int32x4);
        return u.i32x4;
    }

    const float *asFloat32x4() const {
        JS_ASSERT(defined() && type_ == Float32x4);
        return u.f32x4;
    }

    // Bitwise comparison of the lane payloads: +0.f and -0.f compare unequal,
    // and only bit-identical NaNs compare equal.
    bool operator==(const SimdConstant &rhs) const {
        JS_ASSERT(defined() && rhs.defined());
        if (type() != rhs.type())
            return false;
        return memcmp(&u, &rhs.u, sizeof(u)) == 0;
    }

    // SimdConstant is a HashPolicy
    typedef SimdConstant Lookup;
    static HashNumber hash(const SimdConstant &val) {
        // Hash exactly the lane payload.  The previous code hashed
        // sizeof(SimdConstant) bytes starting at &val.u, which read past the
        // end of the object.  Hashing only |u| is also consistent with
        // operator==, which compares only payload bytes.
        return mozilla::HashBytes(&val.u, sizeof(val.u));
    }
    static bool match(const SimdConstant &lhs, const SimdConstant &rhs) {
        return lhs == rhs;
    }
};
// The ordering of this enumeration is important: Anything < Value is a
// specialized type. Furthermore, anything < String has trivial conversion to
// a number.

Просмотреть файл

@ -56,6 +56,7 @@ typedef void (*EnterJitCode)(void *code, unsigned argc, Value *argv, Interpreter
size_t numStackValues, Value *vp);
class IonBuilder;
class JitcodeGlobalTable;
// ICStubSpace is an abstraction for allocation policy and storage for stub data.
// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
@ -232,6 +233,9 @@ class JitRuntime
// their callee.
js::Value ionReturnOverride_;
// Global table of jitcode native address => bytecode address mappings.
JitcodeGlobalTable *jitcodeGlobalTable_;
private:
JitCode *generateExceptionTailStub(JSContext *cx);
JitCode *generateBailoutTailStub(JSContext *cx);
@ -381,6 +385,23 @@ class JitRuntime
JS_ASSERT(!v.isMagic());
ionReturnOverride_ = v;
}
bool hasJitcodeGlobalTable() const {
return jitcodeGlobalTable_ != nullptr;
}
JitcodeGlobalTable *getJitcodeGlobalTable() {
JS_ASSERT(hasJitcodeGlobalTable());
return jitcodeGlobalTable_;
}
bool isNativeToBytecodeMapEnabled(JSRuntime *rt) {
#ifdef DEBUG
return true;
#else // DEBUG
return rt->spsProfiler.enabled();
#endif // DEBUG
}
};
class JitZone

Просмотреть файл

@ -258,6 +258,12 @@ class JitFrameIterator
void dump() const;
inline BaselineFrame *baselineFrame() const;
#ifdef DEBUG
bool verifyReturnAddressUsingNativeToBytecodeMap();
#else
inline bool verifyReturnAddressUsingNativeToBytecodeMap() { return true; }
#endif
};
class IonJSFrameLayout;

702
js/src/jit/JitcodeMap.cpp Normal file
Просмотреть файл

@ -0,0 +1,702 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/JitcodeMap.h"
#include "mozilla/DebugOnly.h"
#include "jit/BaselineJIT.h"
#include "jit/IonSpewer.h"
#include "js/Vector.h"
namespace js {
namespace jit {
// Map a native address inside this Ion code range to the stack of
// (script, pc) locations active at that address, ordered innermost-first.
// Writes the inline depth to |*depth| and appends |*depth| locations to
// |results|.  Returns false only on OOM while appending.
bool
JitcodeGlobalEntry::IonEntry::callStackAtAddr(JSRuntime *rt, void *ptr,
                                              BytecodeLocationVector &results,
                                              uint32_t *depth) const
{
    JS_ASSERT(containsPointer(ptr));
    // Offset of the query address from the start of this entry's jitcode.
    uint32_t ptrOffset = reinterpret_cast<uint8_t *>(ptr) -
                         reinterpret_cast<uint8_t *>(nativeStartAddr());

    // Find the region (delta-run) covering the offset.
    uint32_t regionIdx = regionTable()->findRegionEntry(ptrOffset);
    JS_ASSERT(regionIdx < regionTable()->numRegions());

    JitcodeRegionEntry region = regionTable()->regionEntry(regionIdx);
    *depth = region.scriptDepth();

    // Walk the region's script/pc stack, innermost frame first.
    JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
    JS_ASSERT(locationIter.hasMore());
    bool first = true;
    while (locationIter.hasMore()) {
        uint32_t scriptIdx, pcOffset;

        locationIter.readNext(&scriptIdx, &pcOffset);
        // For the first entry pushed (innermost frame), the pcOffset is obtained
        // from the delta-run encodings.
        if (first) {
            pcOffset = region.findPcOffset(ptrOffset, pcOffset);
            first = false;
        }
        JSScript *script = getScript(scriptIdx);
        jsbytecode *pc = script->offsetToPC(pcOffset);
        if (!results.append(BytecodeLocation(script, pc)))
            return false;
    }

    return true;
}
// Release the heap data owned by this Ion entry: the compacted region table
// payload and, for multi-script entries, the script list.  Safe to call on a
// partially-initialized entry (null regionTable_).
void
JitcodeGlobalEntry::IonEntry::destroy()
{
    // The region table is stored at the tail of the compacted data,
    // which means the start of the region table is a pointer to
    // the _middle_ of the memory space allocated for it.
    //
    // When freeing it, obtain the payload start pointer first.
    if (regionTable_)
        js_free((void*) (regionTable_->payloadStart()));
    regionTable_ = nullptr;

    // Single tag is just pointer-to-jsscript, no memory to free.
    ScriptListTag tag = scriptListTag();
    if (tag > Single)
        js_free(scriptListPointer());
    scriptList_ = 0;
}
// Map a native address in Baseline jitcode to its bytecode location.
// Baseline compiles one script with no inlining, so the resulting "stack"
// is always a single (script, pc) pair and |*depth| is always 1.
bool
JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(JSRuntime *rt, void *ptr,
                                                   BytecodeLocationVector &results,
                                                   uint32_t *depth) const
{
    JS_ASSERT(containsPointer(ptr));
    JS_ASSERT(script_->hasBaselineScript());

    uint8_t *nativeAddr = reinterpret_cast<uint8_t *>(ptr);
    jsbytecode *mappedPc = script_->baselineScript()->pcForNativeAddress(script_, nativeAddr);
    if (!results.append(BytecodeLocation(script_, mappedPc)))
        return false;

    *depth = 1;

    return true;
}
// Map a native address inside an IC stub to bytecode locations.  An IC stub
// has no mapping of its own; it is attributed to the point in the Ion code
// where the stub rejoins the mainline, so we delegate to the Ion entry that
// covers the rejoin address.
bool
JitcodeGlobalEntry::IonCacheEntry::callStackAtAddr(JSRuntime *rt, void *ptr,
                                                   BytecodeLocationVector &results,
                                                   uint32_t *depth) const
{
    JS_ASSERT(containsPointer(ptr));

    // There must exist an entry for the rejoin addr if this entry exists.
    JitRuntime *jitrt = rt->jitRuntime();
    JitcodeGlobalEntry entry;
    jitrt->getJitcodeGlobalTable()->lookupInfallible(rejoinAddr(), &entry);
    JS_ASSERT(entry.isIon());

    return entry.callStackAtAddr(rt, rejoinAddr(), results, depth);
}
// Three-way comparison of two code addresses, with the usual qsort-style
// contract: negative, zero, or positive as |a| is below, equal to, or above
// |b|.
static int ComparePointers(const void *a, const void *b) {
    const uint8_t *lhs = reinterpret_cast<const uint8_t *>(a);
    const uint8_t *rhs = reinterpret_cast<const uint8_t *>(b);
    if (lhs == rhs)
        return 0;
    return (lhs < rhs) ? -1 : 1;
}
// Ordering function for entries in the global splay tree: entries are ordered
// by their native start address.  Query-kind entries (made by MakeQuery) may
// appear on either side, but never both.
/* static */ int
JitcodeGlobalEntry::compare(const JitcodeGlobalEntry &ent1, const JitcodeGlobalEntry &ent2)
{
    // Both parts of compare cannot be a query.
    JS_ASSERT(!(ent1.isQuery() && ent2.isQuery()));

    // Ensure no overlaps for non-query lookups.
    JS_ASSERT_IF(!ent1.isQuery() && !ent2.isQuery(), !ent1.overlapsWith(ent2));

    return ComparePointers(ent1.nativeStartAddr(), ent2.nativeStartAddr());
}
bool
JitcodeGlobalTable::lookup(void *ptr, JitcodeGlobalEntry *result)
{
JS_ASSERT(result);
// Construct a JitcodeGlobalEntry::Query to do the lookup
JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(ptr);
return tree_.contains(query, result);
}
void
JitcodeGlobalTable::lookupInfallible(void *ptr, JitcodeGlobalEntry *result)
{
mozilla::DebugOnly<bool> success = lookup(ptr, result);
JS_ASSERT(success);
}
// Register a jitcode range in the global table.  Returns the result of the
// tree insertion (presumably false on OOM — the failure mode of
// tree_.insert is not visible here; confirm against ds/SplayTree.h).
bool
JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry &entry)
{
    // Should only add Main entries for now.
    JS_ASSERT(entry.isIon() || entry.isBaseline() || entry.isIonCache());

    return tree_.insert(entry);
}
// Remove the entry whose native start address is |startAddr|.  Entries are
// keyed (compared) by start address, so a query on the start address
// identifies the entry exactly.
void
JitcodeGlobalTable::removeEntry(void *startAddr)
{
    JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(startAddr);
    tree_.remove(query);
}
// Serialize a region header: a varint native offset followed by a one-byte
// inline-script depth.  Must stay in exact sync with ReadHead.
/* static */ void
JitcodeRegionEntry::WriteHead(CompactBufferWriter &writer,
                              uint32_t nativeOffset, uint8_t scriptDepth)
{
    writer.writeUnsigned(nativeOffset);
    writer.writeByte(scriptDepth);
}
// Deserialize a region header written by WriteHead: varint native offset,
// then a one-byte inline-script depth.
/* static */ void
JitcodeRegionEntry::ReadHead(CompactBufferReader &reader,
                             uint32_t *nativeOffset, uint8_t *scriptDepth)
{
    *nativeOffset = reader.readUnsigned();
    *scriptDepth = reader.readByte();
}
// Serialize one (script index, pc offset) pair of a region's inline stack as
// two varints.  Must stay in exact sync with ReadScriptPc.
/* static */ void
JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter &writer,
                                  uint32_t scriptIdx, uint32_t pcOffset)
{
    writer.writeUnsigned(scriptIdx);
    writer.writeUnsigned(pcOffset);
}
// Deserialize one (script index, pc offset) pair written by WriteScriptPc.
/* static */ void
JitcodeRegionEntry::ReadScriptPc(CompactBufferReader &reader,
                                 uint32_t *scriptIdx, uint32_t *pcOffset)
{
    *scriptIdx = reader.readUnsigned();
    *pcOffset = reader.readUnsigned();
}
// Encode one (nativeDelta, pcDelta) step of a delta run using the smallest of
// four little-endian variable-length formats (1-4 bytes).  In the bit
// diagrams below, N bits hold the native delta, B bits the pc delta, and the
// low bits of the first byte are the format tag.  The 1- and 2-byte formats
// carry only non-negative pc deltas; the 3- and 4-byte formats carry signed
// ones.  Must stay in exact sync with ReadDelta.
/* static */ void
JitcodeRegionEntry::WriteDelta(CompactBufferWriter &writer,
                               uint32_t nativeDelta, int32_t pcDelta)
{
    if (pcDelta >= 0) {
        // 1 and 2-byte formats possible.

        //  NNNN-BBB0
        if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
            uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                             (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
            writer.writeByte(encVal);
            return;
        }

        //  NNNN-NNNN BBBB-BB01
        if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
            uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                              (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
            writer.writeByte(encVal & 0xff);
            writer.writeByte((encVal >> 8) & 0xff);
            return;
        }
    }

    //  NNNN-NNNN NNNB-BBBB BBBB-B011
    if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
        nativeDelta <= ENC3_NATIVE_DELTA_MAX)
    {
        uint32_t encVal = ENC3_MASK_VAL |
                          ((pcDelta << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
                          (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
        writer.writeByte(encVal & 0xff);
        writer.writeByte((encVal >> 8) & 0xff);
        writer.writeByte((encVal >> 16) & 0xff);
        return;
    }

    //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
        nativeDelta <= ENC4_NATIVE_DELTA_MAX)
    {
        uint32_t encVal = ENC4_MASK_VAL |
                          ((pcDelta << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
                          (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
        writer.writeByte(encVal & 0xff);
        writer.writeByte((encVal >> 8) & 0xff);
        writer.writeByte((encVal >> 16) & 0xff);
        writer.writeByte((encVal >> 24) & 0xff);
        return;
    }

    // Should never get here.  Callers gate on IsDeltaEncodeable.
    MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}
// Decode one (nativeDelta, pcDelta) step written by WriteDelta.  The tag in
// the low bits of the first byte selects the 1-, 2-, 3-, or 4-byte format;
// the 3- and 4-byte formats carry sign-extended pc deltas.
/* static */ void
JitcodeRegionEntry::ReadDelta(CompactBufferReader &reader,
                              uint32_t *nativeDelta, int32_t *pcDelta)
{
    // NB:
    // It's possible to get nativeDeltas with value 0 in two cases:
    //
    // 1. The last region's run.  This is because the region table's start
    // must be 4-byte aligned, and we must insert padding bytes to align the
    // payload section before emitting the table.
    //
    // 2. A zero-offset nativeDelta with a negative pcDelta.
    //
    // So if nativeDelta is zero, then pcDelta must be <= 0.

    //  NNNN-BBB0
    const uint32_t firstByte = reader.readByte();
    if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
        uint32_t encVal = firstByte;
        *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
        *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
        JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    //  NNNN-NNNN BBBB-BB01
    const uint32_t secondByte = reader.readByte();
    if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8;
        *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
        *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
        JS_ASSERT(*pcDelta != 0);
        JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    //  NNNN-NNNN NNNB-BBBB BBBB-B011
    const uint32_t thirdByte = reader.readByte();
    if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
        *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

        uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
        // Fix sign if necessary.
        if (pcDeltaU > ENC3_PC_DELTA_MAX)
            pcDeltaU |= ~ENC3_PC_DELTA_MAX;
        *pcDelta = pcDeltaU;

        JS_ASSERT(*pcDelta != 0);
        JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    JS_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
    const uint32_t fourthByte = reader.readByte();
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
    *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
    // fix sign if necessary
    if (pcDeltaU > ENC4_PC_DELTA_MAX)
        pcDeltaU |= ~ENC4_PC_DELTA_MAX;
    *pcDelta = pcDeltaU;

    JS_ASSERT(*pcDelta != 0);
    JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}
// Compute how many consecutive NativeToBytecode entries, starting at |entry|,
// can be encoded as a single run: entries must share the same inline site,
// have encodeable deltas, and the run is capped at MAX_RUN_LENGTH.
// Always returns at least 1.
/* static */ uint32_t
JitcodeRegionEntry::ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode *entry,
                                      const CodeGeneratorShared::NativeToBytecode *end)
{
    JS_ASSERT(entry < end);

    // We always use the first entry, so runLength starts at 1
    uint32_t runLength = 1;

    uint32_t curNativeOffset = entry->nativeOffset.offset();
    uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

    for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
        // If the next run moves to a different inline site, stop the run.
        if (nextEntry->tree != entry->tree)
            break;

        uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
        uint32_t nextBytecodeOffset = nextEntry->tree->script()->pcToOffset(nextEntry->pc);
        JS_ASSERT(nextNativeOffset >= curNativeOffset);

        uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
        int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);

        // If deltas are too large (very unlikely), stop the run.
        if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta))
            break;

        runLength++;

        // If the run has grown to its maximum length, stop the run.
        if (runLength == MAX_RUN_LENGTH)
            break;

        curNativeOffset = nextNativeOffset;
        curBytecodeOffset = nextBytecodeOffset;
    }

    return runLength;
}
// DEBUG-only helper that spews a hex dump of the bytes appended to a
// CompactBufferWriter since the last call (or since construction).  In
// non-DEBUG builds it compiles to a no-op with the same interface.
struct JitcodeMapBufferWriteSpewer
{
#ifdef DEBUG
    CompactBufferWriter *writer;
    // Buffer position at which the next spew window starts.
    uint32_t startPos;

    // Maximum number of bytes dumped per spew line.
    static const uint32_t DumpMaxBytes = 50;

    JitcodeMapBufferWriteSpewer(CompactBufferWriter &w)
      : writer(&w), startPos(writer->length())
    {}

    void spewAndAdvance(const char *name) {
        uint32_t curPos = writer->length();
        const uint8_t *start = writer->buffer() + startPos;
        const uint8_t *end = writer->buffer() + curPos;
        const char *MAP = "0123456789ABCDEF";

        uint32_t bytes = end - start;
        // Clamp to the dump limit BEFORE expanding to hex: the expansion
        // writes 3 chars per byte into |buffer|, so an unclamped |bytes|
        // greater than DumpMaxBytes would overrun the stack buffer.
        if (bytes > DumpMaxBytes)
            bytes = DumpMaxBytes;

        char buffer[DumpMaxBytes * 3];
        for (uint32_t i = 0; i < bytes; i++) {
            buffer[i*3] = MAP[(start[i] >> 4) & 0xf];
            buffer[i*3 + 1] = MAP[(start[i] >> 0) & 0xf];
            buffer[i*3 + 2] = ' ';
        }
        // Terminate over the trailing space; guard the empty-window case,
        // which previously wrote to buffer[-1].
        if (bytes > 0)
            buffer[bytes*3 - 1] = '\0';
        else
            buffer[0] = '\0';

        IonSpew(IonSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos), int(bytes), buffer);

        // Move to the end of the current buffer.
        startPos = writer->length();
    }
#else // !DEBUG
    JitcodeMapBufferWriteSpewer(CompactBufferWriter &w) {}
    void spewAndAdvance(const char *name) {}
#endif // DEBUG
};
// Write a run, starting at the given NativeToBytecode entry, into the given buffer writer.
// A run is encoded as: header (native offset + depth), the inline script/pc
// stack from innermost to outermost, then a delta pair per subsequent entry.
// Returns false only if the writer reports OOM.
/* static */ bool
JitcodeRegionEntry::WriteRun(CompactBufferWriter &writer,
                             JSScript **scriptList, uint32_t scriptListSize,
                             uint32_t runLength, const CodeGeneratorShared::NativeToBytecode *entry)
{
    JS_ASSERT(runLength > 0);
    JS_ASSERT(runLength <= MAX_RUN_LENGTH);

    // Calculate script depth.
    JS_ASSERT(entry->tree->depth() <= 0xff);
    uint8_t scriptDepth = entry->tree->depth();
    uint32_t regionNativeOffset = entry->nativeOffset.offset();

    JitcodeMapBufferWriteSpewer spewer(writer);

    // Write the head info.
    IonSpew(IonSpew_Profiling, " Head Info: nativeOffset=%d scriptDepth=%d",
            int(regionNativeOffset), int(scriptDepth));
    WriteHead(writer, regionNativeOffset, scriptDepth);
    spewer.spewAndAdvance(" ");

    // Write each script/pc pair, walking from the innermost inline frame out
    // to the top-level script via the callerPc/caller chain.
    {
        InlineScriptTree *curTree = entry->tree;
        jsbytecode *curPc = entry->pc;
        for (uint8_t i = 0; i < scriptDepth; i++) {
            // Find the index of the script within the list.
            // NB: scriptList is guaranteed to contain curTree->script()
            uint32_t scriptIdx = 0;
            for (; scriptIdx < scriptListSize; scriptIdx++) {
                if (scriptList[scriptIdx] == curTree->script())
                    break;
            }
            JS_ASSERT(scriptIdx < scriptListSize);

            uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

            IonSpew(IonSpew_Profiling, " Script/PC %d: scriptIdx=%d pcOffset=%d",
                    int(i), int(scriptIdx), int(pcOffset));
            WriteScriptPc(writer, scriptIdx, pcOffset);
            spewer.spewAndAdvance(" ");

            JS_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
            curPc = curTree->callerPc();
            curTree = curTree->caller();
        }
    }

    // Start writing runs.
    uint32_t curNativeOffset = entry->nativeOffset.offset();
    uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

    IonSpew(IonSpew_Profiling, " Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
            int(curNativeOffset), int(curBytecodeOffset));

    // Skip first entry because it is implicit in the header. Start at subsequent entry.
    for (uint32_t i = 1; i < runLength; i++) {
        JS_ASSERT(entry[i].tree == entry->tree);

        uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
        uint32_t nextBytecodeOffset = entry[i].tree->script()->pcToOffset(entry[i].pc);
        JS_ASSERT(nextNativeOffset >= curNativeOffset);

        uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
        int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
        JS_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

        IonSpew(IonSpew_Profiling, " RunEntry native: %d-%d [%d] bytecode: %d-%d [%d]",
                int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
                int(curBytecodeOffset), int(nextBytecodeOffset), int(bytecodeDelta));
        WriteDelta(writer, nativeDelta, bytecodeDelta);

        // Spew the bytecode in these ranges.
        if (curBytecodeOffset < nextBytecodeOffset) {
            IonSpewStart(IonSpew_Profiling, " OPS: ");
            uint32_t curBc = curBytecodeOffset;
            while (curBc < nextBytecodeOffset) {
                jsbytecode *pc = entry[i].tree->script()->offsetToPC(curBc);
                JSOp op = JSOp(*pc);
                IonSpewCont(IonSpew_Profiling, "%s ", js_CodeName[op]);
                curBc += GetBytecodeLength(pc);
            }
            IonSpewFin(IonSpew_Profiling);
        }
        spewer.spewAndAdvance(" ");

        curNativeOffset = nextNativeOffset;
        curBytecodeOffset = nextBytecodeOffset;
    }

    if (writer.oom())
        return false;

    return true;
}
// Parse this region's encoded bytes (data_..end_), caching the native offset,
// script depth, and the interior pointers to the script/pc stack and the
// delta run so later queries need not re-scan the header.
void
JitcodeRegionEntry::unpack()
{
    CompactBufferReader reader(data_, end_);
    ReadHead(reader, &nativeOffset_, &scriptDepth_);
    JS_ASSERT(scriptDepth_ > 0);

    scriptPcStack_ = reader.currentPosition();
    // Skip past script/pc stack
    for (unsigned i = 0; i < scriptDepth_; i++) {
        uint32_t scriptIdx, pcOffset;
        ReadScriptPc(reader, &scriptIdx, &pcOffset);
    }

    deltaRun_ = reader.currentPosition();
}
// Walk this region's delta run, starting from |startPcOffset| (the region's
// innermost-frame pc), accumulating deltas until the run entry covering
// |queryNativeOffset| is reached.  Returns the pc offset associated with that
// native offset.
uint32_t
JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const
{
    DeltaIterator iter = deltaIterator();
    uint32_t curNativeOffset = nativeOffset();
    uint32_t curPcOffset = startPcOffset;
    while (iter.hasMore()) {
        uint32_t nativeDelta;
        int32_t pcDelta;
        iter.readNext(&nativeDelta, &pcDelta);

        // The start address of the next delta-run entry is counted towards
        // the current delta-run entry, because return addresses should
        // associate with the bytecode op prior (the call) not the op after.
        if (queryNativeOffset <= curNativeOffset + nativeDelta)
            break;
        curNativeOffset += nativeDelta;
        curPcOffset += pcDelta;
    }
    return curPcOffset;
}
// Initialize |out| as the global-table entry describing |code|, choosing the
// script-list representation by script count: a single inline pointer, a
// small inline array, or a heap-allocated SizedScriptList.  Returns false
// only on OOM for the heap-allocated case.
bool
JitcodeIonTable::makeIonEntry(JSContext *cx, JitCode *code,
                              uint32_t numScripts, JSScript **scripts,
                              JitcodeGlobalEntry::IonEntry &out)
{
    typedef JitcodeGlobalEntry::IonEntry::SizedScriptList SizedScriptList;

    JS_ASSERT(numScripts > 0);

    if (numScripts == 1) {
        out.init(code->raw(), code->raw() + code->instructionsSize(), scripts[0], this);
        return true;
    }

    if (numScripts < uint32_t(JitcodeGlobalEntry::IonEntry::Multi)) {
        out.init(code->raw(), code->raw() + code->instructionsSize(), numScripts, scripts, this);
        return true;
    }

    // Create SizedScriptList
    void *mem = cx->malloc_(SizedScriptList::AllocSizeFor(numScripts));
    if (!mem)
        return false;
    SizedScriptList *scriptList = new (mem) SizedScriptList(numScripts, scripts);
    out.init(code->raw(), code->raw() + code->instructionsSize(), scriptList, this);
    return true;
}
// Return the index of the region whose native range covers |nativeOffset|.
// Regions are sorted by start offset; each region is open at its start and
// closed at its end (see the comment in the binary-search arm).  Small tables
// are scanned linearly, larger ones binary-searched.
uint32_t
JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const
{
    static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
    uint32_t regions = numRegions();
    JS_ASSERT(regions > 0);

    // For small region lists, just search linearly.
    if (regions <= LINEAR_SEARCH_THRESHOLD) {
        JitcodeRegionEntry previousEntry = regionEntry(0);
        for (uint32_t i = 1; i < regions; i++) {
            JitcodeRegionEntry nextEntry = regionEntry(i);
            JS_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());

            // See note in binary-search code below about why we use '<=' here instead of
            // '<'. Short explanation: regions are closed at their ending addresses,
            // and open at their starting addresses.
            if (nativeOffset <= nextEntry.nativeOffset())
                return i-1;

            previousEntry = nextEntry;
        }

        // If nothing found, assume it falls within last region.
        return regions - 1;
    }

    // For larger ones, binary search the region table.
    uint32_t idx = 0;
    uint32_t count = regions;
    while (count > 1) {
        uint32_t step = count/2;
        uint32_t mid = idx + step;
        JitcodeRegionEntry midEntry = regionEntry(mid);

        // A region memory range is closed at its ending address, not starting
        // address. This is because the return address for calls must associate
        // with the call's bytecode PC, not the PC of the bytecode operator after
        // the call.
        //
        // So a query is < an entry if the query nativeOffset is <= the start address
        // of the entry, and a query is >= an entry if the query nativeOffset is > the
        // start address of an entry.
        if (nativeOffset <= midEntry.nativeOffset()) {
            // Target entry is below midEntry.
            count = step;
        } else { // if (nativeOffset > midEntry.nativeOffset())
            // Target entry is at midEntry or above.
            idx = mid;
            count -= step;
        }
    }
    return idx;
}
// Serialize the full native=>bytecode map for one Ion compilation into
// |writer|.  Layout: the encoded runs (payload), zero-padding to 4-byte
// alignment, then a native-endian uint32_t table holding the run count
// followed by one backwards offset (from the table start) per run.  On
// success, stores the table's byte offset and region count in the out
// params.  Returns false on OOM.
/* static */ bool
JitcodeIonTable::WriteIonTable(CompactBufferWriter &writer,
                               JSScript **scriptList, uint32_t scriptListSize,
                               const CodeGeneratorShared::NativeToBytecode *start,
                               const CodeGeneratorShared::NativeToBytecode *end,
                               uint32_t *tableOffsetOut, uint32_t *numRegionsOut)
{
    JS_ASSERT(tableOffsetOut != nullptr);
    JS_ASSERT(numRegionsOut != nullptr);
    JS_ASSERT(writer.length() == 0);
    JS_ASSERT(scriptListSize > 0);

    IonSpew(IonSpew_Profiling, "Writing native to bytecode map for %s:%d (%d entries)",
            scriptList[0]->filename(), scriptList[0]->lineno(),
            int(end - start));

    IonSpew(IonSpew_Profiling, " ScriptList of size %d", int(scriptListSize));
    for (uint32_t i = 0; i < scriptListSize; i++) {
        IonSpew(IonSpew_Profiling, " Script %d - %s:%d",
                int(i), scriptList[i]->filename(), int(scriptList[i]->lineno()));
    }

    // Write out runs first. Keep a vector tracking the positive offsets from payload
    // start to the run.
    const CodeGeneratorShared::NativeToBytecode *curEntry = start;
    js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

    while (curEntry != end) {
        // Calculate the length of the next run.
        uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
        JS_ASSERT(runLength > 0);
        JS_ASSERT(runLength <= (end - curEntry));
        IonSpew(IonSpew_Profiling, " Run at entry %d, length %d, buffer offset %d",
                int(curEntry - start), int(runLength), int(writer.length()));

        // Store the offset of the run.
        if (!runOffsets.append(writer.length()))
            return false;

        // Encode the run.
        if (!JitcodeRegionEntry::WriteRun(writer, scriptList, scriptListSize, runLength, curEntry))
            return false;

        curEntry += runLength;
    }

    // Done encoding regions. About to start table. Ensure we are aligned to 4 bytes
    // since table is composed of uint32_t values.
    uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
    if (padding == sizeof(uint32_t))
        padding = 0;
    IonSpew(IonSpew_Profiling, " Padding %d bytes after run @%d",
            int(padding), int(writer.length()));
    for (uint32_t i = 0; i < padding; i++)
        writer.writeByte(0);

    // Now at start of table.
    uint32_t tableOffset = writer.length();

    // The table being written at this point will be accessed directly via uint32_t
    // pointers, so all writes below use native endianness.

    // Write out numRegions
    IonSpew(IonSpew_Profiling, " Writing numRuns=%d", int(runOffsets.length()));
    writer.writeNativeEndianUint32_t(runOffsets.length());

    // Write out region offset table. The offsets in |runOffsets| are currently forward
    // offsets from the beginning of the buffer. We convert them to backwards offsets
    // from the start of the table before writing them into their table entries.
    for (uint32_t i = 0; i < runOffsets.length(); i++) {
        IonSpew(IonSpew_Profiling, " Run %d offset=%d backOffset=%d @%d",
                int(i), int(runOffsets[i]), int(tableOffset - runOffsets[i]), int(writer.length()));
        writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
    }

    if (writer.oom())
        return false;

    *tableOffsetOut = tableOffset;
    *numRegionsOut = runOffsets.length();
    return true;
}
} // namespace jit
} // namespace js

865
js/src/jit/JitcodeMap.h Normal file
Просмотреть файл

@ -0,0 +1,865 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_JitcodeMap_h
#define jit_JitcodeMap_h
#include "ds/SplayTree.h"
#include "jit/CompactBuffer.h"
#include "jit/CompileInfo.h"
#include "jit/shared/CodeGenerator-shared.h"
namespace js {
namespace jit {
/*
* The Ion jitcode map implements tables to allow mapping from addresses in ion jitcode
* to the list of (JSScript *, jsbytecode *) pairs that are implicitly active in the frame at
* that point in the native code.
*
* To represent this information efficiently, a multi-level table is used.
*
 * At the top level, a global splay-tree of JitcodeGlobalEntry entries describes the mapping for
* each individual IonCode script generated by compiles. The entries are ordered by their
* nativeStartAddr.
*
* Every entry in the table is of fixed size, but there are different entry types,
* distinguished by the kind field.
*/
class JitcodeIonTable;
class JitcodeRegionEntry;
// A fixed-size entry in the global jitcode table. The entry is a tagged
// union: the |kind_| field stored in the shadowing BaseEntry selects which
// of the variant structs (IonEntry / BaselineEntry / IonCacheEntry /
// QueryEntry) is active.
class JitcodeGlobalEntry
{
  public:
    enum Kind {
        INVALID = 0,
        Ion,
        Baseline,
        IonCache,
        Query,
        LIMIT
    };
    // Kind values must fit in the 3 low tag bits (see IonEntry::LowBits).
    JS_STATIC_ASSERT(LIMIT <= 8);

    // A single (script, pc) pair of an inline call stack.
    struct BytecodeLocation {
        JSScript *script;
        jsbytecode *pc;
        BytecodeLocation(JSScript *script, jsbytecode *pc) : script(script), pc(pc) {}
    };
    typedef Vector<BytecodeLocation, 0, SystemAllocPolicy> BytecodeLocationVector;

    // Fields common to all entry kinds: the half-open native code range
    // [nativeStartAddr_, nativeEndAddr_) and the kind tag.
    struct BaseEntry
    {
        void *nativeStartAddr_;
        void *nativeEndAddr_;
        Kind kind_;

        // Reset to an empty/invalid entry.
        void init() {
            nativeStartAddr_ = nullptr;
            nativeEndAddr_ = nullptr;
            kind_ = INVALID;
        }
        void init(Kind kind, void *nativeStartAddr, void *nativeEndAddr) {
            JS_ASSERT(nativeStartAddr);
            JS_ASSERT(nativeEndAddr);
            JS_ASSERT(kind > INVALID && kind < LIMIT);
            nativeStartAddr_ = nativeStartAddr;
            nativeEndAddr_ = nativeEndAddr;
            kind_ = kind;
        }

        Kind kind() const {
            return kind_;
        }
        void *nativeStartAddr() const {
            return nativeStartAddr_;
        }
        void *nativeEndAddr() const {
            return nativeEndAddr_;
        }

        // Range predicates; the range is inclusive of start, exclusive of end.
        bool startsBelowPointer(void *ptr) const {
            return ((uint8_t *)nativeStartAddr()) <= ((uint8_t *) ptr);
        }
        bool endsAbovePointer(void *ptr) const {
            return ((uint8_t *)nativeEndAddr()) > ((uint8_t *) ptr);
        }
        bool containsPointer(void *ptr) const {
            return startsBelowPointer(ptr) && endsAbovePointer(ptr);
        }
    };

    // Entry describing jitcode generated by Ion main-line compilation.
    struct IonEntry : public BaseEntry
    {
        // Tagged pointer to the script list. The low 3 bits encode the list
        // representation (see ScriptListTag): 0 = single JSScript*, 2..6 = a
        // raw JSScript* array of that length, 7 = a SizedScriptList.
        uintptr_t scriptList_;

        // regionTable_ points to the start of the region table within the
        // packed map for compile represented by this entry.  Since the
        // region table occurs at the tail of the memory region, this pointer
        // points somewhere inside the region memory space, and not to the start
        // of the memory space.
        JitcodeIonTable *regionTable_;

        static const unsigned LowBits = 3;
        static const uintptr_t LowMask = (uintptr_t(1) << LowBits) - 1;

        enum ScriptListTag {
            Single = 0,
            Multi = 7
        };

        // Variable-length script list; trailing storage is allocated with
        // AllocSizeFor (scripts[0] is a zero-length-array idiom — compiler
        // extension, not standard C++).
        struct SizedScriptList {
            uint32_t size;
            JSScript *scripts[0];
            SizedScriptList(uint32_t sz, JSScript **scr) : size(sz) {
                for (uint32_t i = 0; i < size; i++)
                    scripts[i] = scr[i];
            }
            static uint32_t AllocSizeFor(uint32_t nscripts) {
                return sizeof(SizedScriptList) + (nscripts * sizeof(JSScript *));
            }
        };

        // Init with a single script (tag Single).
        void init(void *nativeStartAddr, void *nativeEndAddr,
                  JSScript *script, JitcodeIonTable *regionTable)
        {
            JS_ASSERT((uintptr_t(script) & LowMask) == 0);
            JS_ASSERT(script);
            JS_ASSERT(regionTable);
            BaseEntry::init(Ion, nativeStartAddr, nativeEndAddr);
            scriptList_ = uintptr_t(script);
            regionTable_ = regionTable;
        }

        // Init with a small raw array of scripts; the count itself (1..6)
        // is stored in the tag bits.
        void init(void *nativeStartAddr, void *nativeEndAddr,
                  unsigned numScripts, JSScript **scripts, JitcodeIonTable *regionTable)
        {
            JS_ASSERT((uintptr_t(scripts) & LowMask) == 0);
            JS_ASSERT(numScripts >= 1);
            JS_ASSERT(numScripts <= 6);
            JS_ASSERT(scripts);
            JS_ASSERT(regionTable);
            BaseEntry::init(Ion, nativeStartAddr, nativeEndAddr);
            scriptList_ = uintptr_t(scripts) | numScripts;
            regionTable_ = regionTable;
        }

        // Init with a SizedScriptList (more than 6 scripts, tag Multi).
        void init(void *nativeStartAddr, void *nativeEndAddr,
                  SizedScriptList *scripts, JitcodeIonTable *regionTable)
        {
            JS_ASSERT((uintptr_t(scripts) & LowMask) == 0);
            JS_ASSERT(scripts->size > 6);
            JS_ASSERT(scripts);
            JS_ASSERT(regionTable);
            BaseEntry::init(Ion, nativeStartAddr, nativeEndAddr);
            scriptList_ = uintptr_t(scripts) | uintptr_t(Multi);
            regionTable_ = regionTable;
        }

        ScriptListTag scriptListTag() const {
            return static_cast<ScriptListTag>(scriptList_ & LowMask);
        }
        void *scriptListPointer() const {
            return reinterpret_cast<void *>(scriptList_ & ~LowMask);
        }

        JSScript *singleScript() const {
            JS_ASSERT(scriptListTag() == Single);
            return reinterpret_cast<JSScript *>(scriptListPointer());
        }
        JSScript **rawScriptArray() const {
            JS_ASSERT(scriptListTag() < Multi);
            return reinterpret_cast<JSScript **>(scriptListPointer());
        }
        SizedScriptList *sizedScriptList() const {
            JS_ASSERT(scriptListTag() == Multi);
            return reinterpret_cast<SizedScriptList *>(scriptListPointer());
        }

        // Number of scripts in whatever representation is active.
        unsigned numScripts() const {
            ScriptListTag tag = scriptListTag();
            if (tag == Single)
                return 1;
            if (tag < Multi) {
                JS_ASSERT(int(tag) >= 2);
                return static_cast<unsigned>(tag);
            }
            return sizedScriptList()->size;
        }

        JSScript *getScript(unsigned idx) const {
            JS_ASSERT(idx < numScripts());
            ScriptListTag tag = scriptListTag();
            if (tag == Single)
                return singleScript();
            if (tag < Multi) {
                JS_ASSERT(int(tag) >= 2);
                return rawScriptArray()[idx];
            }
            return sizedScriptList()->scripts[idx];
        }

        void destroy();

        JitcodeIonTable *regionTable() const {
            return regionTable_;
        }

        // Linear search for |script| in the list; returns -1 if absent.
        int scriptIndex(JSScript *script) const {
            unsigned count = numScripts();
            for (unsigned i = 0; i < count; i++) {
                if (getScript(i) == script)
                    return i;
            }
            return -1;
        }

        bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                             uint32_t *depth) const;
    };

    // Entry for Baseline jitcode; a single script, no inlining.
    struct BaselineEntry : public BaseEntry
    {
        JSScript *script_;

        void init(void *nativeStartAddr, void *nativeEndAddr, JSScript *script)
        {
            JS_ASSERT(script != nullptr);
            BaseEntry::init(Baseline, nativeStartAddr, nativeEndAddr);
            script_ = script;
        }

        JSScript *script() const {
            return script_;
        }

        void destroy() {}

        bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                             uint32_t *depth) const;
    };

    // Entry for an IonCache stub; maps back via the rejoin address into the
    // main-line code that owns the stub.
    struct IonCacheEntry : public BaseEntry
    {
        void *rejoinAddr_;

        void init(void *nativeStartAddr, void *nativeEndAddr, void *rejoinAddr)
        {
            JS_ASSERT(rejoinAddr != nullptr);
            BaseEntry::init(IonCache, nativeStartAddr, nativeEndAddr);
            rejoinAddr_ = rejoinAddr;
        }

        void *rejoinAddr() const {
            return rejoinAddr_;
        }

        void destroy() {}

        bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                             uint32_t *depth) const;
    };

    // QueryEntry is never stored in the table, just used for queries
    // where an instance of JitcodeGlobalEntry is required to do tree
    // lookups.
    struct QueryEntry : public BaseEntry
    {
        // A query covers a degenerate [addr, addr) range.
        void init(void *addr) {
            BaseEntry::init(Query, addr, addr);
        }
        uint8_t *addr() const {
            return reinterpret_cast<uint8_t *>(nativeStartAddr());
        }
        void destroy() {}
    };

  private:
    union {
        // Shadowing BaseEntry instance to allow access to base fields
        // and type extraction.
        BaseEntry base_;

        // The most common entry type: describing jitcode generated by
        // Ion main-line code.
        IonEntry ion_;

        // Baseline jitcode.
        BaselineEntry baseline_;

        // IonCache stubs.
        IonCacheEntry ionCache_;

        // When doing queries on the SplayTree for particular addresses,
        // the query addresses are represented using a QueryEntry.
        QueryEntry query_;
    };

  public:
    JitcodeGlobalEntry() {
        base_.init();
    }
    JitcodeGlobalEntry(const IonEntry &ion) {
        ion_ = ion;
    }
    JitcodeGlobalEntry(const BaselineEntry &baseline) {
        baseline_ = baseline;
    }
    JitcodeGlobalEntry(const IonCacheEntry &ionCache) {
        ionCache_ = ionCache;
    }
    JitcodeGlobalEntry(const QueryEntry &query) {
        query_ = query;
    }

    // Build a throwaway query entry for a tree lookup at |ptr|.
    static JitcodeGlobalEntry MakeQuery(void *ptr) {
        QueryEntry query;
        query.init(ptr);
        return JitcodeGlobalEntry(query);
    }

    // Dispatch destruction to the active variant.
    void destroy() {
        switch (kind()) {
          case Ion:
            ionEntry().destroy();
            break;
          case Baseline:
            baselineEntry().destroy();
            break;
          case IonCache:
            ionCacheEntry().destroy();
            break;
          case Query:
            queryEntry().destroy();
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid JitcodeGlobalEntry kind.");
        }
    }

    void *nativeStartAddr() const {
        return base_.nativeStartAddr();
    }
    void *nativeEndAddr() const {
        return base_.nativeEndAddr();
    }

    bool startsBelowPointer(void *ptr) const {
        return base_.startsBelowPointer(ptr);
    }
    bool endsAbovePointer(void *ptr) const {
        return base_.endsAbovePointer(ptr);
    }
    bool containsPointer(void *ptr) const {
        return base_.containsPointer(ptr);
    }

    bool overlapsWith(const JitcodeGlobalEntry &entry) const {
        // Catch full containment of |entry| within |this|, and partial overlaps.
        if (containsPointer(entry.nativeStartAddr()) || containsPointer(entry.nativeEndAddr()))
            return true;

        // Catch full containment of |this| within |entry|.
        if (startsBelowPointer(entry.nativeEndAddr()) && endsAbovePointer(entry.nativeStartAddr()))
            return true;

        return false;
    }

    Kind kind() const {
        return base_.kind();
    }

    bool isIon() const {
        return kind() == Ion;
    }
    bool isBaseline() const {
        return kind() == Baseline;
    }
    bool isIonCache() const {
        return kind() == IonCache;
    }
    bool isQuery() const {
        return kind() == Query;
    }

    // Checked variant accessors (assert the entry is of the requested kind).
    IonEntry &ionEntry() {
        JS_ASSERT(isIon());
        return ion_;
    }
    BaselineEntry &baselineEntry() {
        JS_ASSERT(isBaseline());
        return baseline_;
    }
    IonCacheEntry &ionCacheEntry() {
        JS_ASSERT(isIonCache());
        return ionCache_;
    }
    QueryEntry &queryEntry() {
        JS_ASSERT(isQuery());
        return query_;
    }

    const IonEntry &ionEntry() const {
        JS_ASSERT(isIon());
        return ion_;
    }
    const BaselineEntry &baselineEntry() const {
        JS_ASSERT(isBaseline());
        return baseline_;
    }
    const IonCacheEntry &ionCacheEntry() const {
        JS_ASSERT(isIonCache());
        return ionCache_;
    }
    const QueryEntry &queryEntry() const {
        JS_ASSERT(isQuery());
        return query_;
    }

    // Read the inline call stack at a given point in the native code and append into
    // the given vector.  Innermost (script,pc) pair will be appended first, and
    // outermost appended last.
    //
    // Returns false on memory failure.
    bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                         uint32_t *depth) const
    {
        switch (kind()) {
          case Ion:
            return ionEntry().callStackAtAddr(rt, ptr, results, depth);
          case Baseline:
            return baselineEntry().callStackAtAddr(rt, ptr, results, depth);
          case IonCache:
            return ionCacheEntry().callStackAtAddr(rt, ptr, results, depth);
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid JitcodeGlobalEntry kind.");
        }
        return false;
    }

    // Figure out the number of the (JSScript *, jsbytecode *) pairs that are active
    // at this location.
    uint32_t lookupInlineCallDepth(void *ptr);

    // Compare two global entries (splay-tree ordering by native address range).
    static int compare(const JitcodeGlobalEntry &ent1, const JitcodeGlobalEntry &ent2);
};
/*
* Global table of JitcodeGlobalEntry values sorted by native address range.
*/
// Global table of JitcodeGlobalEntry values, keyed by native address range.
// The splay tree's node storage is carved out of a LifoAlloc owned by the
// table; entries_ additionally keeps a flat vector of the entries.
class JitcodeGlobalTable
{
  public:
    typedef SplayTree<JitcodeGlobalEntry, JitcodeGlobalEntry> EntryTree;
    typedef Vector<JitcodeGlobalEntry, 0, SystemAllocPolicy> EntryVector;

  private:
    // Chunk size for the tree-node allocator.
    static const size_t LIFO_CHUNK_SIZE = 16 * 1024;
    LifoAlloc treeAlloc_;
    EntryTree tree_;
    EntryVector entries_;

  public:
    JitcodeGlobalTable() : treeAlloc_(LIFO_CHUNK_SIZE), tree_(&treeAlloc_), entries_() {}
    ~JitcodeGlobalTable() {}

    bool empty() const {
        return tree_.empty();
    }

    // Find the entry whose native range contains |ptr|; returns false if none.
    bool lookup(void *ptr, JitcodeGlobalEntry *result);
    // As lookup(), but the caller asserts an entry exists.
    void lookupInfallible(void *ptr, JitcodeGlobalEntry *result);

    bool addEntry(const JitcodeGlobalEntry::IonEntry &entry) {
        return addEntry(JitcodeGlobalEntry(entry));
    }
    bool addEntry(const JitcodeGlobalEntry::BaselineEntry &entry) {
        return addEntry(JitcodeGlobalEntry(entry));
    }
    bool addEntry(const JitcodeGlobalEntry::IonCacheEntry &entry) {
        return addEntry(JitcodeGlobalEntry(entry));
    }

    // Remove the entry whose range starts at |startAddr|.
    void removeEntry(void *startAddr);

  private:
    bool addEntry(const JitcodeGlobalEntry &entry);
};
/*
* Container class for main jitcode table.
* The Region table's memory is structured as follows:
*
* +------------------------------------------------+ |
* | Region 1 Run | |
* |------------------------------------------------| |
* | Region 2 Run | |
* | | |
* | | |
* |------------------------------------------------| |
* | Region 3 Run | |
* | | |
* |------------------------------------------------| |-- Payload
* | | |
* | ... | |
* | | |
* |------------------------------------------------| |
* | Region M Run | |
* | | |
* +================================================+ <- RegionTable pointer points here
 * | uint32_t numRegions = M | |
* +------------------------------------------------+ |
* | Region 1 | |
* | uint32_t entryOffset = size(Payload) | |
* +------------------------------------------------+ |
* | | |-- Table
* | ... | |
* | | |
* +------------------------------------------------+ |
* | Region M | |
* | uint32_t entryOffset | |
* +------------------------------------------------+ |
*
* The region table is composed of two sections: a tail section that contains a table of
 * fixed-size entries containing offsets into the head section, and a head section that
* holds a sequence of variable-sized runs. The table in the tail section serves to
* locate the variable-length encoded structures in the head section.
*
 * The entryOffsets in the table indicate the byte offset to subtract from the regionTable
* pointer to arrive at the encoded region in the payload.
*
*
* Variable-length entries in payload
* ----------------------------------
* The entryOffsets in the region table's fixed-sized entries refer to a location within the
* variable-length payload section. This location contains a compactly encoded "run" of
* mappings.
*
* Each run starts by describing the offset within the native code it starts at, and the
* sequence of (JSScript *, jsbytecode *) pairs active at that site. Following that, there
* are a number of variable-length entries encoding (nativeOffsetDelta, bytecodeOffsetDelta)
* pairs for the run.
*
* VarUint32 nativeOffset;
* - The offset from nativeStartAddr in the global table entry at which
* the jitcode for this region starts.
*
* Uint8_t scriptDepth;
* - The depth of inlined scripts for this region.
*
* List<VarUint32> inlineScriptPcStack;
* - We encode (2 * scriptDepth) VarUint32s here. Each pair of uint32s are taken
* as an index into the scriptList in the global table entry, and a pcOffset
* respectively.
*
* List<NativeAndBytecodeDelta> deltaRun;
* - The rest of the entry is a deltaRun that stores a series of variable-length
* encoded NativeAndBytecodeDelta datums.
*/
// Reader/writer for one variable-length "run" of native<->bytecode mappings
// in the payload section of a JitcodeIonTable (format described above).
class JitcodeRegionEntry
{
  private:
    // Upper bound on entries per run (see ExpectedRunLength).
    static const unsigned MAX_RUN_LENGTH = 100;

  public:
    // Run header: native start offset + inline-script depth.
    static void WriteHead(CompactBufferWriter &writer,
                          uint32_t nativeOffset, uint8_t scriptDepth);
    static void ReadHead(CompactBufferReader &reader,
                         uint32_t *nativeOffset, uint8_t *scriptDepth);

    // One (scriptIndex, pcOffset) pair of the inline stack.
    static void WriteScriptPc(CompactBufferWriter &writer, uint32_t scriptIdx, uint32_t pcOffset);
    static void ReadScriptPc(CompactBufferReader &reader, uint32_t *scriptIdx, uint32_t *pcOffset);

    // One (nativeDelta, pcDelta) datum of the delta run.
    static void WriteDelta(CompactBufferWriter &writer, uint32_t nativeDelta, int32_t pcDelta);
    static void ReadDelta(CompactBufferReader &reader, uint32_t *nativeDelta, int32_t *pcDelta);

    // Given a pointer into an array of NativeToBytecode (and a pointer to the end of the array),
    // compute the number of entries that would be consume by outputting a run starting
    // at this one.
    static uint32_t ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode *entry,
                                      const CodeGeneratorShared::NativeToBytecode *end);

    // Write a run, starting at the given NativeToBytecode entry, into the given buffer writer.
    static bool WriteRun(CompactBufferWriter &writer,
                         JSScript **scriptList, uint32_t scriptListSize,
                         uint32_t runLength, const CodeGeneratorShared::NativeToBytecode *entry);

    // Delta Run entry formats are encoded little-endian:
    //
    //      byte 0
    //      NNNN-BBB0
    //      Single byte format.  nativeDelta in [0, 15], pcDelta in [0, 7]
    //
    static const uint32_t ENC1_MASK = 0x1;
    static const uint32_t ENC1_MASK_VAL = 0x0;

    static const uint32_t ENC1_NATIVE_DELTA_MAX = 0xf;
    static const unsigned ENC1_NATIVE_DELTA_SHIFT = 4;

    static const uint32_t ENC1_PC_DELTA_MASK = 0x0e;
    static const int32_t ENC1_PC_DELTA_MAX = 0x7;
    static const unsigned ENC1_PC_DELTA_SHIFT = 1;

    //      byte 1    byte 0
    //      NNNN-NNNN BBBB-BB01
    //      Two-byte format.  nativeDelta in [0, 255], pcDelta in [0, 63]
    //
    static const uint32_t ENC2_MASK = 0x3;
    static const uint32_t ENC2_MASK_VAL = 0x1;

    static const uint32_t ENC2_NATIVE_DELTA_MAX = 0xff;
    static const unsigned ENC2_NATIVE_DELTA_SHIFT = 8;

    static const uint32_t ENC2_PC_DELTA_MASK = 0x00fc;
    static const int32_t ENC2_PC_DELTA_MAX = 0x3f;
    static const unsigned ENC2_PC_DELTA_SHIFT = 2;

    //      byte 2    byte 1    byte 0
    //      NNNN-NNNN NNNB-BBBB BBBB-B011
    //      Three-byte format.  nativeDelta in [0, 2047], pcDelta in [-512, 511]
    //
    static const uint32_t ENC3_MASK = 0x7;
    static const uint32_t ENC3_MASK_VAL = 0x3;

    static const uint32_t ENC3_NATIVE_DELTA_MAX = 0x7ff;
    static const unsigned ENC3_NATIVE_DELTA_SHIFT = 13;

    static const uint32_t ENC3_PC_DELTA_MASK = 0x001ff8;
    static const int32_t ENC3_PC_DELTA_MAX = 0x1ff;
    static const int32_t ENC3_PC_DELTA_MIN = -ENC3_PC_DELTA_MAX - 1;
    static const unsigned ENC3_PC_DELTA_SHIFT = 3;

    //      byte 3    byte 2    byte 1    byte 0
    //      NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    //      Four-byte format.  nativeDelta in [0, 65535], pcDelta in [-4096, 4095]
    static const uint32_t ENC4_MASK = 0x7;
    static const uint32_t ENC4_MASK_VAL = 0x7;

    static const uint32_t ENC4_NATIVE_DELTA_MAX = 0xffff;
    static const unsigned ENC4_NATIVE_DELTA_SHIFT = 16;

    static const uint32_t ENC4_PC_DELTA_MASK = 0x0000fff8;
    static const int32_t ENC4_PC_DELTA_MAX = 0xfff;
    static const int32_t ENC4_PC_DELTA_MIN = -ENC4_PC_DELTA_MAX - 1;
    static const unsigned ENC4_PC_DELTA_SHIFT = 3;

    // True iff the pair fits in the largest (four-byte) encoding.
    static bool IsDeltaEncodeable(uint32_t nativeDelta, int32_t pcDelta) {
        return (nativeDelta <= ENC4_NATIVE_DELTA_MAX) &&
               (pcDelta >= ENC4_PC_DELTA_MIN) && (pcDelta <= ENC4_PC_DELTA_MAX);
    }

  private:
    // Raw [data_, end_) byte range of this run within the payload.
    const uint8_t *data_;
    const uint8_t *end_;

    // Unpacked state from jitcode entry.
    uint32_t nativeOffset_;
    uint8_t scriptDepth_;
    const uint8_t *scriptPcStack_;
    const uint8_t *deltaRun_;

    // Decode the head, setting the four fields above.
    void unpack();

  public:
    JitcodeRegionEntry(const uint8_t *data, const uint8_t *end)
      : data_(data), end_(end),
        nativeOffset_(0), scriptDepth_(0),
        scriptPcStack_(nullptr), deltaRun_(nullptr)
    {
        JS_ASSERT(data_ < end_);
        unpack();
        JS_ASSERT(scriptPcStack_ < end_);
        JS_ASSERT(deltaRun_ <= end_);
    }

    uint32_t nativeOffset() const {
        return nativeOffset_;
    }
    uint32_t scriptDepth() const {
        return scriptDepth_;
    }

    // Iterates the scriptDepth_ (scriptIdx, pcOffset) pairs of the run head.
    class ScriptPcIterator
    {
      private:
        uint32_t count_;
        const uint8_t *start_;
        const uint8_t *end_;

        uint32_t idx_;
        const uint8_t *cur_;

      public:
        ScriptPcIterator(uint32_t count, const uint8_t *start, const uint8_t *end)
          : count_(count), start_(start), end_(end), idx_(0), cur_(start_)
        {}

        bool hasMore() const
        {
            JS_ASSERT((idx_ == count_) == (cur_ == end_));
            JS_ASSERT((idx_ < count_) == (cur_ < end_));
            return cur_ < end_;
        }

        void readNext(uint32_t *scriptIdxOut, uint32_t *pcOffsetOut)
        {
            JS_ASSERT(scriptIdxOut);
            JS_ASSERT(pcOffsetOut);
            JS_ASSERT(hasMore());

            CompactBufferReader reader(cur_, end_);
            ReadScriptPc(reader, scriptIdxOut, pcOffsetOut);

            cur_ = reader.currentPosition();
            JS_ASSERT(cur_ <= end_);

            idx_++;
            JS_ASSERT_IF(idx_ == count_, cur_ == end_);
        }

        void reset() {
            idx_ = 0;
            cur_ = start_;
        }
    };

    ScriptPcIterator scriptPcIterator() const {
        // End of script+pc sequence is the start of the delta run.
        return ScriptPcIterator(scriptDepth_, scriptPcStack_, deltaRun_);
    }

    // Iterates the variable-length (nativeDelta, pcDelta) datums of the run.
    class DeltaIterator {
      private:
        const uint8_t *start_;
        const uint8_t *end_;
        const uint8_t *cur_;

      public:
        DeltaIterator(const uint8_t *start, const uint8_t *end)
          : start_(start), end_(end), cur_(start)
        {}

        bool hasMore() const
        {
            JS_ASSERT(cur_ <= end_);
            return cur_ < end_;
        }

        void readNext(uint32_t *nativeDeltaOut, int32_t *pcDeltaOut)
        {
            JS_ASSERT(nativeDeltaOut != nullptr);
            JS_ASSERT(pcDeltaOut != nullptr);

            JS_ASSERT(hasMore());

            CompactBufferReader reader(cur_, end_);
            ReadDelta(reader, nativeDeltaOut, pcDeltaOut);

            cur_ = reader.currentPosition();
            JS_ASSERT(cur_ <= end_);
        }

        void reset() {
            cur_ = start_;
        }
    };
    DeltaIterator deltaIterator() const {
        return DeltaIterator(deltaRun_, end_);
    }

    // Walk the delta run to find the pc offset active at |queryNativeOffset|,
    // starting the accumulation at |startPcOffset|.
    uint32_t findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const;
};
// View over the fixed-size tail section of the packed map (see the layout
// diagram above).  |this| points at the start of the table, which is the
// END of the payload; region offsets are backwards offsets from here.
class JitcodeIonTable
{
  private:
    /* Variable length payload section "below" here. */
    uint32_t numRegions_;
    uint32_t regionOffsets_[0];

    const uint8_t *payloadEnd() const {
        return reinterpret_cast<const uint8_t *>(this);
    }

  public:
    JitcodeIonTable(uint32_t numRegions)
      : numRegions_(numRegions)
    {
        for (uint32_t i = 0; i < numRegions; i++)
            regionOffsets_[i] = 0;
    }

    bool makeIonEntry(JSContext *cx, JitCode *code, uint32_t numScripts, JSScript **scripts,
                      JitcodeGlobalEntry::IonEntry &out);

    uint32_t numRegions() const {
        return numRegions_;
    }

    uint32_t regionOffset(uint32_t regionIndex) const {
        JS_ASSERT(regionIndex < numRegions());
        return regionOffsets_[regionIndex];
    }

    JitcodeRegionEntry regionEntry(uint32_t regionIndex) const {
        // A region's bytes run from (payloadEnd - itsOffset) up to the start
        // of the next region, or to the table itself for the last region.
        const uint8_t *regionStart = payloadEnd() - regionOffset(regionIndex);
        const uint8_t *regionEnd = payloadEnd();
        if (regionIndex < numRegions_ - 1)
            regionEnd -= regionOffset(regionIndex + 1);
        return JitcodeRegionEntry(regionStart, regionEnd);
    }

    bool regionContainsOffset(uint32_t regionIndex, uint32_t nativeOffset) {
        JS_ASSERT(regionIndex < numRegions());

        JitcodeRegionEntry ent = regionEntry(regionIndex);
        if (nativeOffset < ent.nativeOffset())
            return false;

        if (regionIndex == numRegions_ - 1)
            return true;

        return nativeOffset < regionEntry(regionIndex + 1).nativeOffset();
    }

    // Find the index of the region covering |offset|.
    uint32_t findRegionEntry(uint32_t offset) const;

    const uint8_t *payloadStart() const {
        // The beginning of the payload and the beginning of the first region are the same.
        return payloadEnd() - regionOffset(0);
    }

    static bool WriteIonTable(CompactBufferWriter &writer,
                              JSScript **scriptList, uint32_t scriptListSize,
                              const CodeGeneratorShared::NativeToBytecode *start,
                              const CodeGeneratorShared::NativeToBytecode *end,
                              uint32_t *tableOffsetOut, uint32_t *numRegionsOut);
};
} // namespace jit
} // namespace js
#endif /* jit_JitcodeMap_h */

Просмотреть файл

@ -128,6 +128,25 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
}
};
// Constructs a SIMD value with 4 components (e.g. int32x4, float32x4).
class LSimdValueX4 : public LInstructionHelper<1, 4, 0>
{
  public:
    LIR_HEADER(SimdValueX4)

    // Operands are the four lanes, in (x, y, z, w) order.
    LSimdValueX4(const LAllocation &x, const LAllocation &y,
                 const LAllocation &z, const LAllocation &w)
    {
        setOperand(0, x);
        setOperand(1, y);
        setOperand(2, z);
        setOperand(3, w);
    }

    MSimdValueX4 *mir() const {
        return mir_->toSimdValueX4();
    }
};
// Extracts an element from a given SIMD int32x4 lane.
class LSimdExtractElementI : public LInstructionHelper<1, 1, 0>
{
@ -166,6 +185,42 @@ class LSimdExtractElementF : public LInstructionHelper<1, 1, 0>
}
};
// Binary SIMD arithmetic operation between two SIMD operands.
// Base class for the type-specialized Ix4/Fx4 variants below; carries no
// LIR_HEADER itself and is never emitted directly.
class LSimdBinaryArith : public LInstructionHelper<1, 2, 0>
{
  public:
    LSimdBinaryArith() {}

    const LAllocation *lhs() {
        return getOperand(0);
    }
    const LAllocation *rhs() {
        return getOperand(1);
    }

    MSimdBinaryArith::Operation operation() const {
        return mir_->toSimdBinaryArith()->operation();
    }
    // Human-readable operation name, used by LIR spew.
    const char *extraName() const {
        return MSimdBinaryArith::OperationName(operation());
    }
};
// Binary SIMD arithmetic operation between two Int32x4 operands.
class LSimdBinaryArithIx4 : public LSimdBinaryArith
{
  public:
    LIR_HEADER(SimdBinaryArithIx4);
    LSimdBinaryArithIx4() : LSimdBinaryArith() {}
};
// Binary SIMD arithmetic operation between two Float32x4 operands.
class LSimdBinaryArithFx4 : public LSimdBinaryArith
{
  public:
    LIR_HEADER(SimdBinaryArithFx4);
    LSimdBinaryArithFx4() : LSimdBinaryArith() {}
};
// Constant 32-bit integer.
class LInteger : public LInstructionHelper<1, 0, 0>
{
@ -253,6 +308,26 @@ class LFloat32 : public LInstructionHelper<1, 0, 0>
}
};
// Constant SIMD int32x4.  The payload lives on the MIR node; codegen reads
// it via getValue().  (Note: |explicit| on a zero-argument constructor is a
// no-op, kept for symmetry with other LIR constant nodes.)
class LInt32x4 : public LInstructionHelper<1, 0, 0>
{
  public:
    LIR_HEADER(Int32x4);

    explicit LInt32x4() {}
    const SimdConstant &getValue() const { return mir_->toSimdConstant()->value(); }
};
// Constant SIMD float32x4.  The payload lives on the MIR node; codegen reads
// it via getValue().
class LFloat32x4 : public LInstructionHelper<1, 0, 0>
{
  public:
    LIR_HEADER(Float32x4);

    explicit LFloat32x4() {}
    const SimdConstant &getValue() const { return mir_->toSimdConstant()->value(); }
};
// A constant Value.
class LValue : public LInstructionHelper<BOX_PIECES, 0, 0>
{
@ -725,6 +800,10 @@ class LCheckOverRecursed : public LInstructionHelper<0, 0, 0>
LCheckOverRecursed()
{ }
MCheckOverRecursed *mir() const {
return mir_->toCheckOverRecursed();
}
};
class LCheckOverRecursedPar : public LInstructionHelper<0, 1, 1>
@ -744,6 +823,10 @@ class LCheckOverRecursedPar : public LInstructionHelper<0, 1, 1>
const LDefinition *getTempReg() {
return getTemp(0);
}
MCheckOverRecursedPar *mir() const {
return mir_->toCheckOverRecursedPar();
}
};
class LAsmJSInterruptCheck : public LInstructionHelper<0, 0, 1>
@ -804,6 +887,9 @@ class LInterruptCheckImplicit : public LInstructionHelper<0, 0, 0>
void setOolEntry(Label *oolEntry) {
oolEntry_ = oolEntry;
}
MInterruptCheck *mir() const {
return mir_->toInterruptCheck();
}
};
class LInterruptCheckPar : public LInstructionHelper<0, 1, 1>
@ -823,6 +909,9 @@ class LInterruptCheckPar : public LInstructionHelper<0, 1, 1>
const LDefinition *getTempReg() {
return getTemp(0);
}
MInterruptCheckPar *mir() const {
return mir_->toInterruptCheckPar();
}
};
class LDefVar : public LCallInstructionHelper<0, 1, 0>
@ -2767,6 +2856,10 @@ class LAddI : public LBinaryMath<0>
void setRecoversInput() {
recoversInput_ = true;
}
MAdd *mir() const {
return mir_->toAdd();
}
};
// Subtracts two integers, returning an integer value.
@ -2791,6 +2884,9 @@ class LSubI : public LBinaryMath<0>
void setRecoversInput() {
recoversInput_ = true;
}
MSub *mir() const {
return mir_->toSub();
}
};
// Performs an add, sub, mul, or div on two double values.
@ -3133,6 +3229,9 @@ class LValueToInt32 : public LInstructionHelper<1, BOX_PIECES, 2>
JS_ASSERT(mode_ == TRUNCATE);
return mir_->toTruncateToInt32();
}
MInstruction *mir() const {
return mir_->toInstruction();
}
};
// Convert a double to an int32.
@ -3187,6 +3286,10 @@ class LTruncateDToInt32 : public LInstructionHelper<1, 1, 1>
const LDefinition *tempFloat() {
return getTemp(0);
}
MTruncateToInt32 *mir() const {
return mir_->toTruncateToInt32();
}
};
// Convert a float32 to a truncated int32.
@ -3205,6 +3308,10 @@ class LTruncateFToInt32 : public LInstructionHelper<1, 1, 1>
const LDefinition *tempFloat() {
return getTemp(0);
}
MTruncateToInt32 *mir() const {
return mir_->toTruncateToInt32();
}
};
// Convert a boolean value to a string.
@ -3287,6 +3394,17 @@ class LStart : public LInstructionHelper<0, 0, 0>
LIR_HEADER(Start)
};
// No-op instruction that prints nativeOffset, script, pcOffset during codegen.
class LPcOffset : public LInstructionHelper<0, 0, 0>
{
  public:
    LIR_HEADER(PcOffset)

    const MPcOffset *mir() const {
        return mir_->toPcOffset();
    }
};
// Passed the BaselineFrame address in the OsrFrameReg by SideCannon().
// Forwards this object to the LOsrValues for Value materialization.
class LOsrEntry : public LInstructionHelper<1, 0, 0>
@ -4267,6 +4385,27 @@ class LArrayConcat : public LCallInstructionHelper<1, 2, 2>
}
};
// Array.prototype.join, lowered as a VM call: operand 0 is the array
// object, operand 1 the separator string; the result is a string.
class LArrayJoin : public LCallInstructionHelper<1, 2, 0>
{
  public:
    LIR_HEADER(ArrayJoin)

    LArrayJoin(const LAllocation &array, const LAllocation &sep) {
        setOperand(0, array);
        setOperand(1, sep);
    }

    const MArrayJoin *mir() const {
        return mir_->toArrayJoin();
    }
    const LAllocation *array() {
        return getOperand(0);
    }
    const LAllocation *separator() {
        return getOperand(1);
    }
};
// Load a typed value from a typed array's elements vector.
class LLoadTypedArrayElement : public LInstructionHelper<1, 2, 1>
{

Просмотреть файл

@ -16,8 +16,13 @@
_(Pointer) \
_(Double) \
_(Float32) \
_(SimdValueX4) \
_(Int32x4) \
_(Float32x4) \
_(SimdExtractElementI) \
_(SimdExtractElementF) \
_(SimdBinaryArithIx4) \
_(SimdBinaryArithFx4) \
_(Value) \
_(CloneLiteral) \
_(Parameter) \
@ -151,6 +156,7 @@
_(DoubleToString) \
_(ValueToString) \
_(Start) \
_(PcOffset) \
_(OsrEntry) \
_(OsrValue) \
_(OsrScopeChain) \
@ -200,6 +206,7 @@
_(ArrayPushV) \
_(ArrayPushT) \
_(ArrayConcat) \
_(ArrayJoin) \
_(StoreElementHoleV) \
_(StoreElementHoleT) \
_(LoadTypedArrayElement) \

Просмотреть файл

@ -1674,6 +1674,13 @@ LIRGenerator::visitStart(MStart *start)
return add(lir);
}
bool
LIRGenerator::visitPcOffset(MPcOffset *pcOffset)
{
    // Emit the no-op marker instruction; it exists only so codegen can
    // record the native-offset <-> (script, pc) mapping at this point.
    return add(new(alloc()) LPcOffset, pcOffset);
}
bool
LIRGenerator::visitNop(MNop *nop)
{
@ -2758,6 +2765,18 @@ LIRGenerator::visitArrayConcat(MArrayConcat *ins)
return defineReturn(lir, ins) && assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitArrayJoin(MArrayJoin *ins)
{
    // Lowered as a VM call: result is a string; operands are the array
    // object and the separator string.
    JS_ASSERT(ins->type() == MIRType_String);
    JS_ASSERT(ins->array()->type() == MIRType_Object);
    JS_ASSERT(ins->sep()->type() == MIRType_String);

    LArrayJoin *lir = new(alloc()) LArrayJoin(useRegisterAtStart(ins->array()),
                                              useRegisterAtStart(ins->sep()));
    if (!defineReturn(lir, ins))
        return false;
    return assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitStringSplit(MStringSplit *ins)
{
@ -3638,6 +3657,31 @@ LIRGenerator::visitRecompileCheck(MRecompileCheck *ins)
return assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitSimdValueX4(MSimdValueX4 *ins)
{
    // Keep all four lanes alive in registers at the start of the
    // instruction so they can be packed into one SIMD register.
    LAllocation lane0 = useRegisterAtStart(ins->getOperand(0));
    LAllocation lane1 = useRegisterAtStart(ins->getOperand(1));
    LAllocation lane2 = useRegisterAtStart(ins->getOperand(2));
    LAllocation lane3 = useRegisterAtStart(ins->getOperand(3));
    return define(new(alloc()) LSimdValueX4(lane0, lane1, lane2, lane3), ins);
}
bool
LIRGenerator::visitSimdConstant(MSimdConstant *ins)
{
    // Materialize the constant with a kind-specific LIR node; codegen
    // reads the SimdConstant payload back out of the MIR node.
    JS_ASSERT(IsSimdType(ins->type()));

    switch (ins->type()) {
      case MIRType_Int32x4:
        return define(new(alloc()) LInt32x4(), ins);
      case MIRType_Float32x4:
        return define(new(alloc()) LFloat32x4(), ins);
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown SIMD kind when generating constant");
        return false;
    }
}
bool
LIRGenerator::visitSimdExtractElement(MSimdExtractElement *ins)
{
@ -3660,6 +3704,25 @@ LIRGenerator::visitSimdExtractElement(MSimdExtractElement *ins)
return false;
}
bool
LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith *ins)
{
    // Pick the LIR node matching the SIMD element type; both variants are
    // lowered as ordinary FPU binary operations.
    JS_ASSERT(IsSimdType(ins->type()));

    switch (ins->type()) {
      case MIRType_Int32x4: {
        LSimdBinaryArithIx4 *lir = new(alloc()) LSimdBinaryArithIx4();
        return lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
      }
      case MIRType_Float32x4: {
        LSimdBinaryArithFx4 *lir = new(alloc()) LSimdBinaryArithFx4();
        return lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
      }
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown SIMD kind when adding values");
        return false;
    }
}
static void
SpewResumePoint(MBasicBlock *block, MInstruction *ins, MResumePoint *resumePoint)
{

Просмотреть файл

@ -141,6 +141,7 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitFromCharCode(MFromCharCode *ins);
bool visitStringSplit(MStringSplit *ins);
bool visitStart(MStart *start);
bool visitPcOffset(MPcOffset *pcOffset);
bool visitOsrEntry(MOsrEntry *entry);
bool visitNop(MNop *nop);
bool visitLimitedTruncate(MLimitedTruncate *nop);
@ -199,6 +200,7 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitArrayPopShift(MArrayPopShift *ins);
bool visitArrayPush(MArrayPush *ins);
bool visitArrayConcat(MArrayConcat *ins);
bool visitArrayJoin(MArrayJoin *ins);
bool visitLoadTypedArrayElement(MLoadTypedArrayElement *ins);
bool visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole *ins);
bool visitLoadTypedArrayElementStatic(MLoadTypedArrayElementStatic *ins);
@ -264,6 +266,9 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitGetDOMMember(MGetDOMMember *ins);
bool visitRecompileCheck(MRecompileCheck *ins);
bool visitSimdExtractElement(MSimdExtractElement *ins);
bool visitSimdBinaryArith(MSimdBinaryArith *ins);
bool visitSimdValueX4(MSimdValueX4 *ins);
bool visitSimdConstant(MSimdConstant *ins);
};
} // namespace jit

Просмотреть файл

@ -42,6 +42,8 @@ IonBuilder::inlineNativeCall(CallInfo &callInfo, JSFunction *target)
return inlineArrayPush(callInfo);
if (native == js::array_concat)
return inlineArrayConcat(callInfo);
if (native == js::array_join)
return inlineArrayJoin(callInfo);
if (native == js::array_splice)
return inlineArraySplice(callInfo);
@ -477,6 +479,29 @@ IonBuilder::inlineArraySplice(CallInfo &callInfo)
return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineArrayJoin(CallInfo &callInfo)
{
    // Only inline the plain one-argument form, array.join(sep), and only when
    // the observed types line up: an object |this|, a string separator, and a
    // string result.
    bool inlinable = callInfo.argc() == 1 &&
                     !callInfo.constructing() &&
                     getInlineReturnType() == MIRType_String &&
                     callInfo.thisArg()->type() == MIRType_Object &&
                     callInfo.getArg(0)->type() == MIRType_String;
    if (!inlinable)
        return InliningStatus_Error;

    callInfo.setImplicitlyUsedUnchecked();

    // Replace the native call with a dedicated MIR node.
    MArrayJoin *join = MArrayJoin::New(alloc(), callInfo.thisArg(), callInfo.getArg(0));
    current->add(join);
    current->push(join);

    return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineArrayPush(CallInfo &callInfo)
{

Просмотреть файл

@ -30,6 +30,7 @@ using namespace js::jit;
using mozilla::NumbersAreIdentical;
using mozilla::IsFloat32Representable;
using mozilla::Maybe;
using mozilla::DebugOnly;
#ifdef DEBUG
size_t MUse::index() const
@ -588,6 +589,39 @@ MConstant::canProduceFloat32() const
return true;
}
// Constant-fold an MSimdValueX4 whose four lane operands are all constants
// into a single MSimdConstant.  Returns |this| unchanged otherwise.
MDefinition*
MSimdValueX4::foldsTo(TempAllocator &alloc)
{
    DebugOnly<MIRType> scalarType = SimdTypeToScalarType(type());

    // Every lane must be a constant of the vector's scalar lane type.
    for (size_t i = 0; i < 4; ++i) {
        MDefinition *op = getOperand(i);
        if (!op->isConstant())
            return this;
        JS_ASSERT(op->type() == scalarType);
    }

    SimdConstant cst;
    switch (type()) {
      case MIRType_Int32x4: {
        int32_t a[4];
        for (size_t i = 0; i < 4; ++i)
            a[i] = getOperand(i)->toConstant()->value().toInt32();
        cst = SimdConstant::CreateX4(a);
        break;
      }
      case MIRType_Float32x4: {
        float a[4];
        for (size_t i = 0; i < 4; ++i) {
            // toNumber() yields a double; narrow explicitly so the
            // double -> float conversion doesn't trip compiler warnings
            // (e.g. MSVC C4244).  The lanes are float32 lane values, so the
            // conversion is expected to be exact.
            a[i] = float(getOperand(i)->toConstant()->value().toNumber());
        }
        cst = SimdConstant::CreateX4(a);
        break;
      }
      default: MOZ_ASSUME_UNREACHABLE("unexpected type in MSimdValueX4::foldsTo");
    }

    return MSimdConstant::New(alloc, cst, type());
}
MCloneLiteral *
MCloneLiteral::New(TempAllocator &alloc, MDefinition *obj)
{
@ -3421,6 +3455,32 @@ MBoundsCheck::foldsTo(TempAllocator &alloc)
return this;
}
// Fold foo.split(bar).join(baz) into foo.replace(bar, baz) when the array
// produced by split() is used only by this join.
MDefinition *
MArrayJoin::foldsTo(TempAllocator &alloc) {
    MDefinition *arr = array();

    // Only applies when the array operand comes straight from a
    // String.prototype.split call.
    if (!arr->isStringSplit())
        return this;

    // Tentatively mark this join as recoverable so that its own use of the
    // split result is not counted by hasLiveDefUses() below.
    this->setRecoveredOnBailout();
    if (arr->hasLiveDefUses()) {
        // The split array has other live uses; undo the speculative flag and
        // keep the join as-is.
        this->setNotRecoveredOnBailout();
        return this;
    }

    // We're replacing foo.split(bar).join(baz) by
    // foo.replace(bar, baz). MStringSplit could be recovered by
    // a bailout. As we are removing its last use, and its result
    // could be captured by a resume point, this MStringSplit will
    // be executed on the bailout path.
    MDefinition *string = arr->toStringSplit()->string();
    MDefinition *pattern = arr->toStringSplit()->separator();
    MDefinition *replacement = sep();

    setNotRecoveredOnBailout();
    return MStringReplace::New(alloc, string, pattern, replacement);
}
bool
jit::ElementAccessIsDenseNative(MDefinition *obj, MDefinition *id)
{

Просмотреть файл

@ -13,6 +13,7 @@
#define jit_MIR_h
#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"
#include "jit/CompilerRoot.h"
#include "jit/FixedList.h"
@ -1094,6 +1095,24 @@ class MStart : public MNullaryInstruction
}
};
// Records a bytecode pc offset in the MIR stream.  Presumably consumed by the
// native-to-bytecode mapping introduced alongside it (see
// MIRGenerator::isNativeToBytecodeMapEnabled) -- confirm against
// LIRGenerator::visitPcOffset.
class MPcOffset : public MNullaryInstruction
{
  private:
    MPcOffset() {
        // Guard the instruction so DCE cannot remove it even though it
        // produces no used value.
        setGuard();
    }

  public:
    INSTRUCTION_HEADER(PcOffset)
    static MPcOffset *New(TempAllocator &alloc) {
        return new(alloc) MPcOffset();
    }

    // Reads and writes no memory.
    AliasSet getAliasSet() const {
        return AliasSet::None();
    }
};
// Instruction marking on entrypoint for on-stack replacement.
// OSR may occur at loop headers (at JSOP_TRACE).
// There is at most one MOsrEntry per MIRGraph.
@ -1233,6 +1252,77 @@ class MConstant : public MNullaryInstruction
ALLOW_CLONE(MConstant)
};
// Generic constructor of SIMD valuesX4.  Builds a 4-lane SIMD value from four
// scalar operands whose MIR type must match the vector's lane type.
class MSimdValueX4 : public MQuaternaryInstruction
{
  protected:
    MSimdValueX4(MIRType type, MDefinition *x, MDefinition *y, MDefinition *z, MDefinition *w)
      : MQuaternaryInstruction(x, y, z, w)
    {
        JS_ASSERT(IsSimdType(type));

        // Every operand must already carry the vector's scalar lane type.
        mozilla::DebugOnly<MIRType> scalarType = SimdTypeToScalarType(type);
        JS_ASSERT(scalarType == x->type());
        JS_ASSERT(scalarType == y->type());
        JS_ASSERT(scalarType == z->type());
        JS_ASSERT(scalarType == w->type());

        setMovable();
        setResultType(type);
    }

  public:
    INSTRUCTION_HEADER(SimdValueX4)

    static MSimdValueX4 *New(TempAllocator &alloc, MIRType type, MDefinition *x,
                             MDefinition *y, MDefinition *z, MDefinition *w)
    {
        return new(alloc) MSimdValueX4(type, x, y, z, w);
    }

    // Pure value construction: no memory dependencies.
    AliasSet getAliasSet() const {
        return AliasSet::None();
    }

    bool congruentTo(const MDefinition *ins) const {
        return congruentIfOperandsEqual(ins);
    }

    // Folds to an MSimdConstant when all four lanes are constants (MIR.cpp).
    MDefinition *foldsTo(TempAllocator &alloc);
};
// A constant SIMD value.
class MSimdConstant : public MNullaryInstruction
{
SimdConstant value_;
protected:
MSimdConstant(const SimdConstant &v, MIRType type) : value_(v) {
JS_ASSERT(IsSimdType(type));
setResultType(type);
setMovable();
}
public:
INSTRUCTION_HEADER(SimdConstant);
static MSimdConstant *New(TempAllocator &alloc, const SimdConstant &v, MIRType type) {
return new(alloc) MSimdConstant(v, type);
}
bool congruentTo(const MDefinition *ins) const {
if (!ins->isSimdConstant())
return false;
return value() == ins->toSimdConstant()->value();
}
const SimdConstant &value() const {
return value_;
}
AliasSet getAliasSet() const {
return AliasSet::None();
}
};
// Extracts a lane element from a given vector type, given by its lane symbol.
class MSimdExtractElement : public MUnaryInstruction
{
@ -1274,6 +1364,63 @@ class MSimdExtractElement : public MUnaryInstruction
}
};
class MSimdBinaryArith : public MBinaryInstruction
{
public:
enum Operation {
Add,
Sub,
Mul,
Div
};
static const char* OperationName(Operation op) {
switch (op) {
case Add: return "Add";
case Sub: return "Sub";
case Mul: return "Mul";
case Div: return "Div";
}
MOZ_ASSUME_UNREACHABLE("unexpected operation");
}
private:
Operation operation_;
MSimdBinaryArith(MDefinition *left, MDefinition *right, Operation op, MIRType type)
: MBinaryInstruction(left, right), operation_(op)
{
JS_ASSERT_IF(type == MIRType_Int32x4, op == Add || op == Sub);
JS_ASSERT(IsSimdType(type));
JS_ASSERT(left->type() == right->type());
JS_ASSERT(left->type() == type);
setResultType(type);
setMovable();
if (op == Add || op == Mul)
setCommutative();
}
public:
INSTRUCTION_HEADER(SimdBinaryArith);
static MSimdBinaryArith *NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right,
Operation op, MIRType t)
{
return new(alloc) MSimdBinaryArith(left, right, op, t);
}
AliasSet getAliasSet() const {
return AliasSet::None();
}
Operation operation() const { return operation_; }
bool congruentTo(const MDefinition *ins) const {
if (!binaryCongruentTo(ins))
return false;
return operation_ == ins->toSimdBinaryArith()->operation();
}
};
// Deep clone a constant JSObject.
class MCloneLiteral
: public MUnaryInstruction,
@ -7166,6 +7313,39 @@ class MArrayConcat
}
};
// MIR node for Array.prototype.join: joins the elements of |array| into a
// string, separated by the string |sep|.
class MArrayJoin
    : public MBinaryInstruction,
      public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >
{
    MArrayJoin(MDefinition *array, MDefinition *sep)
        : MBinaryInstruction(array, sep)
    {
        setResultType(MIRType_String);
    }
  public:
    INSTRUCTION_HEADER(ArrayJoin)
    static MArrayJoin *New(TempAllocator &alloc, MDefinition *array, MDefinition *sep)
    {
        return new (alloc) MArrayJoin(array, sep);
    }
    TypePolicy *typePolicy() {
        return this;
    }
    // The array whose elements are joined.
    MDefinition *array() const {
        return getOperand(0);
    }
    // The separator string.
    MDefinition *sep() const {
        return getOperand(1);
    }
    // NOTE(review): marked as possibly calling out of jitcode -- presumably
    // because joining may allocate strings / convert elements; confirm.
    bool possiblyCalls() const {
        return true;
    }
    // Depends on the array's elements and its object fields (e.g. length).
    virtual AliasSet getAliasSet() const {
        return AliasSet::Load(AliasSet::Element | AliasSet::ObjectFields);
    }
    // Folds foo.split(bar).join(baz) into foo.replace(bar, baz); see MIR.cpp.
    MDefinition *foldsTo(TempAllocator &alloc);
};
class MLoadTypedArrayElement
: public MBinaryInstruction
{

Просмотреть файл

@ -76,7 +76,21 @@ class MIRGenerator
}
bool instrumentedProfiling() {
return GetIonContext()->runtime->spsProfiler().enabled();
if (!instrumentedProfilingIsCached_) {
instrumentedProfiling_ = GetIonContext()->runtime->spsProfiler().enabled();
instrumentedProfilingIsCached_ = true;
}
return instrumentedProfiling_;
}
bool isNativeToBytecodeMapEnabled() {
if (compilingAsmJS())
return false;
#ifdef DEBUG
return true;
#else
return instrumentedProfiling();
#endif
}
// Whether the main thread is trying to cancel this build.
@ -167,6 +181,9 @@ class MIRGenerator
// slots is not compatible with that.
bool modifiesFrameArguments_;
bool instrumentedProfiling_;
bool instrumentedProfilingIsCached_;
#if defined(JS_ION_PERF)
AsmJSPerfSpewer asmJSPerfSpewer_;

Просмотреть файл

@ -34,6 +34,8 @@ MIRGenerator::MIRGenerator(CompileCompartment *compartment, const JitCompileOpti
usesSimdCached_(false),
minAsmJSHeapLength_(AsmJSAllocationGranularity),
modifiesFrameArguments_(false),
instrumentedProfiling_(false),
instrumentedProfilingIsCached_(false),
options(options)
{ }

Просмотреть файл

@ -12,7 +12,10 @@ namespace jit {
#define MIR_OPCODE_LIST(_) \
_(Constant) \
_(SimdValueX4) \
_(SimdConstant) \
_(SimdExtractElement) \
_(SimdBinaryArith) \
_(CloneLiteral) \
_(Parameter) \
_(Callee) \
@ -103,6 +106,7 @@ namespace jit {
_(InitProp) \
_(InitPropGetterSetter) \
_(Start) \
_(PcOffset) \
_(OsrEntry) \
_(Nop) \
_(LimitedTruncate) \
@ -157,6 +161,7 @@ namespace jit {
_(ArrayPopShift) \
_(ArrayPush) \
_(ArrayConcat) \
_(ArrayJoin) \
_(LoadTypedArrayElement) \
_(LoadTypedArrayElementHole) \
_(LoadTypedArrayElementStatic) \

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше