Mirror of https://github.com/mozilla/gecko-dev.git
Merge autoland to mozilla-central r=merge a=merge
This commit is contained in:
Commit bf4a9b39b7
@@ -594,6 +594,33 @@ if (typeof Mozilla == "undefined") {
  });
};

/**
 * Request the browser open the "Connect Another Device" Firefox Accounts page.
 *
 * @param {Object} extraURLCampaignParams - An object containing additional
 * parameters for the URL opened by the browser for reasons of promotional
 * campaign tracking. Each attribute of the object must have a name that
 * is a string, begins with "utm_" and contains only alphanumeric
 * characters, dashes or underscores. The values may be any string and will
 * automatically be encoded.
 * @since 59
 * @example
 * // Will open https://accounts.firefox.com/connect_another_device?entrypoint=uitour
 * Mozilla.UITour.showConnectAnotherDevice();
 * @example
 * // Will open:
 * // https://accounts.firefox.com/connect_another_device?entrypoint=uitour&utm_foo=bar&utm_bar=baz
 * Mozilla.UITour.showConnectAnotherDevice({
 *   'utm_foo': 'bar',
 *   'utm_bar': 'baz'
 * });
 */
Mozilla.UITour.showConnectAnotherDevice = function(extraURLCampaignParams) {
  _sendEvent("showConnectAnotherDevice", {
    extraURLCampaignParams: JSON.stringify(extraURLCampaignParams)
  });
};

/**
 * Show a profile refresh/reset dialog, allowing users to choose to remove
 * add-ons and customizations as well as restore browser defaults, if possible.
@@ -569,6 +569,20 @@ this.UITour = {
        break;
      }

      case "showConnectAnotherDevice": {
        const url = new URL(Services.prefs.getCharPref("identity.fxaccounts.remote.connectdevice.uri"));
        url.searchParams.append("entrypoint", "uitour");
        // Call our helper to validate extraURLCampaignParams and populate URLSearchParams
        if (!this._populateCampaignParams(url, data.extraURLCampaignParams)) {
          log.warn("showConnectAnotherDevice: invalid campaign args specified");
          return false;
        }

        // We want to replace the current tab.
        browser.loadURI(url.href);
        break;
      }

      case "resetFirefox": {
        // Open a reset profile dialog window.
        if (ResetProfile.resetSupported()) {
@@ -91,6 +91,9 @@ FormAutofillPreferences.prototype = {
    addressAutofillLearnMore.setAttribute("value", this.bundle.GetStringFromName("learnMoreLabel"));
    addressAutofillCheckbox.setAttribute("label", this.bundle.GetStringFromName("autofillAddressesCheckbox"));
    savedAddressesBtn.setAttribute("label", this.bundle.GetStringFromName("savedAddressesBtnLabel"));
    // Align the start to keep the savedAddressesBtn as original size
    // when addressAutofillCheckboxGroup's height is changed by a longer l10n string
    savedAddressesBtnWrapper.setAttribute("align", "start");

    addressAutofillLearnMore.setAttribute("href", learnMoreURL);

@@ -135,6 +138,9 @@ FormAutofillPreferences.prototype = {
    creditCardAutofillLearnMore.setAttribute("value", this.bundle.GetStringFromName("learnMoreLabel"));
    creditCardAutofillCheckbox.setAttribute("label", this.bundle.GetStringFromName("autofillCreditCardsCheckbox"));
    savedCreditCardsBtn.setAttribute("label", this.bundle.GetStringFromName("savedCreditCardsBtnLabel"));
    // Align the start to keep the savedCreditCardsBtn as original size
    // when creditCardAutofillCheckboxGroup's height is changed by a longer l10n string
    savedCreditCardsBtnWrapper.setAttribute("align", "start");

    creditCardAutofillLearnMore.setAttribute("href", learnMoreURL);
@@ -57,6 +57,9 @@ let onClick = evt => {
        Mozilla.UITour.showFirefoxAccounts(null, emailInput.value);
      }
      break;
    case "onboarding-tour-sync-connect-device-button":
      Mozilla.UITour.showConnectAnotherDevice();
      break;
  }
  let classList = evt.target.classList;
  // On keyboard navigation the target would be .onboarding-tour-item.
@@ -356,11 +356,6 @@
  border: none;
}

.onboarding-tour-page.onboarding-no-button > .onboarding-tour-content {
  grid-row: tour-page-start / tour-page-end;
  grid-column: tour-content-start / tour-page-end;
}

.onboarding-tour-button-container {
  /* Get higher z-index in order to ensure buttons within container are selectable */
  z-index: 2;

@@ -368,12 +363,6 @@
  grid-column: tour-content-start / tour-page-end;
}

.onboarding-tour-page.onboarding-no-button > .onboarding-tour-button-container {
  display: none;
  grid-row: tour-page-end;
  grid-column: tour-page-end;
}

.onboarding-tour-action-button {
  background: #0060df;
  /* With 1px transparent border, could see a border in the high-contrast mode */
@@ -43,7 +43,6 @@ const ICON_STATE_DEFAULT = "default";
 * // Return a div appended with elements for this tours.
 * // Each tour should contain the following 3 sections in the div:
 * // .onboarding-tour-description, .onboarding-tour-content, .onboarding-tour-button-container.
 * // Add onboarding-no-button css class in the div if this tour does not need a button container.
 * // If there was a .onboarding-tour-action-button present and was clicked, tour would be marked as completed.
 * getPage() {},
 * },

@@ -184,7 +183,6 @@ var onboardingTourset = {
    const STATE_LOGOUT = "logged-out";
    const STATE_LOGIN = "logged-in";
    let div = win.document.createElement("div");
    div.classList.add("onboarding-no-button");
    div.dataset.loginState = STATE_LOGOUT;
    // The email validation pattern used in the form comes from IETF rfc5321,
    // which is identical to server-side checker of Firefox Account. See

@@ -207,6 +205,9 @@ var onboardingTourset = {
      </form>
      <img src="resource://onboarding/img/figure_sync.svg" role="presentation"/>
    </section>
    <aside class="onboarding-tour-button-container show-on-logged-in">
      <button id="onboarding-tour-sync-connect-device-button" class="onboarding-tour-action-button" data-l10n-id="onboarding.tour-sync.connect-device.button"></button>
    </aside>
    `;
    let emailInput = div.querySelector("#onboarding-tour-sync-email-input");
    emailInput.placeholder =
@@ -84,6 +84,7 @@ onboarding.tour-sync.form.title=Create a Firefox Account
# "Continue to Firefox Sync" instead.
onboarding.tour-sync.form.description=to continue to Firefox Sync
onboarding.tour-sync.button=Next
onboarding.tour-sync.connect-device.button=Connect Another Device
onboarding.tour-sync.email-input.placeholder=Email
onboarding.notification.onboarding-tour-sync.title=Pick up where you left off.
onboarding.notification.onboarding-tour-sync.message=Still sending yourself links to save or read on your phone? Do it the easy way: get Sync and have the things you save here show up on all of your devices.
@@ -557,14 +557,14 @@ ChannelMediaResource::GetCurrentPrincipal()

bool ChannelMediaResource::CanClone()
{
  return mCacheStream.IsAvailableForSharing();
  return !mClosed && mCacheStream.IsAvailableForSharing();
}

already_AddRefed<BaseMediaResource>
ChannelMediaResource::CloneData(MediaResourceCallback* aCallback)
{
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
  NS_ASSERTION(mCacheStream.IsAvailableForSharing(), "Stream can't be cloned");
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(CanClone(), "Stream can't be cloned");

  RefPtr<ChannelMediaResource> resource =
    new ChannelMediaResource(aCallback, nullptr, mURI);
@@ -264,12 +264,12 @@ public:
    , mNext(0)
  {
  }
  MediaCacheStream* Next()
  MediaCacheStream* Next(AutoLock& aLock)
  {
    while (mNext < mMediaCache->mStreams.Length()) {
      MediaCacheStream* stream = mMediaCache->mStreams[mNext];
      ++mNext;
      if (stream->GetResourceID() == mResourceID && !stream->IsClosed())
      if (stream->GetResourceID() == mResourceID && !stream->IsClosed(aLock))
        return stream;
    }
    return nullptr;

@@ -1578,7 +1578,7 @@ MediaCache::Update()
  // Notify streams about the suspended status changes.
  for (uint32_t i = 0; i < mSuspendedStatusToNotify.Length(); ++i) {
    MediaCache::ResourceStreamIterator iter(this, mSuspendedStatusToNotify[i]);
    while (MediaCacheStream* stream = iter.Next()) {
    while (MediaCacheStream* stream = iter.Next(lock)) {
      stream->mClient->CacheClientNotifySuspendedStatusChanged(
        stream->AreAllStreamsForResourceSuspended(lock));
    }

@@ -1701,7 +1701,7 @@ MediaCache::AllocateAndWriteBlock(AutoLock& aLock,

  // Remove all cached copies of this block
  ResourceStreamIterator iter(this, aStream->mResourceID);
  while (MediaCacheStream* stream = iter.Next()) {
  while (MediaCacheStream* stream = iter.Next(aLock)) {
    while (aStreamBlockIndex >= int32_t(stream->mBlocks.Length())) {
      stream->mBlocks.AppendElement(-1);
    }

@@ -1733,7 +1733,7 @@ MediaCache::AllocateAndWriteBlock(AutoLock& aLock,
               aStreamBlockIndex * BLOCK_SIZE);

  ResourceStreamIterator iter(this, aStream->mResourceID);
  while (MediaCacheStream* stream = iter.Next()) {
  while (MediaCacheStream* stream = iter.Next(aLock)) {
    BlockOwner* bo = block->mOwners.AppendElement();
    if (!bo) {
      // Roll back mOwners if any allocation fails.

@@ -2121,7 +2121,7 @@ MediaCacheStream::NotifyDataReceived(uint32_t aLoadID,
  }

  MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
  while (MediaCacheStream* stream = iter.Next()) {
  while (MediaCacheStream* stream = iter.Next(lock)) {
    if (stream->mStreamLength >= 0) {
      // The stream is at least as long as what we've read
      stream->mStreamLength = std::max(stream->mStreamLength, mChannelOffset);

@@ -2253,7 +2253,7 @@ MediaCacheStream::NotifyDataEndedInternal(uint32_t aLoadID,
  FlushPartialBlockInternal(lock, true);

  MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
  while (MediaCacheStream* stream = iter.Next()) {
  while (MediaCacheStream* stream = iter.Next(lock)) {
    // We read the whole stream, so remember the true length
    stream->mStreamLength = mChannelOffset;
    if (!stream->mDidNotifyDataEnded) {

@@ -2353,7 +2353,7 @@ MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock)
  MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
  // Look for a stream that's able to read the data we need
  int64_t dataOffset = -1;
  while (MediaCacheStream* stream = iter.Next()) {
  while (MediaCacheStream* stream = iter.Next(aLock)) {
    if (stream->mCacheSuspended || stream->mChannelEnded || stream->mClosed) {
      continue;
    }

@@ -2373,28 +2373,39 @@ MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock)
void
MediaCacheStream::Close()
{
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");

  if (!mMediaCache || mClosed) {
  MOZ_ASSERT(NS_IsMainThread());
  if (!mMediaCache) {
    return;
  }
  OwnerThread()->Dispatch(NS_NewRunnableFunction(
    "MediaCacheStream::Close",
    [ this, client = RefPtr<ChannelMediaResource>(mClient) ]() {
      AutoLock lock(mMediaCache->Monitor());
      CloseInternal(lock);
    }));
}

  AutoLock lock(mMediaCache->Monitor());
void
MediaCacheStream::CloseInternal(AutoLock& aLock)
{
  MOZ_ASSERT(OwnerThread()->IsOnCurrentThread());

  if (mClosed) {
    return;
  }

  // Closing a stream will change the return value of
  // MediaCacheStream::AreAllStreamsForResourceSuspended as well as
  // ChannelMediaResource::IsSuspendedByCache. Let's notify it.
  mMediaCache->QueueSuspendedStatusUpdate(lock, mResourceID);
  mMediaCache->QueueSuspendedStatusUpdate(aLock, mResourceID);

  mClosed = true;
  mMediaCache->ReleaseStreamBlocks(lock, this);
  mMediaCache->ReleaseStreamBlocks(aLock, this);
  // Wake up any blocked readers
  lock.NotifyAll();
  aLock.NotifyAll();

  // Queue an Update since we may have created more free space. Don't do
  // it from CloseInternal since that gets called by Update() itself
  // sometimes, and we try to not to queue updates from Update().
  mMediaCache->QueueUpdate(lock);
  // Queue an Update since we may have created more free space.
  mMediaCache->QueueUpdate(aLock);
}

void

@@ -2716,7 +2727,7 @@ MediaCacheStream::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
  // that is reaching EOS.
  bool foundDataInPartialBlock = false;
  MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
  while (MediaCacheStream* stream = iter.Next()) {
  while (MediaCacheStream* stream = iter.Next(lock)) {
    if (OffsetToBlockIndexUnchecked(stream->mChannelOffset) ==
          OffsetToBlockIndexUnchecked(streamOffset) &&
        stream->mChannelOffset == stream->mStreamLength) {

@@ -2862,7 +2873,6 @@ MediaCacheStream::Init(int64_t aContentLength)
nsresult
MediaCacheStream::InitAsClone(MediaCacheStream* aOriginal)
{
  MOZ_ASSERT(aOriginal->IsAvailableForSharing());
  MOZ_ASSERT(!mMediaCache, "Has been initialized.");
  MOZ_ASSERT(aOriginal->mMediaCache, "Don't clone an uninitialized stream.");
@@ -227,11 +227,10 @@ public:
  // used to create this MediaCacheStream is deleted.
  void Close();
  // This returns true when the stream has been closed.
  // Must be used on the main thread or while holding the cache lock.
  bool IsClosed() const { return mClosed; }
  bool IsClosed(AutoLock&) const { return mClosed; }
  // Returns true when this stream can be shared by a new resource load.
  // Called on the main thread only.
  bool IsAvailableForSharing() const { return !mClosed && !mIsPrivateBrowsing; }
  bool IsAvailableForSharing() const { return !mIsPrivateBrowsing; }

  // These callbacks are called on the main thread by the client
  // when data has been received via the channel.

@@ -466,6 +465,8 @@ private:

  void UpdateDownloadStatistics(AutoLock&);

  void CloseInternal(AutoLock&);

  // Instance of MediaCache to use with this MediaCacheStream.
  RefPtr<MediaCache> mMediaCache;
@@ -137,7 +137,7 @@ protected:
    res.mID3MinorVersion = 0;
    res.mID3Flags = 0;
    res.mID3Size = 115304;
    res.mDuration = 3160816;
    res.mDuration = 3166167;
    res.mDurationError = 0.001f;
    res.mSeekError = 0.02f;
    res.mSampleRate = 44100;
@@ -391,16 +391,31 @@ MP3TrackDemuxer::Duration() const
  if (mParser.VBRInfo().IsValid() && numAudioFrames.valueOr(0) + 1 > 1) {
    // VBR headers don't include the VBR header frame.
    numFrames = numAudioFrames.value() + 1;
  } else {
    const int64_t streamLen = StreamLength();
    if (streamLen < 0) {
      // Unknown length, we can't estimate duration.
      return TimeUnit::FromMicroseconds(-1);
    }
    if (AverageFrameLength() > 0) {
      numFrames = (streamLen - mFirstFrameOffset) / AverageFrameLength();
    }
    return Duration(numFrames);
  }

  const int64_t streamLen = StreamLength();
  if (streamLen < 0) { // Live streams.
    // Unknown length, we can't estimate duration.
    return TimeUnit::FromMicroseconds(-1);
  }
  // We can't early return when streamLen < 0 before checking numAudioFrames
  // since some live radio will give an opening remark before playing music
  // and the duration of the opening talk can be calculated by numAudioFrames.

  const int64_t size = streamLen - mFirstFrameOffset;
  MOZ_ASSERT(size);

  // If it's CBR, calculate the duration by bitrate.
  if (!mParser.VBRInfo().IsValid()) {
    const int32_t bitrate = mParser.CurrentFrame().Header().Bitrate();
    return media::TimeUnit::FromSeconds(static_cast<double>(size) * 8 / bitrate);
  }

  if (AverageFrameLength() > 0) {
    numFrames = size / AverageFrameLength();
  }

  return Duration(numFrames);
}
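For reference, the CBR branch added above reduces to a bits-over-bitrate computation. The short Python sketch below is not part of the patch (the function name and sample numbers are invented for illustration); it only restates the size * 8 / bitrate arithmetic used in the C++ change, assuming size is in bytes and the bitrate is in bits per second.

def estimate_cbr_duration_seconds(size_bytes, bitrate_bps):
    # Same arithmetic as the new CBR path: total bits divided by bits per second.
    return (size_bytes * 8) / float(bitrate_bps)

# e.g. roughly 262 seconds for a 4 MiB stream at 128 kbps
print(estimate_cbr_duration_seconds(4 * 1024 * 1024, 128000))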
@@ -201,7 +201,7 @@ class BackendMakeFile(object):
    actually change. We use FileAvoidWrite to accomplish this.
    """

    def __init__(self, srcdir, objdir, environment, topsrcdir, topobjdir):
    def __init__(self, srcdir, objdir, environment, topsrcdir, topobjdir, dry_run):
        self.topsrcdir = topsrcdir
        self.srcdir = srcdir
        self.objdir = objdir

@@ -211,7 +211,7 @@ class BackendMakeFile(object):

        self.xpt_name = None

        self.fh = FileAvoidWrite(self.name, capture_diff=True)
        self.fh = FileAvoidWrite(self.name, capture_diff=True, dry_run=dry_run)
        self.fh.write('# THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT.\n')
        self.fh.write('\n')

@@ -433,7 +433,7 @@ class RecursiveMakeBackend(CommonBackend):
        if obj.objdir not in self._backend_files:
            self._backend_files[obj.objdir] = \
                BackendMakeFile(obj.srcdir, obj.objdir, obj.config,
                                obj.topsrcdir, self.environment.topobjdir)
                                obj.topsrcdir, self.environment.topobjdir, self.dry_run)
        return self._backend_files[obj.objdir]

    def consume_object(self, obj):
@@ -75,13 +75,11 @@ static const char contentSandboxRules[] = R"(
      (subpath "/usr/lib")
      (subpath "/usr/share"))))

  ; Top-level directory metadata access (bug 1404298)
  (allow file-read-metadata (regex #"^/[^/]+$"))

  (allow file-read-metadata
    (literal "/etc")
    (literal "/tmp")
    (literal "/var")
    (literal "/private/etc/localtime")
    (literal "/home")
    (literal "/net")
    (regex #"^/private/tmp/KSInstallAction\."))

  ; Allow read access to standard special files.
@@ -88,6 +88,19 @@ function readFile(path) {
  return promise;
}

// Does a stat of |path| and returns a promise that resolves if the
// stat is successful. Returned object has boolean .ok to indicate
// success or failure.
function statPath(path) {
  Components.utils.import("resource://gre/modules/osfile.jsm");
  let promise = OS.File.stat(path).then(function (stat) {
    return {ok: true};
  }).catch(function (error) {
    return {ok: false};
  });
  return promise;
}

// Returns true if the current content sandbox level, passed in
// the |level| argument, supports filesystem sandboxing.
function isContentFileIOSandboxed(level) {

@@ -347,6 +360,7 @@ async function testFileAccess() {
      browser: webBrowser, // browser to run test in
      file: fontFile, // nsIFile object
      minLevel: minHomeReadSandboxLevel(), // min level to enable test
      func: readFile, // the test function to use
    });
  }
  for (let fontPath of badFontTestPaths) {

@@ -360,6 +374,7 @@ async function testFileAccess() {
      browser: webBrowser, // browser to run test in
      file: fontFile, // nsIFile object
      minLevel: minHomeReadSandboxLevel(), // min level to enable test
      func: readFile, // the test function to use
    });
  }
}

@@ -375,6 +390,7 @@ async function testFileAccess() {
      browser: webBrowser, // browser to run test in
      file: profileDir, // nsIFile object
      minLevel: minProfileReadSandboxLevel(), // min level to enable test
      func: readDir,
    });
  }
  if (fileContentProcessEnabled) {

@@ -384,6 +400,7 @@ async function testFileAccess() {
      browser: fileBrowser,
      file: profileDir,
      minLevel: 0,
      func: readDir,
    });
  }

@@ -394,6 +411,7 @@ async function testFileAccess() {
    browser: webBrowser,
    file: homeDir,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });
  if (fileContentProcessEnabled) {
    tests.push({

@@ -402,6 +420,7 @@ async function testFileAccess() {
      browser: fileBrowser,
      file: homeDir,
      minLevel: 0,
      func: readDir,
    });
  }

@@ -412,6 +431,7 @@ async function testFileAccess() {
    browser: webBrowser,
    file: sysExtDevDir,
    minLevel: 0,
    func: readDir,
  });

  if (isWin()) {

@@ -422,6 +442,7 @@ async function testFileAccess() {
      browser: webBrowser,
      file: extDir,
      minLevel: minHomeReadSandboxLevel(),
      func: readDir,
    });
  }

@@ -445,6 +466,7 @@ async function testFileAccess() {
      browser: webBrowser,
      file: homeTempDir,
      minLevel,
      func: readDir,
    });
  }
}

@@ -465,6 +487,7 @@ async function testFileAccess() {
    browser: webBrowser,
    file: varDir,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });
  if (fileContentProcessEnabled) {
    tests.push({

@@ -473,6 +496,7 @@ async function testFileAccess() {
      browser: fileBrowser,
      file: varDir,
      minLevel: 0,
      func: readDir,
    });
  }
}

@@ -493,6 +517,7 @@ async function testFileAccess() {
    browser: webBrowser,
    file: macTempDir,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });
  if (fileContentProcessEnabled) {
    tests.push({

@@ -501,6 +526,7 @@ async function testFileAccess() {
      browser: fileBrowser,
      file: macTempDir,
      minLevel: 0,
      func: readDir,
    });
  }

@@ -512,6 +538,7 @@ async function testFileAccess() {
    browser: webBrowser,
    file: volumes,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });
  // Test that we cannot read from /Network at level 3
  let network = GetDir("/Network");

@@ -521,6 +548,7 @@ async function testFileAccess() {
    browser: webBrowser,
    file: network,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });
  // Test that we cannot read from /Users at level 3
  let users = GetDir("/Users");

@@ -530,6 +558,69 @@ async function testFileAccess() {
    browser: webBrowser,
    file: users,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });

  // Test that we can stat /Users at level 3
  tests.push({
    desc: "/Users",
    ok: true,
    browser: webBrowser,
    file: users,
    minLevel: minHomeReadSandboxLevel(),
    func: statPath,
  });

  // Test that we can stat /Library at level 3, but can't
  // stat something within /Library. This test uses "/Library"
  // because it's a path that is expected to always be present
  // and isn't something content processes have read access to
  // (just read-metadata).
  let libraryDir = GetDir("/Library");
  tests.push({
    desc: "/Library",
    ok: true,
    browser: webBrowser,
    file: libraryDir,
    minLevel: minHomeReadSandboxLevel(),
    func: statPath,
  });
  tests.push({
    desc: "/Library",
    ok: false,
    browser: webBrowser,
    file: libraryDir,
    minLevel: minHomeReadSandboxLevel(),
    func: readDir,
  });
  let libraryWidgetsDir = GetDir("/Library/Widgets");
  tests.push({
    desc: "/Library/Widgets",
    ok: false,
    browser: webBrowser,
    file: libraryWidgetsDir,
    minLevel: minHomeReadSandboxLevel(),
    func: statPath,
  });

  // Similarly, test that we can stat /private, but not /private/etc.
  let privateDir = GetDir("/private");
  tests.push({
    desc: "/private",
    ok: true,
    browser: webBrowser,
    file: privateDir,
    minLevel: minHomeReadSandboxLevel(),
    func: statPath,
  });
  let privateEtcDir = GetFile("/private/etc");
  tests.push({
    desc: "/private/etc",
    ok: false,
    browser: webBrowser,
    file: privateEtcDir,
    minLevel: minHomeReadSandboxLevel(),
    func: statPath,
  });
}

@@ -541,6 +632,7 @@ async function testFileAccess() {
      browser: webBrowser,
      file: extensionsDir,
      minLevel: 0,
      func: readDir,
    });
  } else {
    ok(false, `${extensionsDir.path} is a valid dir`);

@@ -554,6 +646,7 @@ async function testFileAccess() {
      browser: webBrowser,
      file: chromeDir,
      minLevel: 0,
      func: readDir,
    });
  } else {
    ok(false, `${chromeDir.path} is valid dir`);

@@ -570,6 +663,7 @@ async function testFileAccess() {
      browser: webBrowser,
      file: cookiesFile,
      minLevel: minProfileReadSandboxLevel(),
      func: readFile,
    });
  }
  if (fileContentProcessEnabled) {

@@ -579,6 +673,7 @@ async function testFileAccess() {
      browser: fileBrowser,
      file: cookiesFile,
      minLevel: 0,
      func: readFile,
    });
  }
} else {

@@ -589,12 +684,17 @@ async function testFileAccess() {
  tests = tests.filter((test) => (test.minLevel <= level));

  for (let test of tests) {
    let testFunc = test.file.isDirectory() ? readDir : readFile;
    let okString = test.ok ? "allowed" : "blocked";
    let processType = test.browser === webBrowser ? "web" : "file";

    // ensure the file/dir exists before we ask a content process to stat
    // it so we know a failure is not due to a nonexistent file/dir
    if (test.func === statPath) {
      ok(test.file.exists(), `${test.file.path} exists`);
    }

    let result = await ContentTask.spawn(test.browser, test.file.path,
                                         testFunc);
                                         test.func);

    ok(result.ok == test.ok,
       `reading ${test.desc} from a ${processType} process ` +

@@ -602,7 +702,7 @@ async function testFileAccess() {

    // if the directory is not expected to be readable,
    // ensure the listing has zero entries
    if (test.file.isDirectory() && !test.ok) {
    if (test.func === readDir && !test.ok) {
      ok(result.numEntries == 0, `directory list is empty (${test.file.path})`);
    }
  }

@@ -11,7 +11,7 @@ use azure::azure_hl::SurfacePattern;
use canvas_traits::canvas::*;
use cssparser::RGBA;
use euclid::{Transform2D, Point2D, Vector2D, Rect, Size2D};
use ipc_channel::ipc::{self, IpcSender, IpcReceiver};
use ipc_channel::ipc::{self, IpcSender};
use num_traits::ToPrimitive;
use std::borrow::ToOwned;
use std::mem;
@@ -355,13 +355,14 @@ linux-rusttests/opt:
    product: firefox
    job-name: linux-rusttests-opt
    treeherder:
        platform: linux32-rusttests/opt
        platform: linux32/opt
        symbol: tc(BR)
        tier: 2
    worker-type: aws-provisioner-v1/gecko-{level}-b-linux
    worker:
        max-run-time: 5400
        env:
            PERFHERDER_EXTRA_OPTIONS: rusttests
            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux32/releng.manifest"
    run:
        using: mozharness

@@ -389,13 +390,14 @@ linux-rusttests/debug:
    product: firefox
    job-name: linux-rusttests-debug
    treeherder:
        platform: linux32-rusttests/debug
        platform: linux32/debug
        symbol: tc(BR)
        tier: 2
    worker-type: aws-provisioner-v1/gecko-{level}-b-linux
    worker:
        max-run-time: 5400
        env:
            PERFHERDER_EXTRA_OPTIONS: rusttests
            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux32/releng.manifest"
    run:
        using: mozharness

@@ -698,13 +700,14 @@ linux64-rusttests/opt:
    product: firefox
    job-name: linux64-rusttests-opt
    treeherder:
        platform: linux64-rusttests/opt
        platform: linux64/opt
        symbol: tc(BR)
        tier: 2
    worker-type: aws-provisioner-v1/gecko-{level}-b-linux
    worker:
        max-run-time: 5400
        env:
            PERFHERDER_EXTRA_OPTIONS: rusttests
            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux64/releng.manifest"
    run:
        using: mozharness

@@ -732,13 +735,14 @@ linux64-rusttests/debug:
    product: firefox
    job-name: linux64-rusttests-debug
    treeherder:
        platform: linux64-rusttests/debug
        platform: linux64/debug
        symbol: tc(BR)
        tier: 2
    worker-type: aws-provisioner-v1/gecko-{level}-b-linux
    worker:
        max-run-time: 5400
        env:
            PERFHERDER_EXTRA_OPTIONS: rusttests
            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux64/releng.manifest"
    run:
        using: mozharness
@@ -424,13 +424,14 @@ win32-rusttests/opt:
    product: firefox
    job-name: win32-rusttests-opt
    treeherder:
        platform: windows2012-32-rusttests/opt
        platform: windows2012-32/opt
        symbol: tc(BR)
        tier: 2
    worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
    worker:
        max-run-time: 7200
        env:
            PERFHERDER_EXTRA_OPTIONS: rusttests
            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/win32/releng.manifest"
    run:
        using: mozharness

@@ -453,13 +454,14 @@ win64-rusttests/opt:
    product: firefox
    job-name: win64-rusttests-opt
    treeherder:
        platform: windows2012-64-rusttests/opt
        platform: windows2012-64/opt
        symbol: tc(BR)
        tier: 2
    worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
    worker:
        max-run-time: 7200
        env:
            PERFHERDER_EXTRA_OPTIONS: rusttests
            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/win64/releng.manifest"
    run:
        using: mozharness
@@ -46,8 +46,7 @@ job-template:
        using: run-task
        command: >
            cd /builds/worker/checkouts/gecko &&
            wget https://queue.taskcluster.net/v1/task/${ARTIFACT_TASKID}/artifacts/public/build/target.crashreporter-symbols-full.zip &&
            ./mach python toolkit/crashreporter/tools/upload_symbols.py target.crashreporter-symbols-full.zip
            ./mach python toolkit/crashreporter/tools/upload_symbols.py https://queue.taskcluster.net/v1/task/${ARTIFACT_TASKID}/artifacts/public/build/target.crashreporter-symbols-full.zip
        sparse-profile: upload-symbols
    optimization:
        only-if-dependencies-run: null
@@ -13,13 +13,13 @@ if [ -f /etc/lsb-release ]; then

if [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "16.04" ]; then
    HG_DEB=1
    HG_DIGEST=dd4dd7759fe73985b6a0424b34a3036d130c26defdd866a9fdd7302e40c7417433b93f020497ceb40593eaead8e86be55e48340887015645202b47ff7b0d7ac6
    HG_SIZE=181722
    HG_FILENAME=mercurial_4.3.1_amd64.deb
    HG_DIGEST=458746bd82b4732c72c611f1041f77a47a683bc75ff3f6ab7ed86ea394f48d94cd7e2d3d1d5b020906318a9a24bea27401a3a63d7e645514dbc2cb581621977f
    HG_SIZE=193710
    HG_FILENAME=mercurial_4.4.2_amd64.deb

    HG_COMMON_DIGEST=045f7e07f1e2e0fef767b2f50a7e9ab37d5da0bfead5ddf473ae044b61a4566aed2d6f2706f52d227947d713ef8e89eb9a269288f08e52924e4de88a39cd7ac0
    HG_COMMON_SIZE=2017628
    HG_COMMON_FILENAME=mercurial-common_4.3.1_all.deb
    HG_COMMON_DIGEST=8074efbfff974f0bbdd0c3be3d272cc7a634456921e04db31369fbec1c9256ddaf44bdbe120f6f33113d2be9324a1537048028ebaaf205c6659e476a757358fd
    HG_COMMON_SIZE=2097892
    HG_COMMON_FILENAME=mercurial-common_4.4.2_all.deb
elif [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "12.04" ]; then
    echo "Ubuntu 12.04 not supported"
    exit 1

@@ -100,15 +100,15 @@ elif [ -n "${PIP_PATH}" ]; then
    tooltool_fetch <<EOF
[
  {
    "size": 5475042,
    "digest": "4c42d06b7f111a3e825dd927704a30f88f0b2225cf87ab8954bf53a7fbc0edf561374dd49b13d9c10140d98ff5853a64acb5a744349727abae81d32da401922b",
    "size": 5647013,
    "digest": "3d1d103689eac4f50cc1005be44144b37d75ebfac3ff3b4fc90d6f41fbee46e107a168d04f2c366ce7cca2733ea4e5b5127df462af8e253f61a72f8938833993",
    "algorithm": "sha512",
    "filename": "mercurial-4.3.1.tar.gz"
    "filename": "mercurial-4.4.2.tar.gz"
  }
]
EOF

    ${PIP_PATH} install mercurial-4.3.1.tar.gz
    ${PIP_PATH} install mercurial-4.4.2.tar.gz
else
    echo "Do not know how to install Mercurial on this OS"
    exit 1
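As an aside on the manifest change above: each tooltool entry pairs the file's byte size with its sha512 digest. The Python sketch below is not from the repository; it is only a hedged illustration of how such an entry's fields could be computed for a local tarball (the helper name and example filename are assumptions).

import hashlib
import json
import os

def manifest_entry(path):
    # Hash the file and record its size, mirroring the size/digest/algorithm/filename fields above.
    with open(path, 'rb') as f:
        digest = hashlib.sha512(f.read()).hexdigest()
    return {
        'size': os.path.getsize(path),
        'digest': digest,
        'algorithm': 'sha512',
        'filename': os.path.basename(path),
    }

print(json.dumps([manifest_entry('mercurial-4.4.2.tar.gz')], indent=2))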
@@ -37,18 +37,34 @@ from mercurial import (
    util,
)

testedwith = '3.7 3.8 3.9 4.0 4.1 4.2 4.3'
# TRACKING hg43
try:
    from mercurial import configitems
except ImportError:
    configitems = None

testedwith = '3.7 3.8 3.9 4.0 4.1 4.2 4.3 4.4'
minimumhgversion = '3.7'

cmdtable = {}

# Mercurial 4.3 introduced registrar.command as a replacement for
# TRACKING hg43 Mercurial 4.3 introduced registrar.command as a replacement for
# cmdutil.command.
if util.safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
else:
    command = cmdutil.command(cmdtable)

# TRACKING hg43 Mercurial 4.3 introduced the config registrar. 4.4 requires
# config items to be registered to avoid a devel warning
if util.safehasattr(registrar, 'configitem'):
    configtable = {}
    configitem = registrar.configitem(configtable)

    configitem('robustcheckout', 'retryjittermin', default=configitems.dynamicdefault)
    configitem('robustcheckout', 'retryjittermax', default=configitems.dynamicdefault)


# Mercurial 4.2 introduced the vfs module and deprecated the symbol in
# scmutil.
def getvfs():

@@ -206,9 +222,9 @@ def robustcheckout(ui, url, dest, upstream=None, revision=None, branch=None,
    # However, given that sparse has performance implications, we want to fail
    # fast if we can't satisfy the desired checkout request.
    if sparseprofile:
        if util.versiontuple(n=2) != (4, 3):
        if util.versiontuple(n=2) not in ((4, 3), (4, 4)):
            raise error.Abort('sparse profile support only available for '
                              'Mercurial 4.3 (using %s)' % util.version())
                              'Mercurial versions greater than 4.3 (using %s)' % util.version())

        try:
            extensions.find('sparse')

@@ -545,7 +561,7 @@ def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
    try:
        old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
        if old_sparse_fn is not None:
            assert util.versiontuple(n=2) == (4, 3)
            assert util.versiontuple(n=2) in ((4, 3), (4, 4))
            repo.dirstate._sparsematchfn = lambda: matchmod.always(repo.root, '')

        if purgeext.purge(ui, repo, all=True, abort_on_err=True,

@@ -4,14 +4,14 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This script uploads a symbol zip file passed on the commandline
# to the Tecken symbol upload API at https://symbols.mozilla.org/ .
# This script uploads a symbol zip file from a path or URL passed on the commandline
# to the symbol server at https://symbols.mozilla.org/ .
#
# Using this script requires you to have generated an authentication
# token in the Tecken web interface. You must store the token in a Taskcluster
# token in the symbol server web interface. You must store the token in a Taskcluster
# secret as the JSON blob `{"token": "<token>"}` and set the `SYMBOL_SECRET`
# environment variable to the name of the Taskcluster secret. Alternatively,
# you can pu the token in a file and set SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE
# environment variable to the name of the Taskcluster secret. Alternately,
# you can put the token in a file and set `SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE`
# environment variable to the path to the file.

from __future__ import absolute_import, print_function, unicode_literals

@@ -68,14 +68,15 @@ def main():
    parser = argparse.ArgumentParser(
        description='Upload symbols in ZIP using token from Taskcluster secrets service.')
    parser.add_argument('zip',
                        help='Symbols zip file')
                        help='Symbols zip file - URL or path to local file')
    args = parser.parse_args()

    if not os.path.isfile(args.zip):
    if not args.zip.startswith('http') and not os.path.isfile(args.zip):
        log.error('Error: zip file "{0}" does not exist!'.format(args.zip),
                  file=sys.stderr)
        return 1

    secret_name = os.environ.get('SYMBOL_SECRET')
    if secret_name is not None:
        auth_token = get_taskcluster_secret(secret_name)

@@ -105,12 +106,19 @@ def main():
    for i, _ in enumerate(redo.retrier(attempts=MAX_RETRIES), start=1):
        log.info('Attempt %d of %d...' % (i, MAX_RETRIES))
        try:
            if args.zip.startswith('http'):
                zip_arg = {'data': {'url': args.zip}}
            else:
                zip_arg = {'files': {'symbols.zip': open(args.zip, 'rb')}}
            r = requests.post(
                url,
                files={'symbols.zip': open(args.zip, 'rb')},
                headers={'Auth-Token': auth_token},
                allow_redirects=False,
                timeout=120)
                # Allow a longer read timeout because uploading by URL means the server
                # has to fetch the entire zip file, which can take a while. The load balancer
                # in front of symbols.mozilla.org has a 300 second timeout, so we'll use that.
                timeout=(10, 300),
                **zip_arg)
            # 500 is likely to be a transient failure.
            # Break out for success or other error codes.
            if r.status_code < 500:
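To summarize the upload_symbols.py change in isolation: the script now accepts either a local zip path or an http(s) URL, and chooses between a multipart file upload and a URL payload accordingly. The Python snippet below is a minimal sketch of that branch rather than the script itself; build_zip_arg and zip_spec are made-up names for illustration.

def build_zip_arg(zip_spec):
    # URL uploads ask the symbol server to fetch the zip itself; local paths are streamed as multipart data.
    if zip_spec.startswith('http'):
        return {'data': {'url': zip_spec}}
    return {'files': {'symbols.zip': open(zip_spec, 'rb')}}

# Used roughly as: requests.post(url, headers={'Auth-Token': token}, timeout=(10, 300), **build_zip_arg(args.zip))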