Merge autoland to mozilla-central. a=merge

shindli 2018-11-10 23:35:27 +02:00
Parents edecf57f25 d94c11cf32
Commit 56a9ea2d0f
29 changed files with 441 additions and 442 deletions

View file

@ -178,9 +178,7 @@ let REQUEST_2 = {
},
},
],
data: {
supportedTypes: "credit",
},
data: {},
},
],
error: "",

View file

@ -250,7 +250,7 @@ var paymentRequest = {
}
let modifier = modifiers.find(m => {
// take the first matching modifier
// TODO (bug 1429198): match on supportedTypes and supportedNetworks
// TODO (bug 1429198): match on supportedNetworks
return m.supportedMethods == "basic-card";
});
return modifier || null;

View file

@ -421,9 +421,7 @@ var PaymentTestUtils = {
label: "Total due",
amount: { currency: "USD", value: "2.50" },
},
data: {
supportedTypes: "credit",
},
data: {},
},
{
additionalDisplayItems: [

View file

@ -1,47 +1,67 @@
{
"en-CA": {
"platforms": [
"linux",
"linux64",
"macosx64",
"win32",
"win64"
],
"linux",
"linux-devedition",
"linux64",
"linux64-devedition",
"macosx64",
"macosx64-devedition",
"win32",
"win32-devedition",
"win64",
"win64-devedition"
],
"revision": "default"
},
},
"he": {
"platforms": [
"linux",
"linux64",
"macosx64",
"win32",
"win64"
],
"linux",
"linux-devedition",
"linux64",
"linux64-devedition",
"macosx64",
"macosx64-devedition",
"win32",
"win32-devedition",
"win64",
"win64-devedition"
],
"revision": "default"
},
},
"it": {
"platforms": [
"linux",
"linux64",
"macosx64",
"win32",
"win64"
],
"linux",
"linux-devedition",
"linux64",
"linux64-devedition",
"macosx64",
"macosx64-devedition",
"win32",
"win32-devedition",
"win64",
"win64-devedition"
],
"revision": "default"
},
},
"ja": {
"platforms": [
"linux",
"linux64",
"win32",
"win64"
],
"linux",
"linux-devedition",
"linux64",
"linux64-devedition",
"win32",
"win32-devedition",
"win64",
"win64-devedition"
],
"revision": "default"
},
},
"ja-JP-mac": {
"platforms": [
"macosx64"
],
"macosx64",
"macosx64-devedition"
],
"revision": "default"
}
}

View file

@ -108,20 +108,6 @@ function checkComplexRequest(payRequest) {
supportedNetworks[idx] + "'.");
}
}
const supportedTypes = data.supportedTypes;
const expectedSupportedTypes = ["prepaid", "debit", "credit"];
if (supportedTypes.length != expectedSupportedTypes.length) {
emitTestFail("supportedTypes.length should be '" +
expectedSupportedTypes.length + "', but got '" +
supportedTypes.length + "'.");
}
for (let idx = 0; idx < supportedTypes.length; idx++) {
if (supportedTypes[idx] != expectedSupportedTypes[idx]) {
emitTestFail("supportedTypes[" + idx + "] should be '" +
expectedSupportedTypes[idx] + "', but got '" +
supportedTypes[idx] + "'.");
}
}
// checking the passed PaymentDetails parameter
const details = payRequest.paymentDetails;
if (details.id != "payment details" ) {

View file

@ -5,7 +5,6 @@ const defaultMethods = [{
supportedNetworks: ['unionpay', 'visa', 'mastercard', 'amex', 'discover',
'diners', 'jcb', 'mir',
],
supportedTypes: ['prepaid', 'debit', 'credit'],
},
}, {
supportedMethods: "testing-payment-method",

View file

@ -21,13 +21,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1375345
}
gScript.addMessageListener("test-fail", testFailHandler);
const errorTypesMethods = [{
supportedMethods: "basic-card",
data: {
supportedTypes: ["myType"],
},
}];
const errorNetworksMethods = [{
supportedMethods: "basic-card",
data: {
@ -55,7 +48,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1375345
supportedNetworks: ["unionpay", "visa", "mastercard", "amex", "discover",
"diners", "jcb", "mir",
],
supportedTypes: ["prepaid", "debit", "credit"],
},
}];
const defaultDetails = {
@ -97,19 +89,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1375345
shippingType: "shipping"
};
function testBasicCardRequestWithErrorTypes() {
return new Promise((resolve, reject) => {
try {
const payRequest = new PaymentRequest(errorTypesMethods, defaultDetails, defaultOptions);
ok(false, "Expected 'TypeError', but got success construction.");
resolve();
} catch (e) {
is(e.name, "TypeError", "Expected TypeError, but got " + e.name);
resolve();
}
});
}
function testBasicCardRequestWithErrorNetworks() {
return new Promise((resolve, reject) => {
try {
@ -282,8 +261,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1375345
}
function runTests() {
testBasicCardRequestWithErrorTypes()
.then(testBasicCardRequestWithErrorNetworks)
testBasicCardRequestWithErrorNetworks()
.then(testBasicCardRequestWithUnconvertableData)
.then(testBasicCardRequestWithNullData)
.then(testBasicCardRequestWithEmptyData)

View file

@ -40,7 +40,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1345361
supportedNetworks: ['unionpay', 'visa', 'mastercard', 'amex', 'discover',
'diners', 'jcb', 'mir',
],
supportedTypes: ['prepaid', 'debit', 'credit'],
},
}];

View file

@ -27,7 +27,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1436903
supportedNetworks: ['unionpay', 'visa', 'mastercard', 'amex', 'discover',
'diners', 'jcb', 'mir',
],
supportedTypes: ['prepaid', 'debit', 'credit'],
},
}, {
supportedMethods: "testing-payment-method",

View file

@ -33,7 +33,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1345366
supportedNetworks: ['unionpay', 'visa', 'mastercard', 'amex', 'discover',
'diners', 'jcb', 'mir',
],
supportedTypes: ['prepaid', 'debit', 'credit'],
},
}, {
supportedMethods: "testing-payment-method",

View file

@ -6,15 +6,9 @@
* The origin of this WebIDL file is
* https://www.w3.org/TR/payment-request/#paymentrequest-interface
*/
enum BasicCardType {
"credit",
"debit",
"prepaid"
};
dictionary BasicCardRequest {
sequence<DOMString> supportedNetworks;
sequence<BasicCardType> supportedTypes;
};
dictionary BasicCardResponse {

View file

@ -818,6 +818,7 @@ pub struct Device {
resource_override_path: Option<PathBuf>,
max_texture_size: u32,
max_texture_layers: u32,
renderer_name: String,
cached_programs: Option<Rc<ProgramCache>>,
@ -903,10 +904,13 @@ impl Device {
cached_programs: Option<Rc<ProgramCache>>,
) -> Device {
let mut max_texture_size = [0];
let mut max_texture_layers = [0];
unsafe {
gl.get_integer_v(gl::MAX_TEXTURE_SIZE, &mut max_texture_size);
gl.get_integer_v(gl::MAX_ARRAY_TEXTURE_LAYERS, &mut max_texture_layers);
}
let max_texture_size = max_texture_size[0] as u32;
let max_texture_layers = max_texture_layers[0] as u32;
let renderer_name = gl.get_string(gl::RENDERER);
let mut extension_count = [0];
@ -1022,6 +1026,7 @@ impl Device {
depth_available: true,
max_texture_size,
max_texture_layers,
renderer_name,
cached_programs,
frame_id: GpuFrameId(0),
@ -1046,10 +1051,23 @@ impl Device {
self.cached_programs = Some(cached_programs);
}
/// Ensures that the maximum texture size is less than or equal to the
/// provided value. If the provided value is greater than the value supported
/// by the driver, the latter is used.
pub fn clamp_max_texture_size(&mut self, size: u32) {
self.max_texture_size = self.max_texture_size.min(size);
}
/// Returns the limit on texture dimensions (width or height).
pub fn max_texture_size(&self) -> u32 {
self.max_texture_size
}
/// Returns the limit on texture array layers.
pub fn max_texture_layers(&self) -> usize {
self.max_texture_layers as usize
}
#[cfg(feature = "debug_renderer")]
pub fn get_capabilities(&self) -> &Capabilities {
&self.capabilities
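
As an aside on the new Device texture-limit API shown above: the sketch below is a standalone illustration of the clamping behaviour. The struct, field values, and main() are assumptions made for the example, not the real webrender types.

struct DeviceLimits {
    max_texture_size: u32,
    max_texture_layers: usize,
}

impl DeviceLimits {
    // Mirrors clamp_max_texture_size: a caller-supplied limit can lower the
    // reported maximum but never raise it above the driver value.
    fn clamp_max_texture_size(&mut self, size: u32) {
        self.max_texture_size = self.max_texture_size.min(size);
    }
}

fn main() {
    // Hypothetical driver-reported limits.
    let mut limits = DeviceLimits { max_texture_size: 16384, max_texture_layers: 2048 };
    limits.clamp_max_texture_size(8192);
    assert_eq!(limits.max_texture_size, 8192); // the smaller user limit wins
    limits.clamp_max_texture_size(32768);
    assert_eq!(limits.max_texture_size, 8192); // a larger request cannot raise it back
    println!("size limit: {}, layer limit: {}", limits.max_texture_size, limits.max_texture_layers);
}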

View file

@ -732,7 +732,7 @@ mod test_glyph_rasterizer {
let mut glyph_rasterizer = GlyphRasterizer::new(workers).unwrap();
let mut glyph_cache = GlyphCache::new();
let mut gpu_cache = GpuCache::new();
let mut texture_cache = TextureCache::new(2048);
let mut texture_cache = TextureCache::new(2048, 1024);
let mut render_task_cache = RenderTaskCache::new();
let mut render_task_tree = RenderTaskTree::new(FrameId::invalid());
let mut special_render_passes = SpecialRenderPasses::new(&DeviceIntSize::new(1366, 768));
@ -786,7 +786,7 @@ mod test_glyph_rasterizer {
glyph_rasterizer.resolve_glyphs(
&mut glyph_cache,
&mut TextureCache::new(4096),
&mut TextureCache::new(4096, 1024),
&mut gpu_cache,
&mut render_task_cache,
&mut render_task_tree,

View file

@ -1525,7 +1525,6 @@ pub struct Renderer {
pub gpu_glyph_renderer: GpuGlyphRenderer,
max_texture_size: u32,
max_recorded_profiles: usize,
clear_color: Option<ColorF>,
@ -1669,25 +1668,23 @@ impl Renderer {
device.supports_extension("GL_ARB_blend_func_extended") &&
device.supports_extension("GL_ARB_explicit_attrib_location");
let device_max_size = device.max_texture_size();
// 512 is the minimum that the texture cache can work with.
// Broken GL contexts can return a max texture size of zero (See #1260). Better to
// gracefully fail now than panic as soon as a texture is allocated.
let min_texture_size = 512;
if device_max_size < min_texture_size {
const MIN_TEXTURE_SIZE: u32 = 512;
if let Some(user_limit) = options.max_texture_size {
assert!(user_limit >= MIN_TEXTURE_SIZE);
device.clamp_max_texture_size(user_limit);
}
if device.max_texture_size() < MIN_TEXTURE_SIZE {
// Broken GL contexts can return a max texture size of zero (See #1260).
// Better to gracefully fail now than panic as soon as a texture is allocated.
error!(
"Device reporting insufficient max texture size ({})",
device_max_size
device.max_texture_size()
);
return Err(RendererError::MaxTextureSize);
}
let max_device_size = cmp::max(
cmp::min(
device_max_size,
options.max_texture_size.unwrap_or(device_max_size),
),
min_texture_size,
);
let max_texture_size = device.max_texture_size();
let max_texture_layers = device.max_texture_layers();
register_thread_with_profiler("Compositor".to_owned());
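
A standalone sketch of the startup validation above, for readers skimming the diff: clamp to an optional embedder limit first, then reject devices whose reported maximum (possibly zero on broken GL contexts) is below the 512-pixel minimum the texture cache needs. The function and error enum below are illustrative stand-ins, not the real webrender types.

const MIN_TEXTURE_SIZE: u32 = 512;

#[derive(Debug)]
enum RendererError {
    MaxTextureSize,
}

fn effective_max_texture_size(
    device_reported: u32,
    user_limit: Option<u32>,
) -> Result<u32, RendererError> {
    let mut size = device_reported;
    if let Some(limit) = user_limit {
        assert!(limit >= MIN_TEXTURE_SIZE); // embedder limits below 512 are a bug
        size = size.min(limit);             // clamp, never raise
    }
    if size < MIN_TEXTURE_SIZE {
        // e.g. a broken GL context reporting a maximum of zero
        return Err(RendererError::MaxTextureSize);
    }
    Ok(size)
}

fn main() {
    assert_eq!(effective_max_texture_size(16384, Some(8192)).unwrap(), 8192);
    assert!(effective_max_texture_size(0, None).is_err());
    println!("validation sketch ok");
}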
@ -1936,7 +1933,11 @@ impl Renderer {
thread_listener.thread_started(&rb_thread_name);
}
let texture_cache = TextureCache::new(max_device_size);
let texture_cache = TextureCache::new(
max_texture_size,
max_texture_layers,
);
let resource_cache = ResourceCache::new(
texture_cache,
glyph_rasterizer,
@ -1992,7 +1993,6 @@ impl Renderer {
new_scene_indicator: ChangeIndicator::new(),
#[cfg(feature = "debug_renderer")]
slow_frame_indicator: ChangeIndicator::new(),
max_texture_size: max_device_size,
max_recorded_profiles: options.max_recorded_profiles,
clear_color: options.clear_color,
enable_clear_scissor: options.enable_clear_scissor,
@ -2041,7 +2041,7 @@ impl Renderer {
}
pub fn get_max_texture_size(&self) -> u32 {
self.max_texture_size
self.device.max_texture_size()
}
pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
@ -2751,7 +2751,7 @@ impl Renderer {
(count + list.blocks.len(), cmp::max(height, list.height))
});
if max_requested_height > self.max_texture_size && !self.gpu_cache_overflow {
if max_requested_height > self.get_max_texture_size() && !self.gpu_cache_overflow {
self.gpu_cache_overflow = true;
self.renderer_errors.push(RendererError::MaxTextureSize);
}

View file

@ -2037,8 +2037,10 @@ impl ResourceCache {
self.cached_glyph_dimensions.clear();
self.cached_images.clear();
self.cached_render_tasks.clear();
let max_texture_size = self.texture_cache.max_texture_size();
self.texture_cache = TextureCache::new(max_texture_size);
self.texture_cache = TextureCache::new(
self.texture_cache.max_texture_size(),
self.texture_cache.max_texture_layers(),
);
}
}

View file

@ -22,12 +22,12 @@ use std::rc::Rc;
/// The size of each region/layer in shared cache texture arrays.
const TEXTURE_REGION_DIMENSIONS: u32 = 512;
// Items in the texture cache can either be standalone textures,
// or a sub-rect inside the shared cache.
/// Items in the texture cache can either be standalone textures,
/// or a sub-rect inside the shared cache.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
enum EntryKind {
enum EntryDetails {
Standalone,
Cache {
// Origin within the texture layer where this item exists.
@ -37,13 +37,23 @@ enum EntryKind {
},
}
impl EntryKind {
/// Returns true if this corresponds to a standalone cache entry.
fn is_standalone(&self) -> bool {
matches!(*self, EntryKind::Standalone)
impl EntryDetails {
/// Returns the kind associated with the details.
fn kind(&self) -> EntryKind {
match *self {
EntryDetails::Standalone => EntryKind::Standalone,
EntryDetails::Cache { .. } => EntryKind::Shared,
}
}
}
/// Tag identifying standalone-versus-shared, without the details.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum EntryKind {
Standalone,
Shared,
}
#[derive(Debug)]
pub enum CacheEntryMarker {}
@ -57,7 +67,7 @@ struct CacheEntry {
/// Size the requested item, in device pixels.
size: DeviceUintSize,
/// Details specific to standalone or shared items.
kind: EntryKind,
details: EntryDetails,
/// Arbitrary user data associated with this item.
user_data: [f32; 3],
/// The last frame this item was requested for rendering.
@ -81,24 +91,20 @@ impl CacheEntry {
// Create a new entry for a standalone texture.
fn new_standalone(
texture_id: CacheTextureId,
size: DeviceUintSize,
format: ImageFormat,
filter: TextureFilter,
user_data: [f32; 3],
last_access: FrameId,
uv_rect_kind: UvRectKind,
params: &CacheAllocParams,
) -> Self {
CacheEntry {
size,
user_data,
size: params.descriptor.size,
user_data: params.user_data,
last_access,
kind: EntryKind::Standalone,
details: EntryDetails::Standalone,
texture_id,
format,
filter,
format: params.descriptor.format,
filter: params.filter,
uv_rect_handle: GpuCacheHandle::new(),
eviction_notice: None,
uv_rect_kind,
uv_rect_kind: params.uv_rect_kind,
eviction: Eviction::Auto,
}
}
@ -109,9 +115,9 @@ impl CacheEntry {
// to fetch from.
fn update_gpu_cache(&mut self, gpu_cache: &mut GpuCache) {
if let Some(mut request) = gpu_cache.request(&mut self.uv_rect_handle) {
let (origin, layer_index) = match self.kind {
EntryKind::Standalone { .. } => (DeviceUintPoint::zero(), 0.0),
EntryKind::Cache {
let (origin, layer_index) = match self.details {
EntryDetails::Standalone { .. } => (DeviceUintPoint::zero(), 0.0),
EntryDetails::Cache {
origin,
layer_index,
..
@ -210,32 +216,24 @@ impl SharedTextures {
array_a8_linear: TextureArray::new(
ImageFormat::R8,
TextureFilter::Linear,
4,
),
// Used for experimental hdr yuv texture support, but not used in
// production Firefox.
array_a16_linear: TextureArray::new(
ImageFormat::R16,
TextureFilter::Linear,
4,
),
// The primary cache for images, glyphs, etc.
array_rgba8_linear: TextureArray::new(
ImageFormat::BGRA8,
TextureFilter::Linear,
32, /* More than 32 layers breaks on mac intel drivers for some reason */
),
// Used for image-rendering: crisp. This is mostly favicons, which
// are small. Some other images use it too, but those tend to be
// larger than 512x512 and thus don't use the shared cache anyway.
//
// Even though most of the buckets will be sparsely-used, we still
// need a few to account for different favicon sizes. 4 seems enough
// in practice, though we might also be able to get away with 2 or 3.
array_rgba8_nearest: TextureArray::new(
ImageFormat::BGRA8,
TextureFilter::Nearest,
4,
),
}
}
@ -260,6 +258,38 @@ impl SharedTextures {
}
}
/// Lists of strong handles owned by the texture cache. There is only one strong
/// handle for each entry, but unlimited weak handles. Consumers receive the weak
/// handles, and `TextureCache` owns the strong handles internally.
#[derive(Default)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct EntryHandles {
/// Handles for each standalone texture cache entry.
standalone: Vec<FreeListHandle<CacheEntryMarker>>,
/// Handles for each shared texture cache entry.
shared: Vec<FreeListHandle<CacheEntryMarker>>,
}
impl EntryHandles {
/// Mutably borrows the requested handle list.
fn select(&mut self, kind: EntryKind) -> &mut Vec<FreeListHandle<CacheEntryMarker>> {
if kind == EntryKind::Standalone {
&mut self.standalone
} else {
&mut self.shared
}
}
}
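
To make the strong/weak handle bookkeeping above easier to follow, here is a standalone, simplified illustration of the details-enum/kind-tag split and the list selection it drives. The handle type is reduced to a plain integer; everything here is a stand-in for the example, not the actual FreeList machinery.

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum EntryKind { Standalone, Shared }

#[derive(Debug)]
enum EntryDetails {
    Standalone,
    Cache { origin: (u32, u32), layer_index: usize },
}

impl EntryDetails {
    // The payload-carrying details map down to a small copyable tag.
    fn kind(&self) -> EntryKind {
        match *self {
            EntryDetails::Standalone => EntryKind::Standalone,
            EntryDetails::Cache { .. } => EntryKind::Shared,
        }
    }
}

#[derive(Default)]
struct EntryHandles {
    standalone: Vec<u64>, // stand-in for FreeListHandle<CacheEntryMarker>
    shared: Vec<u64>,
}

impl EntryHandles {
    // The tag picks which strong-handle list to borrow.
    fn select(&mut self, kind: EntryKind) -> &mut Vec<u64> {
        if kind == EntryKind::Standalone { &mut self.standalone } else { &mut self.shared }
    }
}

fn main() {
    let mut handles = EntryHandles::default();
    let details = EntryDetails::Cache { origin: (0, 0), layer_index: 3 };
    handles.select(details.kind()).push(42);
    assert_eq!(handles.shared, vec![42]);
    println!("{:?} entry filed under {:?}", details, details.kind());
}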
/// Container struct for the various parameters used in cache allocation.
struct CacheAllocParams {
descriptor: ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
}
/// General-purpose manager for images in GPU memory. This includes images,
/// rasterized glyphs, rasterized blobs, cached render tasks, etc.
///
@ -283,53 +313,76 @@ pub struct TextureCache {
/// Set of texture arrays in different formats used for the shared cache.
shared_textures: SharedTextures,
// Maximum texture size supported by hardware.
/// Maximum texture size supported by hardware.
max_texture_size: u32,
// The next unused virtual texture ID. Monotonically increasing.
/// Maximum number of texture layers supported by hardware.
max_texture_layers: usize,
/// The next unused virtual texture ID. Monotonically increasing.
next_id: CacheTextureId,
// A list of allocations and updates that need to be
// applied to the texture cache in the rendering thread
// this frame.
/// A list of allocations and updates that need to be applied to the texture
/// cache in the rendering thread this frame.
#[cfg_attr(all(feature = "serde", any(feature = "capture", feature = "replay")), serde(skip))]
pending_updates: TextureUpdateList,
// The current frame ID. Used for cache eviction policies.
/// The current frame ID. Used for cache eviction policies.
frame_id: FrameId,
// Maintains the list of all current items in
// the texture cache.
/// The last FrameId in which we expired the shared cache.
last_shared_cache_expiration: FrameId,
/// Maintains the list of all current items in the texture cache.
entries: FreeList<CacheEntry, CacheEntryMarker>,
// A list of the strong handles of items that were
// allocated in the standalone texture pool. Used
// for evicting old standalone textures.
standalone_entry_handles: Vec<FreeListHandle<CacheEntryMarker>>,
// A list of the strong handles of items that were
// allocated in the shared texture cache. Used
// for evicting old cache items.
shared_entry_handles: Vec<FreeListHandle<CacheEntryMarker>>,
/// Strong handles for all entries allocated from the above `FreeList`.
handles: EntryHandles,
}
impl TextureCache {
pub fn new(max_texture_size: u32) -> Self {
pub fn new(max_texture_size: u32, mut max_texture_layers: usize) -> Self {
if cfg!(target_os = "macos") {
// On MBP integrated Intel GPUs, texture arrays appear to be
// implemented as a single texture of stacked layers, and that
// texture appears to be subject to the texture size limit. As such,
// allocating more than 32 512x512 regions results in a dimension
// longer than 16k (the max texture size), causing incorrect behavior.
//
// So we clamp the number of layers on mac. This results in maximum
// texture array size of 32MB, which isn't ideal but isn't terrible
// either. OpenGL on mac is not long for this earth, so this may be
// good enough until we have WebRender on gfx-rs (on Metal).
//
// Note that we could also define this more generally in terms of
// |max_texture_size / TEXTURE_REGION_DIMENSION|, except:
// * max_texture_size is actually clamped beyond the device limit
// by Gecko to 8192, so we'd need to thread the raw device value
// here, and:
// * The bug we're working around is likely specific to a single
// driver family, and those drivers are also likely to share
// the same max texture size of 16k. If we do encounter a driver
// with the same bug but a lower max texture size, we might need
// to rethink our strategy anyway, since a limit below 32MB might
// start to introduce performance issues.
max_texture_layers = max_texture_layers.min(32);
}
TextureCache {
shared_textures: SharedTextures::new(),
max_texture_size,
max_texture_layers,
next_id: CacheTextureId(1),
pending_updates: TextureUpdateList::new(),
frame_id: FrameId::invalid(),
last_shared_cache_expiration: FrameId::invalid(),
entries: FreeList::new(),
standalone_entry_handles: Vec::new(),
shared_entry_handles: Vec::new(),
handles: EntryHandles::default(),
}
}
pub fn clear(&mut self) {
let standalone_entry_handles = mem::replace(
&mut self.standalone_entry_handles,
&mut self.handles.standalone,
Vec::new(),
);
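
For the 32-layer macOS clamp in TextureCache::new above, the numbers work out as follows; this is just the arithmetic from the comment, spelled out (BGRA8 at 4 bytes per pixel).

fn main() {
    let region = 512u64;                   // TEXTURE_REGION_DIMENSIONS
    let layers = 32u64;                    // clamp applied on macOS
    let stacked = region * layers;         // 16384 px, i.e. the 16k texture limit
    let bytes = region * region * 4 * layers;
    println!("stacked height: {} px", stacked);
    println!("array size: {} MiB", bytes / (1024 * 1024)); // 32 MiB
}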
@ -340,7 +393,7 @@ impl TextureCache {
}
let shared_entry_handles = mem::replace(
&mut self.shared_entry_handles,
&mut self.handles.shared,
Vec::new(),
);
@ -404,6 +457,11 @@ impl TextureCache {
self.max_texture_size
}
#[allow(dead_code)]
pub fn max_texture_layers(&self) -> usize {
self.max_texture_layers
}
pub fn pending_updates(&mut self) -> TextureUpdateList {
mem::replace(&mut self.pending_updates, TextureUpdateList::new())
}
@ -439,13 +497,8 @@ impl TextureCache {
};
if realloc {
self.allocate(
handle,
descriptor,
filter,
user_data,
uv_rect_kind,
);
let params = CacheAllocParams { descriptor, filter, user_data, uv_rect_kind };
self.allocate(&params, handle);
// If we reallocated, we need to upload the whole item again.
dirty_rect = None;
@ -471,9 +524,9 @@ impl TextureCache {
// to upload the new image data into the correct location
// in GPU memory.
if let Some(data) = data {
let (layer_index, origin) = match entry.kind {
EntryKind::Standalone { .. } => (0, DeviceUintPoint::zero()),
EntryKind::Cache {
let (layer_index, origin) = match entry.details {
EntryDetails::Standalone { .. } => (0, DeviceUintPoint::zero()),
EntryDetails::Cache {
layer_index,
origin,
..
@ -519,11 +572,11 @@ impl TextureCache {
.get_opt(handle)
.expect("BUG: was dropped from cache or not updated!");
debug_assert_eq!(entry.last_access, self.frame_id);
let (layer_index, origin) = match entry.kind {
EntryKind::Standalone { .. } => {
let (layer_index, origin) = match entry.details {
EntryDetails::Standalone { .. } => {
(0, DeviceUintPoint::zero())
}
EntryKind::Cache {
EntryDetails::Cache {
layer_index,
origin,
..
@ -549,11 +602,11 @@ impl TextureCache {
.get_opt(handle)
.expect("BUG: was dropped from cache or not updated!");
debug_assert_eq!(entry.last_access, self.frame_id);
let (layer_index, origin) = match entry.kind {
EntryKind::Standalone { .. } => {
let (layer_index, origin) = match entry.details {
EntryDetails::Standalone { .. } => {
(0, DeviceUintPoint::zero())
}
EntryKind::Cache {
EntryDetails::Cache {
layer_index,
origin,
..
@ -574,20 +627,25 @@ impl TextureCache {
}
/// Expires old standalone textures. Called at the end of every frame.
///
/// Most of the time, standalone cache entries correspond to images whose
/// width or height is greater than the region size in the shared cache, i.e.
/// 512 pixels. Cached render tasks also frequently get standalone entries,
/// but those use the Eviction::Eager policy (for now). So the tradeoff here
/// is largely around reducing texture upload jank while keeping memory usage
/// at an acceptable level.
fn expire_old_standalone_entries(&mut self) {
self.expire_old_entries(EntryKind::Standalone);
}
/// Shared eviction code for standalone and shared entries.
///
/// Our eviction scheme is based on the age of the entry, i.e. the difference
/// between the current frame index and that of the last frame in
/// which the entry was used. It does not directly consider the size of the
/// entry, but does consider overall memory usage by WebRender, by making
/// eviction increasingly aggressive as overall memory usage increases.
fn expire_old_standalone_entries(&mut self) {
///
/// Most of the time, standalone cache entries correspond to images whose
/// width or height is greater than the region size in the shared cache, i.e.
/// 512 pixels. Cached render tasks also frequently get standalone entries,
/// but those use the Eviction::Eager policy (for now). So the tradeoff there
/// is largely around reducing texture upload jank while keeping memory usage
/// at an acceptable level.
fn expire_old_entries(&mut self, kind: EntryKind) {
// These parameters are based on some discussion and local tuning, but
// no hard measurement. There may be room for improvement.
//
@ -600,7 +658,7 @@ impl TextureCache {
(total_gpu_bytes_allocated() as f64 / MAX_MEMORY_PRESSURE_BYTES as f64).min(1.0);
// Use the pressure factor to compute the maximum number of frames that
// a standalone texture can go unused before being evicted.
// an entry can go unused before being evicted.
let max_frame_age_raw =
((1.0 - pressure_factor) * MAX_FRAME_AGE_WITHOUT_PRESSURE) as usize;
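
The pressure-driven threshold described above can be sketched on its own. The constants below are illustrative placeholders (the tuned values live in texture_cache.rs), and the minimum-age floor is an assumption added for the example.

// Assumed ceiling on GPU memory before eviction becomes maximally aggressive.
const MAX_MEMORY_PRESSURE_BYTES: f64 = 512.0 * 1024.0 * 1024.0;
// Assumed number of frames an entry may go unused when there is no pressure.
const MAX_FRAME_AGE_WITHOUT_PRESSURE: f64 = 75.0;

/// How many frames an unused entry may survive, given current GPU usage.
fn max_frame_age(total_gpu_bytes_allocated: f64) -> usize {
    // 0.0 = no pressure, 1.0 = at or beyond the pressure ceiling.
    let pressure_factor =
        (total_gpu_bytes_allocated / MAX_MEMORY_PRESSURE_BYTES).min(1.0);
    // Higher pressure -> smaller allowed age -> more aggressive eviction.
    let raw = ((1.0 - pressure_factor) * MAX_FRAME_AGE_WITHOUT_PRESSURE) as usize;
    raw.max(1) // always leave at least one frame of slack (example choice)
}

fn main() {
    for mb in [64.0_f64, 256.0, 480.0] {
        let bytes = mb * 1024.0 * 1024.0;
        println!("{} MB allocated -> evict entries unused for more than {} frames",
                 mb, max_frame_age(bytes));
    }
}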
@ -614,9 +672,9 @@ impl TextureCache {
// Iterate over the entries in reverse order, evicting the ones older than
// the frame age threshold. Reverse order avoids iterator invalidation when
// removing entries.
for i in (0..self.standalone_entry_handles.len()).rev() {
for i in (0..self.handles.select(kind).len()).rev() {
let evict = {
let entry = self.entries.get(&self.standalone_entry_handles[i]);
let entry = self.entries.get(&self.handles.select(kind)[i]);
match entry.eviction {
Eviction::Manual => false,
Eviction::Auto => entry.last_access < frame_id_threshold,
@ -624,7 +682,7 @@ impl TextureCache {
}
};
if evict {
let handle = self.standalone_entry_handles.swap_remove(i);
let handle = self.handles.select(kind).swap_remove(i);
let entry = self.entries.free(handle);
entry.evict();
self.free(entry);
@ -632,74 +690,26 @@ impl TextureCache {
}
}
// Expire old shared items. Pass in the allocation size
// that is being requested, so we know when we've evicted
// enough items to guarantee we can fit this allocation in
// the cache.
fn expire_old_shared_entries(&mut self, required_alloc: &ImageDescriptor) {
let mut eviction_candidates = Vec::new();
let mut retained_entries = Vec::new();
// Build a list of eviction candidates (which are
// anything not used this frame).
for handle in self.shared_entry_handles.drain(..) {
let entry = self.entries.get(&handle);
if entry.eviction == Eviction::Manual || entry.last_access == self.frame_id {
retained_entries.push(handle);
} else {
eviction_candidates.push(handle);
}
/// Expires old shared entries, if we haven't done so recently.
///
/// Returns true if any entries were expired.
fn maybe_expire_old_shared_entries(&mut self) -> bool {
let old_len = self.handles.shared.len();
if self.last_shared_cache_expiration + 25 < self.frame_id {
self.expire_old_entries(EntryKind::Shared);
self.last_shared_cache_expiration = self.frame_id;
}
// Sort by access time so we remove the oldest ones first.
eviction_candidates.sort_by_key(|handle| {
let entry = self.entries.get(handle);
entry.last_access
});
// Doing an eviction is quite expensive, so we don't want to
// do it all the time. To avoid this, try and evict a
// significant number of items each cycle. However, we don't
// want to evict everything we can, since that will result in
// more items being uploaded than necessary.
// Instead, we say we will keep evicting until both of these
// conditions are met:
// - We have evicted some arbitrary number of items (512 currently).
// AND
// - We have freed an item that will definitely allow us to
// fit the currently requested allocation.
let needed_slab_size = SlabSize::new(required_alloc.size);
let mut found_matching_slab = false;
let mut freed_complete_page = false;
let mut evicted_items = 0;
for handle in eviction_candidates {
if evicted_items > 512 && (found_matching_slab || freed_complete_page) {
retained_entries.push(handle);
} else {
let entry = self.entries.free(handle);
entry.evict();
if let Some(region) = self.free(entry) {
found_matching_slab |= region.slab_size == needed_slab_size;
freed_complete_page |= region.is_empty();
}
evicted_items += 1;
}
}
// Keep a record of the remaining handles for next eviction cycle.
self.shared_entry_handles = retained_entries;
self.handles.shared.len() != old_len
}
// Free a cache entry from the standalone list or shared cache.
fn free(&mut self, entry: CacheEntry) -> Option<&TextureRegion> {
match entry.kind {
EntryKind::Standalone { .. } => {
fn free(&mut self, entry: CacheEntry) {
match entry.details {
EntryDetails::Standalone { .. } => {
// This is a standalone texture allocation. Free it directly.
self.pending_updates.push_free(entry.texture_id);
None
}
EntryKind::Cache {
EntryDetails::Cache {
origin,
layer_index,
} => {
@ -710,7 +720,6 @@ impl TextureCache {
layer_index,
);
region.free(origin);
Some(region)
}
}
}
@ -718,13 +727,13 @@ impl TextureCache {
// Attempt to allocate a block from the shared cache.
fn allocate_from_shared_cache(
&mut self,
descriptor: &ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
params: &CacheAllocParams
) -> Option<CacheEntry> {
// Mutably borrow the correct texture.
let texture_array = self.shared_textures.select(descriptor.format, filter);
let texture_array = self.shared_textures.select(
params.descriptor.format,
params.filter,
);
// Lazy initialize this texture array if required.
if texture_array.texture_id.is_none() {
@ -735,7 +744,7 @@ impl TextureCache {
let info = TextureCacheAllocInfo {
width: TEXTURE_REGION_DIMENSIONS,
height: TEXTURE_REGION_DIMENSIONS,
format: descriptor.format,
format: params.descriptor.format,
filter: texture_array.filter,
layer_count: 1,
};
@ -747,12 +756,7 @@ impl TextureCache {
// Do the allocation. This can fail and return None
// if there are no free slots or regions available.
texture_array.alloc(
descriptor.size,
user_data,
self.frame_id,
uv_rect_kind,
)
texture_array.alloc(params, self.frame_id)
}
// Returns true if the given image descriptor *may* be
@ -784,108 +788,97 @@ impl TextureCache {
allowed_in_shared_cache
}
// Allocate storage for a given image. This attempts to allocate
// from the shared cache, but falls back to standalone texture
// if the image is too large, or the cache is full.
fn allocate(
/// Allocates a new standalone cache entry.
fn allocate_standalone_entry(
&mut self,
handle: &mut TextureCacheHandle,
descriptor: ImageDescriptor,
filter: TextureFilter,
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
) {
assert!(descriptor.size.width > 0 && descriptor.size.height > 0);
params: &CacheAllocParams,
) -> CacheEntry {
let texture_id = self.next_id;
self.next_id.0 += 1;
// Work out if this image qualifies to go in the shared (batching) cache.
let allowed_in_shared_cache = self.is_allowed_in_shared_cache(
filter,
&descriptor,
// Push a command to allocate device storage of the right size / format.
let info = TextureCacheAllocInfo {
width: params.descriptor.size.width,
height: params.descriptor.size.height,
format: params.descriptor.format,
filter: params.filter,
layer_count: 1,
};
self.pending_updates.push_alloc(texture_id, info);
return CacheEntry::new_standalone(
texture_id,
self.frame_id,
params,
);
let mut allocated_standalone = false;
let mut new_cache_entry = None;
let frame_id = self.frame_id;
}
// If it's allowed in the cache, see if there is a spot for it.
if allowed_in_shared_cache {
/// Allocates a cache entry appropriate for the given parameters.
///
/// This allocates from the shared cache unless the parameters do not meet
/// the shared cache requirements, in which case a standalone texture is
/// used.
fn allocate_cache_entry(
&mut self,
params: &CacheAllocParams,
) -> CacheEntry {
assert!(params.descriptor.size.width > 0 && params.descriptor.size.height > 0);
new_cache_entry = self.allocate_from_shared_cache(
&descriptor,
filter,
user_data,
uv_rect_kind,
);
// If this image doesn't qualify to go in the shared (batching) cache,
// allocate a standalone entry.
if !self.is_allowed_in_shared_cache(params.filter, &params.descriptor) {
return self.allocate_standalone_entry(params);
}
// If we failed to allocate in the shared cache, make some space
// and then try to allocate again. If we still have room to grow
// the cache, we do that. Otherwise, we evict.
//
// We should improve this logic to support some degree of eviction
// before the cache fills up entirely.
if new_cache_entry.is_none() {
let reallocated = {
let texture_array = self.shared_textures.select(descriptor.format, filter);
let num_regions = texture_array.regions.len();
if num_regions < texture_array.max_layer_count {
// We have room to grow.
let info = TextureCacheAllocInfo {
width: TEXTURE_REGION_DIMENSIONS,
height: TEXTURE_REGION_DIMENSIONS,
format: descriptor.format,
filter: texture_array.filter,
layer_count: (num_regions + 1) as i32,
};
self.pending_updates.push_realloc(texture_array.texture_id.unwrap(), info);
texture_array.regions.push(TextureRegion::new(num_regions));
true
} else {
false
}
};
if !reallocated {
// Out of room. Evict.
self.expire_old_shared_entries(&descriptor);
}
new_cache_entry = self.allocate_from_shared_cache(
&descriptor,
filter,
user_data,
uv_rect_kind,
);
// Try allocating from the shared cache.
if let Some(entry) = self.allocate_from_shared_cache(params) {
return entry;
}
// If we failed to allocate and haven't GCed in a while, do so and try
// again.
if self.maybe_expire_old_shared_entries() {
if let Some(entry) = self.allocate_from_shared_cache(params) {
return entry;
}
}
// If not allowed in the cache, or if the shared cache is full, then it
// will just have to be in a unique texture. This hurts batching but should
// only occur on a small number of images (or pathological test cases!).
if new_cache_entry.is_none() {
let texture_id = self.next_id;
self.next_id.0 += 1;
let added_layer = {
// If we've hit our layer limit, allocate standalone.
let texture_array =
self.shared_textures.select(params.descriptor.format, params.filter);
let num_regions = texture_array.regions.len();
// Add a layer, unless we've hit our limit.
if num_regions < self.max_texture_layers as usize {
let info = TextureCacheAllocInfo {
width: TEXTURE_REGION_DIMENSIONS,
height: TEXTURE_REGION_DIMENSIONS,
format: params.descriptor.format,
filter: texture_array.filter,
layer_count: (num_regions + 1) as i32,
};
self.pending_updates.push_realloc(texture_array.texture_id.unwrap(), info);
texture_array.regions.push(TextureRegion::new(num_regions));
true
} else {
false
}
};
// Push a command to allocate device storage of the right size / format.
let info = TextureCacheAllocInfo {
width: descriptor.size.width,
height: descriptor.size.height,
format: descriptor.format,
filter,
layer_count: 1,
};
self.pending_updates.push_alloc(texture_id, info);
new_cache_entry = Some(CacheEntry::new_standalone(
texture_id,
descriptor.size,
descriptor.format,
filter,
user_data,
frame_id,
uv_rect_kind,
));
allocated_standalone = true;
if added_layer {
self.allocate_from_shared_cache(params)
.expect("Allocation should succeed after adding a fresh layer")
} else {
self.allocate_standalone_entry(params)
}
let new_cache_entry = new_cache_entry.expect("BUG: must have allocated by now");
}
/// Allocates a cache entry for the given parameters, and updates the
/// provided handle to point to the new entry.
fn allocate(&mut self, params: &CacheAllocParams, handle: &mut TextureCacheHandle) {
let new_cache_entry = self.allocate_cache_entry(params);
let new_kind = new_cache_entry.details.kind();
// If the handle points to a valid cache entry, we want to replace the
// cache entry with our newly updated location. We also need to ensure
@ -898,14 +891,14 @@ impl TextureCache {
// This is managed with a database style upsert operation.
match self.entries.upsert(handle, new_cache_entry) {
UpsertResult::Updated(old_entry) => {
if allocated_standalone != old_entry.kind.is_standalone() {
if new_kind != old_entry.details.kind() {
// Handle the rare case than an update moves an entry from
// shared to standalone or vice versa. This involves a linear
// search, but should be rare enough not to matter.
let (from, to) = if allocated_standalone {
(&mut self.shared_entry_handles, &mut self.standalone_entry_handles)
let (from, to) = if new_kind == EntryKind::Standalone {
(&mut self.handles.shared, &mut self.handles.standalone)
} else {
(&mut self.standalone_entry_handles, &mut self.shared_entry_handles)
(&mut self.handles.standalone, &mut self.handles.shared)
};
let idx = from.iter().position(|h| h.weak() == *handle).unwrap();
to.push(from.remove(idx));
@ -914,11 +907,7 @@ impl TextureCache {
}
UpsertResult::Inserted(new_handle) => {
*handle = new_handle.weak();
if allocated_standalone {
self.standalone_entry_handles.push(new_handle);
} else {
self.shared_entry_handles.push(new_handle);
}
self.handles.select(new_kind).push(new_handle);
}
}
}
@ -1068,7 +1057,6 @@ impl TextureRegion {
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct TextureArray {
filter: TextureFilter,
max_layer_count: usize,
format: ImageFormat,
regions: Vec<TextureRegion>,
texture_id: Option<CacheTextureId>,
@ -1078,12 +1066,10 @@ impl TextureArray {
fn new(
format: ImageFormat,
filter: TextureFilter,
max_layer_count: usize,
) -> Self {
TextureArray {
format,
filter,
max_layer_count,
regions: Vec::new(),
texture_id: None,
}
@ -1107,17 +1093,15 @@ impl TextureArray {
}
}
// Allocate space in this texture array.
/// Allocate space in this texture array.
fn alloc(
&mut self,
size: DeviceUintSize,
user_data: [f32; 3],
params: &CacheAllocParams,
frame_id: FrameId,
uv_rect_kind: UvRectKind,
) -> Option<CacheEntry> {
// Quantize the size of the allocation to select a region to
// allocate from.
let slab_size = SlabSize::new(size);
let slab_size = SlabSize::new(params.descriptor.size);
// TODO(gw): For simplicity, the initial implementation just
// has a single vec<> of regions. We could easily
@ -1128,7 +1112,7 @@ impl TextureArray {
// in case we need to select a new empty region
// after the loop.
let mut empty_region_index = None;
let mut entry_kind = None;
let mut entry_details = None;
// Run through the existing regions of this size, and see if
// we can find a free block in any of them.
@ -1137,7 +1121,7 @@ impl TextureArray {
empty_region_index = Some(i);
} else if region.slab_size == slab_size {
if let Some(location) = region.alloc() {
entry_kind = Some(EntryKind::Cache {
entry_details = Some(EntryDetails::Cache {
layer_index: region.layer_index,
origin: location,
});
@ -1147,12 +1131,12 @@ impl TextureArray {
}
// Find a region of the right size and try to allocate from it.
if entry_kind.is_none() {
if entry_details.is_none() {
if let Some(empty_region_index) = empty_region_index {
let region = &mut self.regions[empty_region_index];
region.init(slab_size);
entry_kind = region.alloc().map(|location| {
EntryKind::Cache {
entry_details = region.alloc().map(|location| {
EntryDetails::Cache {
layer_index: region.layer_index,
origin: location,
}
@ -1160,18 +1144,18 @@ impl TextureArray {
}
}
entry_kind.map(|kind| {
entry_details.map(|details| {
CacheEntry {
size,
user_data,
size: params.descriptor.size,
user_data: params.user_data,
last_access: frame_id,
kind,
details,
uv_rect_handle: GpuCacheHandle::new(),
format: self.format,
filter: self.filter,
texture_id: self.texture_id.unwrap(),
eviction_notice: None,
uv_rect_kind,
uv_rect_kind: params.uv_rect_kind,
eviction: Eviction::Auto,
}
})

View file

@ -1 +1 @@
56ffe6edc4c453a370784314194f9516af0a8950
9e65149a487afa35994076d7db918c07d2b967bf

View file

@ -7,64 +7,46 @@
/*
* JSAPI functions and callbacks related to WHATWG Stream objects.
*
* Much of the API here mirrors the JS API of ReadableStream and associated
* classes, e.g. ReadableStreamDefaultReader, ReadableStreamBYOBReader,
* ReadableStreamDefaultController, ReadableByteStreamController, and
* ReadableStreamBYOBRequest.
* Much of the API here mirrors the standard algorithms and standard JS methods
* of the objects defined in the Streams standard. One difference is that the
* functionality of the JS controller object is exposed to C++ as functions
* taking ReadableStream instances instead, for convenience.
*
* There are some crucial differences, though: Functionality that's exposed
* as methods/accessors on controllers in JS is exposed as functions taking
* ReadableStream instances instead. This is because an analysis of how
* the API would be used showed that all functions that'd take controllers
* would do so by first getting the controller from the stream instance it's
* associated with and then call the function taking it. I.e., it would purely
* add boilerplate without any gains in ease of use of the API.
* ## External streams
*
* It would probably still make sense to factor the API the same as the JS API
* if we had to keep any API stability guarantees: the JS API won't change, so
* we could be sure that the C++ API could stay the same, too. Given that we
* don't guarantee API stability, this concern isn't too pressing.
*
* Some functions exposed here deal with ReadableStream instances that have an
* embedding-provided underlying source. These instances are largely similar
* to byte streams as created using |new ReadableStream({type: "bytes"})|:
* They enable users to acquire ReadableStreamBYOBReaders and only vend chunks
* that're typed array instances.
* Embeddings can create ReadableStreams that read from custom C++ data
* sources. Such streams are always byte streams: the chunks they produce are
* typed arrays (and they will support ReadableStreamBYOBReader once we have
* it).
*
* When creating an "external readable stream" using
* JS::NewReadableExternalSourceStreamObject, an underlying source and a set
* of flags can be passed to be stored on the stream. The underlying source is
* treated as an opaque void* pointer by the JS engine: it's purely meant as
* a reference to be used by the embedding to identify whatever actual source
* it uses to supply data for the stream. Similarly, the flags aren't
* interpreted by the JS engine, but are passed to some of the callbacks below
* and can be retrieved using JS::ReadableStreamGetEmbeddingFlags.
* JS::NewReadableExternalSourceStreamObject, an underlying source and a set of
* flags can be passed to be stored on the stream. The underlying source is
* treated as an opaque void* pointer by the JS engine: it's purely meant as a
* reference to be used by the embedding to identify whatever actual source it
* uses to supply data for the stream. Similarly, the flags aren't interpreted
* by the JS engine, but are passed to some of the callbacks below and can be
* retrieved using JS::ReadableStreamGetEmbeddingFlags.
*
* External readable streams are optimized to allow the embedding to interact
* with them with a minimum of overhead: chunks aren't enqueued as individual
* typed array instances; instead, the embedding only updates the amount of
* data available using ReadableStreamUpdateDataAvailableFromSource.
* When content requests data by reading from a reader,
* WriteIntoReadRequestBufferCallback is invoked, asking the embedding to
* write data directly into the buffer we're about to hand to content.
* typed arrays; instead, the embedding only updates the amount of data
* available using ReadableStreamUpdateDataAvailableFromSource. When JS
* requests data from a reader, WriteIntoReadRequestBufferCallback is invoked,
* asking the embedding to write data directly into the buffer we're about to
* hand to JS.
*
* Additionally, ReadableStreamGetExternalUnderlyingSource can be used to
* get the void* pointer to the underlying source. This is equivalent to
* acquiring a reader for the stream in that it locks the stream until it
* Additionally, ReadableStreamGetExternalUnderlyingSource can be used to get
* the void* pointer to the underlying source. This locks the stream until it
* is released again using JS::ReadableStreamReleaseExternalUnderlyingSource.
*
* Embeddings are expected to detect situations where an API exposed to JS
* takes a ReadableStream to read from that has an external underlying source.
* In those situations, it might be preferable to directly perform data
* transfers from the stream's underlying source to whatever sink the
* embedding uses, assuming that such direct transfers can be performed
* more efficiently.
*
* An example of such an optimized operation might be a ServiceWorker piping a
* fetch Response body to a TextDecoder: instead of writing chunks of data
* into JS typed array buffers only to immediately read from them again, the
* embedding can presumably directly feed the incoming data to the
* TextDecoder's underlying implementation.
* Embeddings can use this to optimize away the JS `ReadableStream` overhead
* when an embedding-defined C++ stream is passed to an embedding-defined C++
* consumer. For example, consider a ServiceWorker piping a `fetch` Response
* body to a TextDecoder. Instead of copying chunks of data into JS typed array
* buffers and creating a Promise per chunk, only to immediately resolve the
* Promises and read the data out again, the embedding can directly feed the
* incoming data to the TextDecoder.
*/
#ifndef js_Stream_h

View file

@ -22,7 +22,7 @@
#include "NSSKeyStore.h"
#endif
NS_IMPL_ISUPPORTS(OSKeyStore, nsIOSKeyStore)
NS_IMPL_ISUPPORTS(OSKeyStore, nsIOSKeyStore, nsIObserver)
using namespace mozilla;
using dom::Promise;

View file

@ -23,6 +23,7 @@ kind-dependencies:
- repackage-signing-l10n
- partials
- partials-signing
- repackage-signing-msi
primary-dependency:
- repackage-signing-l10n

View file

@ -586,6 +586,12 @@ def target_tasks_staging_release(full_task_graph, parameters, graph_config):
def filter(task):
if not task.attributes.get('shipping_product'):
return False
if (parameters['release_type'].startswith('esr')
and 'android' in task.attributes.get('build_platform', '')):
return False
if (parameters['release_type'] != 'beta'
and 'devedition' in task.attributes.get('build_platform', '')):
return False
if task.attributes.get('shipping_phase') == 'build':
return True
return False
@ -598,13 +604,29 @@ def target_tasks_beta_simulation(full_task_graph, parameters, graph_config):
"""
Select builds that would run on mozilla-beta.
"""
project_by_release = {
'nightly': 'mozilla-central',
'beta': 'mozilla-beta',
'release': 'mozilla-release',
'esr60': 'mozilla-esr60',
}
target_project = project_by_release.get(parameters['release_type'])
if target_project is None:
raise Exception('Unknown or unspecified release type in simulation run.')
def filter_for_beta(task):
def filter_for_target_project(task):
"""Filter tasks by project. Optionally enable nightlies."""
run_on_projects = set(task.attributes.get('run_on_projects', []))
return match_run_on_projects('mozilla-beta', run_on_projects)
return match_run_on_projects(target_project, run_on_projects)
def filter_out_android_on_esr(task):
if (parameters['release_type'].startswith('esr')
and 'android' in task.attributes.get('build_platform', '')):
return False
return True
return [l for l, t in full_task_graph.tasks.iteritems()
if filter_release_tasks(t, parameters)
and filter_out_cron(t, parameters)
and filter_for_beta(t)]
and filter_for_target_project(t)
and filter_out_android_on_esr(t)]

View file

@ -132,6 +132,10 @@ UPSTREAM_ARTIFACT_SIGNED_REPACKAGE_PATHS = [
'target.stub-installer.exe',
]
UPSTREAM_ARTIFACT_SIGNED_MSI_PATHS = [
'target.installer.msi',
]
# Compile every regex once at import time
for dict_ in (
UPSTREAM_ARTIFACT_UNSIGNED_PATHS, UPSTREAM_ARTIFACT_SIGNED_PATHS,
@ -213,6 +217,7 @@ def make_task_description(config, jobs):
build_name = "build"
repackage_name = "repackage"
repackage_signing_name = "repackage-signing"
msi_signing_name = "repackage-signing-msi"
if job.get('locale'):
signing_name = "nightly-l10n-signing"
build_name = "nightly-l10n"
@ -226,6 +231,8 @@ def make_task_description(config, jobs):
}
if 'partials-signing' in upstream_deps:
dependencies['partials-signing'] = upstream_deps['partials-signing']
if msi_signing_name in upstream_deps:
dependencies[msi_signing_name] = upstream_deps[msi_signing_name]
attributes = copy_attributes_from_dependent_job(dep_job)
if job.get('locale'):
@ -256,6 +263,7 @@ def generate_upstream_artifacts(job, dependencies, platform, locale=None, projec
build_signing_mapping = UPSTREAM_ARTIFACT_SIGNED_PATHS
repackage_mapping = UPSTREAM_ARTIFACT_REPACKAGE_PATHS
repackage_signing_mapping = UPSTREAM_ARTIFACT_SIGNED_REPACKAGE_PATHS
msi_signing_mapping = UPSTREAM_ARTIFACT_SIGNED_MSI_PATHS
artifact_prefix = get_artifact_prefix(job)
if locale:
@ -298,7 +306,11 @@ def generate_upstream_artifacts(job, dependencies, platform, locale=None, projec
for task_type, cot_type, paths in [
('repackage', 'repackage', repackage_mapping),
('repackage-signing', 'repackage', repackage_signing_mapping),
('repackage-signing-msi', 'repackage', msi_signing_mapping),
]:
if task_type not in dependencies:
continue
paths = ["{}/{}".format(artifact_prefix, path) for path in paths]
paths = [
path for path in paths

View file

@ -3,7 +3,8 @@
user_pref("app.update.disabledForTesting", true);
user_pref("browser.chrome.guess_favicon", false);
user_pref("browser.dom.window.dump.enabled", true);
// Use an empty list of sites to avoid fetching
// Use a python-eval-able empty JSON array even though asrouter expects a plain object
user_pref("browser.newtabpage.activity-stream.asrouter.providers.snippets", "[]");
user_pref("browser.newtabpage.activity-stream.feeds.section.topstories", false);
user_pref("browser.newtabpage.activity-stream.feeds.snippets", false);
user_pref("browser.newtabpage.activity-stream.tippyTop.service.endpoint", "");

View file

@ -6123,10 +6123,10 @@
"description": "Firefox: Time in ms spent on switching tabs in response to a tab click"
},
"FX_TAB_REMOTE_NAVIGATION_DELAY_MS": {
"record_in_processes": ["main", "content"],
"record_in_processes": ["content"],
"alert_emails": ["mconley@mozilla.com"],
"bug_numbers": [1352961],
"expires_in_version": "66",
"bug_numbers": [1352961, 1501295],
"expires_in_version": "69",
"kind": "exponential",
"high": 4000,
"n_buckets": 100,

View file

@ -45,7 +45,7 @@ extern "C" {
nsProfiler::SymbolTable* symbol_table);
}
NS_IMPL_ISUPPORTS(nsProfiler, nsIProfiler)
NS_IMPL_ISUPPORTS(nsProfiler, nsIProfiler, nsIObserver)
nsProfiler::nsProfiler()
: mLockedForPrivateBrowsing(false)

View file

@ -12,16 +12,20 @@ For staging a beta release, run the following (with an appropriate version numbe
.. code-block:: shell
$ mach try release --version 64.0b5 --migration central-to-beta
$ mach try release --version 64.0b5 --migration central-to-beta
For staging a final release (rc or patch), run the following (with an appropriate version number)
.. code-block:: shell
$ mach try release --version 64.0 --migration central-to-beta --migration beta-to-release
$ mach try release --version 64.0 --migration central-to-beta --migration beta-to-release
Once the decision task on the push is complete, you can start the release
through `staging ship-it instance <https://shipit.staging.mozilla-releng.net/new>`_\ [#shipit]_.
.. note::
If pushing from beta or release, the corresponding migration should not be
passed, as they have already been applied.
.. [#shipit] This is only available to release engineering and release management (as of 2018-10-15).

View file

@ -54,16 +54,17 @@ class ReleaseParser(BaseTryParser):
]
common_groups = ['push']
def __init__(self, *args, **kwargs):
super(ReleaseParser, self).__init__(*args, **kwargs)
self.set_defaults(migrations=[])
def run_try_release(
version, migrations, limit_locales, tasks,
push=True, message='{msg}', **kwargs
):
if version.is_beta:
app_version = attr.evolve(version, beta_number=None)
else:
app_version = version
app_version = attr.evolve(version, beta_number=None, is_esr=False)
files_to_change = {
'browser/config/version.txt': '{}\n'.format(app_version),
@ -76,6 +77,8 @@ def run_try_release(
raise Exception(
"Can't do staging release for version: {} type: {}".format(
version, version.version_type))
elif release_type == 'esr':
release_type += str(version.major_number)
task_config = {
'version': 2,
'parameters': {

View file

@ -90,7 +90,7 @@ nsClipboard::~nsClipboard()
}
}
NS_IMPL_ISUPPORTS(nsClipboard, nsIClipboard)
NS_IMPL_ISUPPORTS(nsClipboard, nsIClipboard, nsIObserver)
nsresult
nsClipboard::Init(void)

View file

@ -115,7 +115,7 @@ nsSound::GetInstance()
#define SND_PURGE 0
#endif
NS_IMPL_ISUPPORTS(nsSound, nsISound, nsIStreamLoaderObserver)
NS_IMPL_ISUPPORTS(nsSound, nsISound, nsIStreamLoaderObserver, nsIObserver)
nsSound::nsSound()