MozReview-Commit-ID: BYjLy4wkFjg
This commit is contained in:
Wes Kocher 2017-02-23 16:57:34 -08:00
Parent a9db6df686 47dc9207cd
Commit 39dbca81b0
150 changed files: 17754 additions and 28745 deletions

View file

@ -78,6 +78,9 @@ check_and_add_gcc_warning('-Werror=non-literal-null-conversion',
# catches string literals used in boolean expressions
check_and_add_gcc_warning('-Wstring-conversion')
# catches inconsistent use of mutexes
check_and_add_gcc_warning('-Wthread-safety')
# we inline 'new' and 'delete' in mozalloc
check_and_add_gcc_warning('-Wno-inline-new-delete', cxx_compiler)

View file

@ -79,4 +79,4 @@ to make sure that mozjs_sys also has its Cargo.lock file updated if needed, hence
the need to run the cargo update command in js/src as well. Hopefully this will
be resolved soon.
Latest Commit: 938b32ca93bf5e878422ac4bafcdd53f8058f880
Latest Commit: edc74274d28b1fa1229a1d1ea05027f57172b992

View file

@ -922,6 +922,7 @@ struct ParamTraits<mozilla::layers::TextureFactoryIdentifier>
WriteParam(aMsg, aParam.mParentBackend);
WriteParam(aMsg, aParam.mParentProcessType);
WriteParam(aMsg, aParam.mMaxTextureSize);
WriteParam(aMsg, aParam.mCompositorUseANGLE);
WriteParam(aMsg, aParam.mSupportsTextureBlitting);
WriteParam(aMsg, aParam.mSupportsPartialUploads);
WriteParam(aMsg, aParam.mSupportsComponentAlpha);
@ -933,6 +934,7 @@ struct ParamTraits<mozilla::layers::TextureFactoryIdentifier>
bool result = ReadParam(aMsg, aIter, &aResult->mParentBackend) &&
ReadParam(aMsg, aIter, &aResult->mParentProcessType) &&
ReadParam(aMsg, aIter, &aResult->mMaxTextureSize) &&
ReadParam(aMsg, aIter, &aResult->mCompositorUseANGLE) &&
ReadParam(aMsg, aIter, &aResult->mSupportsTextureBlitting) &&
ReadParam(aMsg, aIter, &aResult->mSupportsPartialUploads) &&
ReadParam(aMsg, aIter, &aResult->mSupportsComponentAlpha) &&
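The Write/Read pair above has to stay symmetric: the new mCompositorUseANGLE flag is inserted at the same position in both WriteParam and ReadParam, because deserialization consumes fields in exactly the order they were written. A minimal standalone Rust sketch of that symmetry (plain byte buffer and simplified fields, not Gecko's actual ParamTraits/IPC machinery):

use std::convert::TryInto;

struct TextureFactoryIdentifier {
    max_texture_size: i32,
    compositor_use_angle: bool,
    supports_texture_blitting: bool,
}

fn write(id: &TextureFactoryIdentifier, buf: &mut Vec<u8>) {
    // Fields go out in a fixed order...
    buf.extend_from_slice(&id.max_texture_size.to_le_bytes());
    buf.push(id.compositor_use_angle as u8);
    buf.push(id.supports_texture_blitting as u8);
}

fn read(buf: &[u8]) -> Option<TextureFactoryIdentifier> {
    // ...and must be read back in exactly the same order.
    Some(TextureFactoryIdentifier {
        max_texture_size: i32::from_le_bytes(buf.get(0..4)?.try_into().ok()?),
        compositor_use_angle: *buf.get(4)? != 0,
        supports_texture_blitting: *buf.get(5)? != 0,
    })
}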

View file

@ -168,6 +168,7 @@ struct TextureFactoryIdentifier
LayersBackend mParentBackend;
GeckoProcessType mParentProcessType;
int32_t mMaxTextureSize;
bool mCompositorUseANGLE;
bool mSupportsTextureBlitting;
bool mSupportsPartialUploads;
bool mSupportsComponentAlpha;
@ -176,6 +177,7 @@ struct TextureFactoryIdentifier
explicit TextureFactoryIdentifier(LayersBackend aLayersBackend = LayersBackend::LAYERS_NONE,
GeckoProcessType aParentProcessType = GeckoProcessType_Default,
int32_t aMaxTextureSize = 4096,
bool aCompositorUseANGLE = false,
bool aSupportsTextureBlitting = false,
bool aSupportsPartialUploads = false,
bool aSupportsComponentAlpha = true,
@ -183,6 +185,7 @@ struct TextureFactoryIdentifier
: mParentBackend(aLayersBackend)
, mParentProcessType(aParentProcessType)
, mMaxTextureSize(aMaxTextureSize)
, mCompositorUseANGLE(aCompositorUseANGLE)
, mSupportsTextureBlitting(aSupportsTextureBlitting)
, mSupportsPartialUploads(aSupportsPartialUploads)
, mSupportsComponentAlpha(aSupportsComponentAlpha)

View file

@ -62,6 +62,11 @@ public:
return mTextureFactoryIdentifier.mSupportsComponentAlpha;
}
bool GetCompositorUseANGLE() const
{
return mTextureFactoryIdentifier.mCompositorUseANGLE;
}
const TextureFactoryIdentifier& GetTextureFactoryIdentifier() const
{
return mTextureFactoryIdentifier;

View file

@ -141,6 +141,7 @@ public:
TextureFactoryIdentifier(LayersBackend::LAYERS_OPENGL,
XRE_GetProcessType(),
GetMaxTextureSize(),
false,
mFBOTextureTarget == LOCAL_GL_TEXTURE_2D,
SupportsPartialTextureUpdate());
return result;

View file

@ -696,7 +696,8 @@ WebRenderBridgeParent::GetTextureFactoryIdentifier()
return TextureFactoryIdentifier(LayersBackend::LAYERS_WR,
XRE_GetProcessType(),
mApi->GetMaxTextureSize());
mApi->GetMaxTextureSize(),
mApi->GetUseANGLE());
}
} // namespace layers

View file

@ -1,6 +1,6 @@
[package]
name = "webrender"
version = "0.15.0"
version = "0.19.0"
authors = ["Glenn Watson <gw@intuitionlibrary.com>"]
license = "MPL-2.0"
repository = "https://github.com/servo/webrender"
@ -14,17 +14,17 @@ serde_derive = ["webrender_traits/serde_derive"]
profiler = ["thread_profiler/thread_profiler"]
[dependencies]
app_units = "0.3"
bincode = "0.6"
app_units = "0.4"
bincode = "1.0.0-alpha2"
bit-set = "0.4"
byteorder = "1.0"
euclid = "0.10.3"
euclid = "0.11"
fnv="1.0"
gleam = "0.2.30"
lazy_static = "0.2"
log = "0.3"
num-traits = "0.1.32"
offscreen_gl_context = {version = "0.5", features = ["serde_serialization", "osmesa"]}
offscreen_gl_context = {version = "0.6", features = ["serde", "osmesa"]}
time = "0.1"
threadpool = "1.3.2"
webrender_traits = {path = "../webrender_traits", default-features = false}
@ -39,8 +39,8 @@ angle = {git = "https://github.com/servo/angle", branch = "servo"}
freetype = { version = "0.2", default-features = false }
[target.'cfg(target_os = "windows")'.dependencies]
dwrote = "0.1.7"
servo-dwrote = "0.2"
[target.'cfg(target_os = "macos")'.dependencies]
core-graphics = "0.6.0"
core-text = "3.0"
core-graphics = "0.7.0"
core-text = "4.0"

View file

@ -19,7 +19,7 @@ void main(void) {
// The render task origin is in device-pixels. Offset that by
// the glyph offset, relative to its primitive bounding rect.
vec2 size = res.uv_rect.zw - res.uv_rect.xy;
vec2 origin = task.data0.xy + uDevicePixelRatio * (glyph.offset.xy - pg.local_rect.xy);
vec2 origin = task.data0.xy + uDevicePixelRatio * (glyph.offset.xy - pg.local_rect.p0);
vec4 local_rect = vec4(origin, size);
vec2 texture_size = vec2(textureSize(sColor0, 0));

View file

@ -86,10 +86,53 @@ ivec2 get_fetch_uv_8(int index) {
return get_fetch_uv(index, 8);
}
struct RectWithSize {
vec2 p0;
vec2 size;
};
struct RectWithEndpoint {
vec2 p0;
vec2 p1;
};
RectWithEndpoint to_rect_with_endpoint(RectWithSize rect) {
RectWithEndpoint result;
result.p0 = rect.p0;
result.p1 = rect.p0 + rect.size;
return result;
}
RectWithSize to_rect_with_size(RectWithEndpoint rect) {
RectWithSize result;
result.p0 = rect.p0;
result.size = rect.p1 - rect.p0;
return result;
}
vec2 clamp_rect(vec2 point, RectWithSize rect) {
return clamp(point, rect.p0, rect.p0 + rect.size);
}
vec2 clamp_rect(vec2 point, RectWithEndpoint rect) {
return clamp(point, rect.p0, rect.p1);
}
// Clamp 2 points at once.
vec4 clamp_rect(vec4 points, RectWithSize rect) {
return clamp(points, rect.p0.xyxy, rect.p0.xyxy + rect.size.xyxy);
}
vec4 clamp_rect(vec4 points, RectWithEndpoint rect) {
return clamp(points, rect.p0.xyxy, rect.p1.xyxy);
}
struct Layer {
mat4 transform;
mat4 inv_transform;
vec4 local_clip_rect;
RectWithSize local_clip_rect;
vec4 screen_vertices[4];
};
@ -114,7 +157,8 @@ Layer fetch_layer(int index) {
layer.inv_transform[2] = texelFetchOffset(sLayers, uv0, 0, ivec2(6, 0));
layer.inv_transform[3] = texelFetchOffset(sLayers, uv0, 0, ivec2(7, 0));
layer.local_clip_rect = texelFetchOffset(sLayers, uv1, 0, ivec2(0, 0));
vec4 clip_rect = texelFetchOffset(sLayers, uv1, 0, ivec2(0, 0));
layer.local_clip_rect = RectWithSize(clip_rect.xy, clip_rect.zw);
layer.screen_vertices[0] = texelFetchOffset(sLayers, uv1, 0, ivec2(1, 0));
layer.screen_vertices[1] = texelFetchOffset(sLayers, uv1, 0, ivec2(2, 0));
@ -246,17 +290,17 @@ Glyph fetch_glyph(int index) {
return glyph;
}
vec4 fetch_instance_geometry(int index) {
RectWithSize fetch_instance_geometry(int index) {
ivec2 uv = get_fetch_uv_1(index);
vec4 rect = texelFetchOffset(sData16, uv, 0, ivec2(0, 0));
return rect;
return RectWithSize(rect.xy, rect.zw);
}
struct PrimitiveGeometry {
vec4 local_rect;
vec4 local_clip_rect;
RectWithSize local_rect;
RectWithSize local_clip_rect;
};
PrimitiveGeometry fetch_prim_geometry(int index) {
@ -264,8 +308,10 @@ PrimitiveGeometry fetch_prim_geometry(int index) {
ivec2 uv = get_fetch_uv(index, VECS_PER_PRIM_GEOM);
pg.local_rect = texelFetchOffset(sPrimGeometry, uv, 0, ivec2(0, 0));
pg.local_clip_rect = texelFetchOffset(sPrimGeometry, uv, 0, ivec2(1, 0));
vec4 local_rect = texelFetchOffset(sPrimGeometry, uv, 0, ivec2(0, 0));
pg.local_rect = RectWithSize(local_rect.xy, local_rect.zw);
vec4 local_clip_rect = texelFetchOffset(sPrimGeometry, uv, 0, ivec2(1, 0));
pg.local_clip_rect = RectWithSize(local_clip_rect.xy, local_clip_rect.zw);
return pg;
}
@ -322,8 +368,8 @@ struct Primitive {
Layer layer;
ClipArea clip_area;
AlphaBatchTask task;
vec4 local_rect;
vec4 local_clip_rect;
RectWithSize local_rect;
RectWithSize local_clip_rect;
int prim_index;
// when sending multiple primitives of the same type (e.g. border segments)
// this index allows the vertex shader to recognize the difference
@ -402,47 +448,28 @@ vec4 get_layer_pos(vec2 pos, Layer layer) {
return untransform(pos, n, a, layer.inv_transform);
}
vec2 clamp_rect(vec2 point, vec4 rect) {
return clamp(point, rect.xy, rect.xy + rect.zw);
}
struct Rect {
vec2 p0;
vec2 p1;
};
struct VertexInfo {
Rect local_rect;
RectWithEndpoint local_rect;
vec2 local_pos;
vec2 screen_pos;
};
VertexInfo write_vertex(vec4 instance_rect,
vec4 local_clip_rect,
VertexInfo write_vertex(RectWithSize instance_rect,
RectWithSize local_clip_rect,
float z,
Layer layer,
AlphaBatchTask task) {
// Get the min/max local space coords of the rectangle.
vec2 local_p0 = instance_rect.xy;
vec2 local_p1 = instance_rect.xy + instance_rect.zw;
// Get the min/max coords of the local space clip rect.
vec2 local_clip_p0 = local_clip_rect.xy;
vec2 local_clip_p1 = local_clip_rect.xy + local_clip_rect.zw;
// Get the min/max coords of the layer clip rect.
vec2 layer_clip_p0 = layer.local_clip_rect.xy;
vec2 layer_clip_p1 = layer.local_clip_rect.xy + layer.local_clip_rect.zw;
RectWithEndpoint local_rect = to_rect_with_endpoint(instance_rect);
// Select the corner of the local rect that we are processing.
vec2 local_pos = mix(local_p0, local_p1, aPosition.xy);
vec2 local_pos = mix(local_rect.p0, local_rect.p1, aPosition.xy);
// xy = top left corner of the local rect, zw = position of current vertex.
vec4 local_p0_pos = vec4(local_p0, local_pos);
vec4 local_p0_pos = vec4(local_rect.p0, local_pos);
// Clamp to the two local clip rects.
local_p0_pos = clamp(local_p0_pos, local_clip_p0.xyxy, local_clip_p1.xyxy);
local_p0_pos = clamp(local_p0_pos, layer_clip_p0.xyxy, layer_clip_p1.xyxy);
local_p0_pos = clamp_rect(local_p0_pos, local_clip_rect);
local_p0_pos = clamp_rect(local_p0_pos, layer.local_clip_rect);
// Transform the top corner and current vertex to world space.
vec4 world_p0 = layer.transform * vec4(local_p0_pos.xy, 0.0, 1.0);
@ -464,7 +491,7 @@ VertexInfo write_vertex(vec4 instance_rect,
gl_Position = uTransform * vec4(final_pos, z, 1.0);
VertexInfo vi = VertexInfo(Rect(local_p0, local_p1), local_p0_pos.zw, device_p0_pos.zw);
VertexInfo vi = VertexInfo(local_rect, local_p0_pos.zw, device_p0_pos.zw);
return vi;
}
@ -476,13 +503,13 @@ struct TransformVertexInfo {
vec4 clipped_local_rect;
};
TransformVertexInfo write_transform_vertex(vec4 instance_rect,
vec4 local_clip_rect,
TransformVertexInfo write_transform_vertex(RectWithSize instance_rect,
RectWithSize local_clip_rect,
float z,
Layer layer,
AlphaBatchTask task) {
vec2 lp0_base = instance_rect.xy;
vec2 lp1_base = instance_rect.xy + instance_rect.zw;
vec2 lp0_base = instance_rect.p0;
vec2 lp1_base = instance_rect.p0 + instance_rect.size;
vec2 lp0 = clamp_rect(clamp_rect(lp0_base, local_clip_rect),
layer.local_clip_rect);
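The shared shader now distinguishes two rect encodings, RectWithSize (origin plus size) and RectWithEndpoint (p0/p1), and converts between them before clamping. A rough standalone Rust analog of the same idea (simplified Vec2 and rect types, not the GLSL or WebRender code):

// Two rect representations: origin+size vs. endpoints, plus a point clamp.
#[derive(Clone, Copy, Debug)]
struct Vec2 { x: f32, y: f32 }

#[derive(Clone, Copy, Debug)]
struct RectWithSize { p0: Vec2, size: Vec2 }

#[derive(Clone, Copy, Debug)]
struct RectWithEndpoint { p0: Vec2, p1: Vec2 }

fn to_rect_with_endpoint(r: RectWithSize) -> RectWithEndpoint {
    RectWithEndpoint {
        p0: r.p0,
        p1: Vec2 { x: r.p0.x + r.size.x, y: r.p0.y + r.size.y },
    }
}

fn clamp_rect(p: Vec2, r: RectWithSize) -> Vec2 {
    // Clamp a point into the rect, mirroring the GLSL clamp_rect helpers.
    let e = to_rect_with_endpoint(r);
    Vec2 {
        x: p.x.clamp(e.p0.x, e.p1.x),
        y: p.y.clamp(e.p0.y, e.p1.y),
    }
}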

View file

@ -32,29 +32,30 @@ void main(void) {
Border border = fetch_border(prim.prim_index);
int sub_part = prim.sub_index;
vec2 tl_outer = prim.local_rect.xy;
vec2 tl_outer = prim.local_rect.p0;
vec2 tl_inner = tl_outer + vec2(max(border.radii[0].x, border.widths.x),
max(border.radii[0].y, border.widths.y));
vec2 tr_outer = vec2(prim.local_rect.x + prim.local_rect.z,
prim.local_rect.y);
vec2 tr_outer = vec2(prim.local_rect.p0.x + prim.local_rect.size.x,
prim.local_rect.p0.y);
vec2 tr_inner = tr_outer + vec2(-max(border.radii[0].z, border.widths.z),
max(border.radii[0].w, border.widths.y));
vec2 br_outer = vec2(prim.local_rect.x + prim.local_rect.z,
prim.local_rect.y + prim.local_rect.w);
vec2 br_outer = vec2(prim.local_rect.p0.x + prim.local_rect.size.x,
prim.local_rect.p0.y + prim.local_rect.size.y);
vec2 br_inner = br_outer - vec2(max(border.radii[1].x, border.widths.z),
max(border.radii[1].y, border.widths.w));
vec2 bl_outer = vec2(prim.local_rect.x,
prim.local_rect.y + prim.local_rect.w);
vec2 bl_outer = vec2(prim.local_rect.p0.x,
prim.local_rect.p0.y + prim.local_rect.size.y);
vec2 bl_inner = bl_outer + vec2(max(border.radii[1].z, border.widths.x),
-max(border.radii[1].w, border.widths.w));
vec4 segment_rect;
RectWithSize segment_rect;
switch (sub_part) {
case PST_TOP_LEFT:
segment_rect = vec4(tl_outer, tl_inner - tl_outer);
segment_rect.p0 = tl_outer;
segment_rect.size = tl_inner - tl_outer;
vBorderStyle = int(border.style.x);
vHorizontalColor = border.colors[BORDER_LEFT];
vVerticalColor = border.colors[BORDER_TOP];
@ -62,10 +63,8 @@ void main(void) {
border.radii[0].xy - border.widths.xy);
break;
case PST_TOP_RIGHT:
segment_rect = vec4(tr_inner.x,
tr_outer.y,
tr_outer.x - tr_inner.x,
tr_inner.y - tr_outer.y);
segment_rect.p0 = vec2(tr_inner.x, tr_outer.y);
segment_rect.size = vec2(tr_outer.x - tr_inner.x, tr_inner.y - tr_outer.y);
vBorderStyle = int(border.style.y);
vHorizontalColor = border.colors[BORDER_TOP];
vVerticalColor = border.colors[BORDER_RIGHT];
@ -73,7 +72,8 @@ void main(void) {
border.radii[0].zw - border.widths.zy);
break;
case PST_BOTTOM_RIGHT:
segment_rect = vec4(br_inner, br_outer - br_inner);
segment_rect.p0 = br_inner;
segment_rect.size = br_outer - br_inner;
vBorderStyle = int(border.style.z);
vHorizontalColor = border.colors[BORDER_BOTTOM];
vVerticalColor = border.colors[BORDER_RIGHT];
@ -81,10 +81,8 @@ void main(void) {
border.radii[1].xy - border.widths.zw);
break;
case PST_BOTTOM_LEFT:
segment_rect = vec4(bl_outer.x,
bl_inner.y,
bl_inner.x - bl_outer.x,
bl_outer.y - bl_inner.y);
segment_rect.p0 = vec2(bl_outer.x, bl_inner.y);
segment_rect.size = vec2(bl_inner.x - bl_outer.x, bl_outer.y - bl_inner.y);
vBorderStyle = int(border.style.w);
vHorizontalColor = border.colors[BORDER_BOTTOM];
vVerticalColor = border.colors[BORDER_LEFT];
@ -92,40 +90,32 @@ void main(void) {
border.radii[1].zw - border.widths.xw);
break;
case PST_LEFT:
segment_rect = vec4(tl_outer.x,
tl_inner.y,
border.widths.x,
bl_inner.y - tl_inner.y);
segment_rect.p0 = vec2(tl_outer.x, tl_inner.y);
segment_rect.size = vec2(border.widths.x, bl_inner.y - tl_inner.y);
vBorderStyle = int(border.style.x);
vHorizontalColor = border.colors[BORDER_LEFT];
vVerticalColor = border.colors[BORDER_LEFT];
vRadii = vec4(0.0);
break;
case PST_RIGHT:
segment_rect = vec4(tr_outer.x - border.widths.z,
tr_inner.y,
border.widths.z,
br_inner.y - tr_inner.y);
segment_rect.p0 = vec2(tr_outer.x - border.widths.z, tr_inner.y);
segment_rect.size = vec2(border.widths.z, br_inner.y - tr_inner.y);
vBorderStyle = int(border.style.z);
vHorizontalColor = border.colors[BORDER_RIGHT];
vVerticalColor = border.colors[BORDER_RIGHT];
vRadii = vec4(0.0);
break;
case PST_BOTTOM:
segment_rect = vec4(bl_inner.x,
bl_outer.y - border.widths.w,
br_inner.x - bl_inner.x,
border.widths.w);
segment_rect.p0 = vec2(bl_inner.x, bl_outer.y - border.widths.w);
segment_rect.size = vec2(br_inner.x - bl_inner.x, border.widths.w);
vBorderStyle = int(border.style.w);
vHorizontalColor = border.colors[BORDER_BOTTOM];
vVerticalColor = border.colors[BORDER_BOTTOM];
vRadii = vec4(0.0);
break;
case PST_TOP:
segment_rect = vec4(tl_inner.x,
tl_outer.y,
tr_inner.x - tl_inner.x,
border.widths.y);
segment_rect.p0 = vec2(tl_inner.x, tl_outer.y);
segment_rect.size = vec2(tr_inner.x - tl_inner.x, border.widths.y);
vBorderStyle = int(border.style.y);
vHorizontalColor = border.colors[BORDER_TOP];
vVerticalColor = border.colors[BORDER_TOP];
@ -152,53 +142,53 @@ void main(void) {
vLocalPos = vi.local_pos.xy;
// Local space
vLocalRect = prim.local_rect;
vLocalRect = vec4(prim.local_rect.p0, prim.local_rect.size);
#endif
float x0, y0, x1, y1;
switch (sub_part) {
// These are the layer tile part PrimitivePart as uploaded by the tiling.rs
case PST_TOP_LEFT:
x0 = segment_rect.x;
y0 = segment_rect.y;
x0 = segment_rect.p0.x;
y0 = segment_rect.p0.y;
// These are width / heights
x1 = segment_rect.x + segment_rect.z;
y1 = segment_rect.y + segment_rect.w;
x1 = segment_rect.p0.x + segment_rect.size.x;
y1 = segment_rect.p0.y + segment_rect.size.y;
// The radius here is the border-radius. This is 0, so vRefPoint will
// just be the top left (x,y) corner.
vRefPoint = vec2(x0, y0) + vRadii.xy;
break;
case PST_TOP_RIGHT:
x0 = segment_rect.x + segment_rect.z;
y0 = segment_rect.y;
x1 = segment_rect.x;
y1 = segment_rect.y + segment_rect.w;
x0 = segment_rect.p0.x + segment_rect.size.x;
y0 = segment_rect.p0.y;
x1 = segment_rect.p0.x;
y1 = segment_rect.p0.y + segment_rect.size.y;
vRefPoint = vec2(x0, y0) + vec2(-vRadii.x, vRadii.y);
break;
case PST_BOTTOM_LEFT:
x0 = segment_rect.x;
y0 = segment_rect.y + segment_rect.w;
x1 = segment_rect.x + segment_rect.z;
y1 = segment_rect.y;
x0 = segment_rect.p0.x;
y0 = segment_rect.p0.y + segment_rect.size.y;
x1 = segment_rect.p0.x + segment_rect.size.x;
y1 = segment_rect.p0.y;
vRefPoint = vec2(x0, y0) + vec2(vRadii.x, -vRadii.y);
break;
case PST_BOTTOM_RIGHT:
x0 = segment_rect.x;
y0 = segment_rect.y;
x1 = segment_rect.x + segment_rect.z;
y1 = segment_rect.y + segment_rect.w;
x0 = segment_rect.p0.x;
y0 = segment_rect.p0.y;
x1 = segment_rect.p0.x + segment_rect.size.x;
y1 = segment_rect.p0.y + segment_rect.size.y;
vRefPoint = vec2(x1, y1) + vec2(-vRadii.x, -vRadii.y);
break;
case PST_TOP:
case PST_LEFT:
case PST_BOTTOM:
case PST_RIGHT:
vRefPoint = segment_rect.xy;
x0 = segment_rect.x;
y0 = segment_rect.y;
x1 = segment_rect.x + segment_rect.z;
y1 = segment_rect.y + segment_rect.w;
vRefPoint = segment_rect.p0.xy;
x0 = segment_rect.p0.x;
y0 = segment_rect.p0.y;
x1 = segment_rect.p0.x + segment_rect.size.x;
y1 = segment_rect.p0.y + segment_rect.size.y;
break;
}

View file

@ -6,7 +6,7 @@
void main(void) {
Primitive prim = load_primitive();
BoxShadow bs = fetch_boxshadow(prim.prim_index);
vec4 segment_rect = fetch_instance_geometry(prim.sub_index);
RectWithSize segment_rect = fetch_instance_geometry(prim.sub_index);
VertexInfo vi = write_vertex(segment_rect,
prim.local_clip_rect,
@ -22,8 +22,8 @@ void main(void) {
vec2 patch_size_device_pixels = child_task.data0.zw - vec2(2.0);
vec2 patch_size = patch_size_device_pixels / uDevicePixelRatio;
vUv.xy = (vi.local_pos - prim.local_rect.xy) / patch_size;
vMirrorPoint = 0.5 * prim.local_rect.zw / patch_size;
vUv.xy = (vi.local_pos - prim.local_rect.p0) / patch_size;
vMirrorPoint = 0.5 * prim.local_rect.size / patch_size;
vec2 texture_size = vec2(textureSize(sCache, 0));
vCacheUvRectCoords = vec4(patch_origin, patch_origin + patch_size_device_pixels) / texture_size.xyxy;

View file

@ -22,7 +22,7 @@ void main(void) {
vec2 uv0 = child_task.data0.xy / texture_size;
vec2 uv1 = (child_task.data0.xy + child_task.data0.zw) / texture_size;
vec2 f = (vi.local_pos - prim.local_rect.xy) / prim.local_rect.zw;
vec2 f = (vi.local_pos - prim.local_rect.p0) / prim.local_rect.size;
vUv.xy = mix(uv0, uv1, f);
}

View file

@ -10,7 +10,7 @@ void main(void) {
GradientStop g0 = fetch_gradient_stop(prim.sub_index + 0);
GradientStop g1 = fetch_gradient_stop(prim.sub_index + 1);
vec4 segment_rect;
RectWithSize segment_rect;
vec2 axis;
if (gradient.start_end_point.y == gradient.start_end_point.w) {
float x0 = mix(gradient.start_end_point.x,
@ -19,9 +19,8 @@ void main(void) {
float x1 = mix(gradient.start_end_point.x,
gradient.start_end_point.z,
g1.offset.x);
segment_rect.yw = prim.local_rect.yw;
segment_rect.x = x0;
segment_rect.z = x1 - x0;
segment_rect.p0 = vec2(x0, prim.local_rect.p0.y);
segment_rect.size = vec2(x1 - x0, prim.local_rect.size.y);
axis = vec2(1.0, 0.0);
} else {
float y0 = mix(gradient.start_end_point.y,
@ -30,9 +29,8 @@ void main(void) {
float y1 = mix(gradient.start_end_point.y,
gradient.start_end_point.w,
g1.offset.x);
segment_rect.xz = prim.local_rect.xz;
segment_rect.y = y0;
segment_rect.w = y1 - y0;
segment_rect.p0 = vec2(prim.local_rect.p0.x, y0);
segment_rect.size = vec2(prim.local_rect.size.x, y1 - y0);
axis = vec2(0.0, 1.0);
}
@ -44,7 +42,7 @@ void main(void) {
prim.task);
vLocalRect = vi.clipped_local_rect;
vLocalPos = vi.local_pos;
vec2 f = (vi.local_pos.xy - prim.local_rect.xy) / prim.local_rect.zw;
vec2 f = (vi.local_pos.xy - prim.local_rect.p0) / prim.local_rect.size;
#else
VertexInfo vi = write_vertex(segment_rect,
prim.local_clip_rect,
@ -52,7 +50,7 @@ void main(void) {
prim.layer,
prim.task);
vec2 f = (vi.local_pos - segment_rect.xy) / segment_rect.zw;
vec2 f = (vi.local_pos - segment_rect.p0) / segment_rect.size;
vPos = vi.local_pos;
#endif

View file

@ -9,7 +9,8 @@ void main(void) {
Glyph glyph = fetch_glyph(prim.sub_index);
ResourceRect res = fetch_resource_rect(prim.user_data.x);
vec4 local_rect = vec4(glyph.offset.xy, (res.uv_rect.zw - res.uv_rect.xy) / uDevicePixelRatio);
RectWithSize local_rect = RectWithSize(glyph.offset.xy,
(res.uv_rect.zw - res.uv_rect.xy) / uDevicePixelRatio);
#ifdef WR_FEATURE_TRANSFORM
TransformVertexInfo vi = write_transform_vertex(local_rect,
@ -19,7 +20,7 @@ void main(void) {
prim.task);
vLocalRect = vi.clipped_local_rect;
vLocalPos = vi.local_pos;
vec2 f = (vi.local_pos.xy / vi.local_pos.z - local_rect.xy) / local_rect.zw;
vec2 f = (vi.local_pos.xy / vi.local_pos.z - local_rect.p0) / local_rect.size;
#else
VertexInfo vi = write_vertex(local_rect,
prim.local_clip_rect,

View file

@ -19,7 +19,7 @@ const CAN_OVERSCROLL: bool = false;
/// Contains scrolling and transform information for stacking contexts.
#[derive(Clone)]
pub struct Layer {
pub struct ClipScrollNode {
/// Manages scrolling offset, overscroll state etc.
pub scrolling: ScrollingState,
@ -48,13 +48,13 @@ pub struct Layer {
pub children: Vec<ScrollLayerId>,
}
impl Layer {
impl ClipScrollNode {
pub fn new(local_viewport_rect: &LayerRect,
content_size: LayerSize,
local_transform: &LayerToScrollTransform,
pipeline_id: PipelineId)
-> Layer {
Layer {
-> ClipScrollNode {
ClipScrollNode {
scrolling: ScrollingState::new(),
content_size: content_size,
local_viewport_rect: *local_viewport_rect,
@ -244,7 +244,7 @@ impl Layer {
}
}
pub fn ray_intersects_layer(&self, cursor: &WorldPoint) -> bool {
pub fn ray_intersects_node(&self, cursor: &WorldPoint) -> bool {
let inv = self.world_viewport_transform.inverse().unwrap();
let z0 = -10000.0;
let z1 = 10000.0;

View file

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use fnv::FnvHasher;
use layer::{Layer, ScrollingState};
use clip_scroll_node::{ClipScrollNode, ScrollingState};
use std::collections::{HashMap, HashSet};
use std::hash::BuildHasherDefault;
use webrender_traits::{LayerPoint, LayerRect, LayerSize, LayerToScrollTransform, PipelineId};
@ -13,25 +13,25 @@ use webrender_traits::{ServoScrollRootId, WorldPoint, as_scroll_parent_rect};
pub type ScrollStates = HashMap<ScrollLayerId, ScrollingState, BuildHasherDefault<FnvHasher>>;
pub struct ScrollTree {
pub layers: HashMap<ScrollLayerId, Layer, BuildHasherDefault<FnvHasher>>,
pub struct ClipScrollTree {
pub nodes: HashMap<ScrollLayerId, ClipScrollNode, BuildHasherDefault<FnvHasher>>,
pub pending_scroll_offsets: HashMap<(PipelineId, ServoScrollRootId), LayerPoint>,
/// The ScrollLayerId of the currently scrolling layer. Used to allow the same
/// layer to scroll even if a touch operation leaves the boundaries of that layer.
/// The ScrollLayerId of the currently scrolling node. Used to allow the same
/// node to scroll even if a touch operation leaves the boundaries of that node.
pub current_scroll_layer_id: Option<ScrollLayerId>,
/// The current reference frame id, used for giving a unique id to all new
/// reference frames. The ScrollTree increments this by one every time a
/// reference frames. The ClipScrollTree increments this by one every time a
/// reference frame is created.
current_reference_frame_id: usize,
/// The root reference frame, which is the true root of the ScrollTree. Initially
/// this ID is not valid, which is indicated by ```layers``` being empty.
/// The root reference frame, which is the true root of the ClipScrollTree. Initially
/// this ID is not valid, which is indicated by ```nodes``` being empty.
pub root_reference_frame_id: ScrollLayerId,
/// The root scroll layer, which is the first child of the root reference frame.
/// Initially this ID is not valid, which is indicated by ```layers``` being empty.
/// The root scroll node which is the first child of the root reference frame.
/// Initially this ID is not valid, which is indicated by ```nodes``` being empty.
pub topmost_scroll_layer_id: ScrollLayerId,
/// A set of pipelines which should be discarded the next time this
@ -39,11 +39,11 @@ pub struct ScrollTree {
pub pipelines_to_discard: HashSet<PipelineId>,
}
impl ScrollTree {
pub fn new() -> ScrollTree {
impl ClipScrollTree {
pub fn new() -> ClipScrollTree {
let dummy_pipeline = PipelineId(0, 0);
ScrollTree {
layers: HashMap::with_hasher(Default::default()),
ClipScrollTree {
nodes: HashMap::with_hasher(Default::default()),
pending_scroll_offsets: HashMap::new(),
current_scroll_layer_id: None,
root_reference_frame_id: ScrollLayerId::root_reference_frame(dummy_pipeline),
@ -55,15 +55,15 @@ impl ScrollTree {
pub fn root_reference_frame_id(&self) -> ScrollLayerId {
// TODO(mrobinson): We should eventually make this impossible to misuse.
debug_assert!(!self.layers.is_empty());
debug_assert!(self.layers.contains_key(&self.root_reference_frame_id));
debug_assert!(!self.nodes.is_empty());
debug_assert!(self.nodes.contains_key(&self.root_reference_frame_id));
self.root_reference_frame_id
}
pub fn topmost_scroll_layer_id(&self) -> ScrollLayerId {
// TODO(mrobinson): We should eventually make this impossible to misuse.
debug_assert!(!self.layers.is_empty());
debug_assert!(self.layers.contains_key(&self.topmost_scroll_layer_id));
debug_assert!(!self.nodes.is_empty());
debug_assert!(self.nodes.contains_key(&self.topmost_scroll_layer_id));
self.topmost_scroll_layer_id
}
@ -71,41 +71,41 @@ impl ScrollTree {
pipeline_id: PipelineId,
viewport_size: &LayerSize,
content_size: &LayerSize) {
debug_assert!(self.layers.is_empty());
debug_assert!(self.nodes.is_empty());
let identity = LayerToScrollTransform::identity();
let viewport = LayerRect::new(LayerPoint::zero(), *viewport_size);
let root_reference_frame_id = ScrollLayerId::root_reference_frame(pipeline_id);
self.root_reference_frame_id = root_reference_frame_id;
let reference_frame = Layer::new(&viewport, viewport.size, &identity, pipeline_id);
self.layers.insert(self.root_reference_frame_id, reference_frame);
let reference_frame = ClipScrollNode::new(&viewport, viewport.size, &identity, pipeline_id);
self.nodes.insert(self.root_reference_frame_id, reference_frame);
let scroll_layer = Layer::new(&viewport, *content_size, &identity, pipeline_id);
let scroll_node = ClipScrollNode::new(&viewport, *content_size, &identity, pipeline_id);
let topmost_scroll_layer_id = ScrollLayerId::root_scroll_layer(pipeline_id);
self.topmost_scroll_layer_id = topmost_scroll_layer_id;
self.add_layer(scroll_layer, topmost_scroll_layer_id, root_reference_frame_id);
self.add_node(scroll_node, topmost_scroll_layer_id, root_reference_frame_id);
}
pub fn collect_layers_bouncing_back(&self)
-> HashSet<ScrollLayerId, BuildHasherDefault<FnvHasher>> {
let mut layers_bouncing_back = HashSet::with_hasher(Default::default());
for (scroll_layer_id, layer) in self.layers.iter() {
if layer.scrolling.bouncing_back {
layers_bouncing_back.insert(*scroll_layer_id);
pub fn collect_nodes_bouncing_back(&self)
-> HashSet<ScrollLayerId, BuildHasherDefault<FnvHasher>> {
let mut nodes_bouncing_back = HashSet::with_hasher(Default::default());
for (scroll_layer_id, node) in self.nodes.iter() {
if node.scrolling.bouncing_back {
nodes_bouncing_back.insert(*scroll_layer_id);
}
}
layers_bouncing_back
nodes_bouncing_back
}
fn find_scrolling_layer_at_point_in_layer(&self,
cursor: &WorldPoint,
scroll_layer_id: ScrollLayerId)
-> Option<ScrollLayerId> {
self.layers.get(&scroll_layer_id).and_then(|layer| {
for child_layer_id in layer.children.iter().rev() {
fn find_scrolling_node_at_point_in_node(&self,
cursor: &WorldPoint,
scroll_layer_id: ScrollLayerId)
-> Option<ScrollLayerId> {
self.nodes.get(&scroll_layer_id).and_then(|node| {
for child_layer_id in node.children.iter().rev() {
if let Some(layer_id) =
self.find_scrolling_layer_at_point_in_layer(cursor, *child_layer_id) {
self.find_scrolling_node_at_point_in_node(cursor, *child_layer_id) {
return Some(layer_id);
}
}
@ -114,7 +114,7 @@ impl ScrollTree {
return None;
}
if layer.ray_intersects_layer(cursor) {
if node.ray_intersects_node(cursor) {
Some(scroll_layer_id)
} else {
None
@ -122,20 +122,20 @@ impl ScrollTree {
})
}
pub fn find_scrolling_layer_at_point(&self, cursor: &WorldPoint) -> ScrollLayerId {
self.find_scrolling_layer_at_point_in_layer(cursor, self.root_reference_frame_id())
pub fn find_scrolling_node_at_point(&self, cursor: &WorldPoint) -> ScrollLayerId {
self.find_scrolling_node_at_point_in_node(cursor, self.root_reference_frame_id())
.unwrap_or(self.topmost_scroll_layer_id())
}
pub fn get_scroll_layer_state(&self) -> Vec<ScrollLayerState> {
pub fn get_scroll_node_state(&self) -> Vec<ScrollLayerState> {
let mut result = vec![];
for (scroll_layer_id, scroll_layer) in self.layers.iter() {
for (scroll_layer_id, scroll_node) in self.nodes.iter() {
match scroll_layer_id.info {
ScrollLayerInfo::Scrollable(_, servo_scroll_root_id) => {
result.push(ScrollLayerState {
pipeline_id: scroll_layer.pipeline_id,
pipeline_id: scroll_node.pipeline_id,
scroll_root_id: servo_scroll_root_id,
scroll_offset: scroll_layer.scrolling.offset,
scroll_offset: scroll_node.scrolling.offset,
})
}
ScrollLayerInfo::ReferenceFrame(..) => {}
@ -148,9 +148,9 @@ impl ScrollTree {
self.current_reference_frame_id = 1;
let mut scroll_states = HashMap::with_hasher(Default::default());
for (layer_id, old_layer) in &mut self.layers.drain() {
for (layer_id, old_node) in &mut self.nodes.drain() {
if !self.pipelines_to_discard.contains(&layer_id.pipeline_id) {
scroll_states.insert(layer_id, old_layer.scrolling);
scroll_states.insert(layer_id, old_node.scrolling);
}
}
@ -158,21 +158,21 @@ impl ScrollTree {
scroll_states
}
pub fn scroll_layers(&mut self,
origin: LayerPoint,
pipeline_id: PipelineId,
scroll_root_id: ServoScrollRootId)
-> bool {
if self.layers.is_empty() {
pub fn scroll_nodes(&mut self,
origin: LayerPoint,
pipeline_id: PipelineId,
scroll_root_id: ServoScrollRootId)
-> bool {
if self.nodes.is_empty() {
self.pending_scroll_offsets.insert((pipeline_id, scroll_root_id), origin);
return false;
}
let origin = LayerPoint::new(origin.x.max(0.0), origin.y.max(0.0));
let mut scrolled_a_layer = false;
let mut found_layer = false;
for (layer_id, layer) in self.layers.iter_mut() {
let mut scrolled_a_node = false;
let mut found_node = false;
for (layer_id, node) in self.nodes.iter_mut() {
if layer_id.pipeline_id != pipeline_id {
continue;
}
@ -183,15 +183,15 @@ impl ScrollTree {
ScrollLayerInfo::Scrollable(..) => {}
}
found_layer = true;
scrolled_a_layer |= layer.set_scroll_origin(&origin);
found_node = true;
scrolled_a_node |= node.set_scroll_origin(&origin);
}
if !found_layer {
if !found_node {
self.pending_scroll_offsets.insert((pipeline_id, scroll_root_id), origin);
}
scrolled_a_layer
scrolled_a_node
}
pub fn scroll(&mut self,
@ -199,24 +199,24 @@ impl ScrollTree {
cursor: WorldPoint,
phase: ScrollEventPhase)
-> bool {
if self.layers.is_empty() {
if self.nodes.is_empty() {
return false;
}
let scroll_layer_id = match (
phase,
self.find_scrolling_layer_at_point(&cursor),
self.find_scrolling_node_at_point(&cursor),
self.current_scroll_layer_id) {
(ScrollEventPhase::Start, scroll_layer_at_point_id, _) => {
self.current_scroll_layer_id = Some(scroll_layer_at_point_id);
scroll_layer_at_point_id
(ScrollEventPhase::Start, scroll_node_at_point_id, _) => {
self.current_scroll_layer_id = Some(scroll_node_at_point_id);
scroll_node_at_point_id
},
(_, scroll_layer_at_point_id, Some(cached_scroll_layer_id)) => {
let scroll_layer_id = match self.layers.get(&cached_scroll_layer_id) {
(_, scroll_node_at_point_id, Some(cached_scroll_layer_id)) => {
let scroll_layer_id = match self.nodes.get(&cached_scroll_layer_id) {
Some(_) => cached_scroll_layer_id,
None => {
self.current_scroll_layer_id = Some(scroll_layer_at_point_id);
scroll_layer_at_point_id
self.current_scroll_layer_id = Some(scroll_node_at_point_id);
scroll_node_at_point_id
},
};
scroll_layer_id
@ -226,52 +226,52 @@ impl ScrollTree {
let topmost_scroll_layer_id = self.topmost_scroll_layer_id();
let non_root_overscroll = if scroll_layer_id != topmost_scroll_layer_id {
// true if the current layer is overscrolling,
// and it is not the root scroll layer.
let child_layer = self.layers.get(&scroll_layer_id).unwrap();
let overscroll_amount = child_layer.overscroll_amount();
// true if the current node is overscrolling,
// and it is not the root scroll node.
let child_node = self.nodes.get(&scroll_layer_id).unwrap();
let overscroll_amount = child_node.overscroll_amount();
overscroll_amount.width != 0.0 || overscroll_amount.height != 0.0
} else {
false
};
let switch_layer = match phase {
let switch_node = match phase {
ScrollEventPhase::Start => {
// if this is a new gesture, we do not switch layer,
// if this is a new gesture, we do not switch node,
// however we do save the state of non_root_overscroll,
// for use in the subsequent Move phase.
let mut current_layer = self.layers.get_mut(&scroll_layer_id).unwrap();
current_layer.scrolling.should_handoff_scroll = non_root_overscroll;
let mut current_node = self.nodes.get_mut(&scroll_layer_id).unwrap();
current_node.scrolling.should_handoff_scroll = non_root_overscroll;
false
},
ScrollEventPhase::Move(_) => {
// Switch layer if movement originated in a new gesture,
// from a non root layer in overscroll.
let current_layer = self.layers.get_mut(&scroll_layer_id).unwrap();
current_layer.scrolling.should_handoff_scroll && non_root_overscroll
// Switch node if movement originated in a new gesture,
// from a non root node in overscroll.
let current_node = self.nodes.get_mut(&scroll_layer_id).unwrap();
current_node.scrolling.should_handoff_scroll && non_root_overscroll
},
ScrollEventPhase::End => {
// clean-up when gesture ends.
let mut current_layer = self.layers.get_mut(&scroll_layer_id).unwrap();
current_layer.scrolling.should_handoff_scroll = false;
let mut current_node = self.nodes.get_mut(&scroll_layer_id).unwrap();
current_node.scrolling.should_handoff_scroll = false;
false
},
};
let scroll_layer_info = if switch_layer {
let scroll_node_info = if switch_node {
topmost_scroll_layer_id.info
} else {
scroll_layer_id.info
};
let scroll_root_id = match scroll_layer_info {
let scroll_root_id = match scroll_node_info {
ScrollLayerInfo::Scrollable(_, scroll_root_id) => scroll_root_id,
_ => unreachable!("Tried to scroll a reference frame."),
};
let mut scrolled_a_layer = false;
for (layer_id, layer) in self.layers.iter_mut() {
let mut scrolled_a_node = false;
for (layer_id, node) in self.nodes.iter_mut() {
if layer_id.pipeline_id != scroll_layer_id.pipeline_id {
continue;
}
@ -282,66 +282,66 @@ impl ScrollTree {
_ => {}
}
let scrolled_this_layer = layer.scroll(scroll_location, phase);
scrolled_a_layer = scrolled_a_layer || scrolled_this_layer;
let scrolled_this_node = node.scroll(scroll_location, phase);
scrolled_a_node = scrolled_a_node || scrolled_this_node;
}
scrolled_a_layer
scrolled_a_node
}
pub fn update_all_layer_transforms(&mut self) {
if self.layers.is_empty() {
pub fn update_all_node_transforms(&mut self) {
if self.nodes.is_empty() {
return;
}
let root_reference_frame_id = self.root_reference_frame_id();
let root_viewport = self.layers[&root_reference_frame_id].local_viewport_rect;
self.update_layer_transform(root_reference_frame_id,
let root_viewport = self.nodes[&root_reference_frame_id].local_viewport_rect;
self.update_node_transform(root_reference_frame_id,
&ScrollToWorldTransform::identity(),
&as_scroll_parent_rect(&root_viewport));
}
fn update_layer_transform(&mut self,
layer_id: ScrollLayerId,
parent_world_transform: &ScrollToWorldTransform,
parent_viewport_rect: &ScrollLayerRect) {
fn update_node_transform(&mut self,
layer_id: ScrollLayerId,
parent_world_transform: &ScrollToWorldTransform,
parent_viewport_rect: &ScrollLayerRect) {
// TODO(gw): This is an ugly borrow check workaround to clone these.
// Restructure this to avoid the clones!
let (layer_transform_for_children, viewport_rect, layer_children) = {
match self.layers.get_mut(&layer_id) {
Some(layer) => {
layer.update_transform(parent_world_transform, parent_viewport_rect);
let (node_transform_for_children, viewport_rect, node_children) = {
match self.nodes.get_mut(&layer_id) {
Some(node) => {
node.update_transform(parent_world_transform, parent_viewport_rect);
(layer.world_content_transform.with_source::<ScrollLayerPixel>(),
as_scroll_parent_rect(&layer.combined_local_viewport_rect),
layer.children.clone())
(node.world_content_transform.with_source::<ScrollLayerPixel>(),
as_scroll_parent_rect(&node.combined_local_viewport_rect),
node.children.clone())
}
None => return,
}
};
for child_layer_id in layer_children {
self.update_layer_transform(child_layer_id,
&layer_transform_for_children,
&viewport_rect);
for child_layer_id in node_children {
self.update_node_transform(child_layer_id,
&node_transform_for_children,
&viewport_rect);
}
}
pub fn tick_scrolling_bounce_animations(&mut self) {
for (_, layer) in &mut self.layers {
layer.tick_scrolling_bounce_animation()
for (_, node) in &mut self.nodes {
node.tick_scrolling_bounce_animation()
}
}
pub fn finalize_and_apply_pending_scroll_offsets(&mut self, old_states: ScrollStates) {
// TODO(gw): These are all independent - can be run through thread pool if it shows up
// in the profile!
for (scroll_layer_id, layer) in &mut self.layers {
for (scroll_layer_id, node) in &mut self.nodes {
let scrolling_state = match old_states.get(&scroll_layer_id) {
Some(old_scrolling_state) => *old_scrolling_state,
None => ScrollingState::new(),
};
layer.finalize(&scrolling_state);
node.finalize(&scrolling_state);
let scroll_root_id = match scroll_layer_id.info {
ScrollLayerInfo::Scrollable(_, scroll_root_id) => scroll_root_id,
@ -352,7 +352,7 @@ impl ScrollTree {
let pipeline_id = scroll_layer_id.pipeline_id;
if let Some(pending_offset) =
self.pending_scroll_offsets.remove(&(pipeline_id, scroll_root_id)) {
layer.set_scroll_origin(&pending_offset);
node.set_scroll_origin(&pending_offset);
}
}
@ -369,17 +369,17 @@ impl ScrollTree {
};
self.current_reference_frame_id += 1;
let layer = Layer::new(&rect, rect.size, &transform, pipeline_id);
self.add_layer(layer, reference_frame_id, parent_id);
let node = ClipScrollNode::new(&rect, rect.size, &transform, pipeline_id);
self.add_node(node, reference_frame_id, parent_id);
reference_frame_id
}
pub fn add_layer(&mut self, layer: Layer, id: ScrollLayerId, parent_id: ScrollLayerId) {
debug_assert!(!self.layers.contains_key(&id));
self.layers.insert(id, layer);
pub fn add_node(&mut self, node: ClipScrollNode, id: ScrollLayerId, parent_id: ScrollLayerId) {
debug_assert!(!self.nodes.contains_key(&id));
self.nodes.insert(id, node);
debug_assert!(parent_id != id);
self.layers.get_mut(&parent_id).unwrap().add_child(id);
self.nodes.get_mut(&parent_id).unwrap().add_child(id);
}
pub fn discard_frame_state_for_pipeline(&mut self, pipeline_id: PipelineId) {
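The renamed find_scrolling_node_at_point_in_node is a depth-first hit test: children are visited from last (topmost) to first, and the node itself is returned only if the cursor ray intersects it. A simplified standalone Rust sketch of that traversal (integer ids and a boolean stand-in for ray_intersects_node, not the actual ClipScrollTree types):

use std::collections::HashMap;

// Depth-first hit test over a node tree keyed by id.
struct Node {
    children: Vec<u64>,
    hit: bool, // stands in for ray_intersects_node(cursor)
}

fn find_node_at_point(nodes: &HashMap<u64, Node>, id: u64) -> Option<u64> {
    let node = nodes.get(&id)?;
    // Topmost children are stored last, so walk them in reverse.
    for &child in node.children.iter().rev() {
        if let Some(found) = find_node_at_point(nodes, child) {
            return Some(found);
        }
    }
    // Fall back to this node only if the cursor actually hits it.
    if node.hit { Some(id) } else { None }
}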

View file

@ -42,12 +42,6 @@ const SHADER_VERSION: &'static str = "#version 300 es\n";
static SHADER_PREAMBLE: &'static str = "shared";
lazy_static! {
pub static ref MAX_TEXTURE_SIZE: gl::GLint = {
gl::get_integer_v(gl::MAX_TEXTURE_SIZE)
};
}
#[repr(u32)]
pub enum DepthFunction {
Less = gl::LESS,
@ -825,6 +819,8 @@ pub struct Device {
// Used on android only
#[allow(dead_code)]
next_vao_id: gl::GLuint,
max_texture_size: u32,
}
impl Device {
@ -863,9 +859,15 @@ impl Device {
next_vao_id: 1,
//file_watcher: file_watcher,
max_texture_size: gl::get_integer_v(gl::MAX_TEXTURE_SIZE) as u32
}
}
pub fn max_texture_size(&self) -> u32 {
self.max_texture_size
}
pub fn get_capabilities(&self) -> &Capabilities {
&self.capabilities
}
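With MAX_TEXTURE_SIZE no longer a lazy_static, the device queries GL_MAX_TEXTURE_SIZE once at construction and exposes it, and Renderer::new clamps any caller-supplied limit against it (the cmp::min expression later in this commit). A one-function sketch of that clamp, with a hypothetical standalone signature:

use std::cmp;

// The effective texture cache size is the requested limit (if any),
// clamped to what the device reports.
fn effective_max_texture_size(device_max: u32, requested: Option<u32>) -> u32 {
    cmp::min(device_max, requested.unwrap_or(device_max))
}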

View file

@ -8,10 +8,10 @@ use internal_types::{ANGLE_FLOAT_TO_FIXED, AxisDirection};
use internal_types::{LowLevelFilterOp};
use internal_types::{RendererFrame};
use frame_builder::{FrameBuilder, FrameBuilderConfig};
use layer::Layer;
use clip_scroll_node::ClipScrollNode;
use resource_cache::ResourceCache;
use scene::{Scene, SceneProperties};
use scroll_tree::{ScrollTree, ScrollStates};
use clip_scroll_tree::{ClipScrollTree, ScrollStates};
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use tiling::{AuxiliaryListsMap, CompositeOps, PrimitiveFlags};
@ -33,7 +33,7 @@ struct FlattenContext<'a> {
// TODO: doc
pub struct Frame {
pub scroll_tree: ScrollTree,
pub clip_scroll_tree: ClipScrollTree,
pub pipeline_epoch_map: HashMap<PipelineId, Epoch, BuildHasherDefault<FnvHasher>>,
pub pipeline_auxiliary_lists: AuxiliaryListsMap,
id: FrameId,
@ -180,7 +180,7 @@ impl Frame {
Frame {
pipeline_epoch_map: HashMap::with_hasher(Default::default()),
pipeline_auxiliary_lists: HashMap::with_hasher(Default::default()),
scroll_tree: ScrollTree::new(),
clip_scroll_tree: ClipScrollTree::new(),
id: FrameId(0),
frame_builder: None,
frame_builder_config: config,
@ -193,37 +193,37 @@ impl Frame {
// Advance to the next frame.
self.id.0 += 1;
self.scroll_tree.drain()
self.clip_scroll_tree.drain()
}
pub fn get_scroll_layer_state(&self) -> Vec<ScrollLayerState> {
self.scroll_tree.get_scroll_layer_state()
pub fn get_scroll_node_state(&self) -> Vec<ScrollLayerState> {
self.clip_scroll_tree.get_scroll_node_state()
}
/// Returns true if any layers actually changed position or false otherwise.
pub fn scroll_layers(&mut self,
origin: LayerPoint,
pipeline_id: PipelineId,
scroll_root_id: ServoScrollRootId)
-> bool {
self.scroll_tree.scroll_layers(origin, pipeline_id, scroll_root_id)
/// Returns true if any nodes actually changed position or false otherwise.
pub fn scroll_nodes(&mut self,
origin: LayerPoint,
pipeline_id: PipelineId,
scroll_root_id: ServoScrollRootId)
-> bool {
self.clip_scroll_tree.scroll_nodes(origin, pipeline_id, scroll_root_id)
}
/// Returns true if any layers actually changed position or false otherwise.
/// Returns true if any nodes actually changed position or false otherwise.
pub fn scroll(&mut self,
scroll_location: ScrollLocation,
cursor: WorldPoint,
phase: ScrollEventPhase)
-> bool {
self.scroll_tree.scroll(scroll_location, cursor, phase,)
self.clip_scroll_tree.scroll(scroll_location, cursor, phase,)
}
pub fn tick_scrolling_bounce_animations(&mut self) {
self.scroll_tree.tick_scrolling_bounce_animations();
self.clip_scroll_tree.tick_scrolling_bounce_animations();
}
pub fn discard_frame_state_for_pipeline(&mut self, pipeline_id: PipelineId) {
self.scroll_tree.discard_frame_state_for_pipeline(pipeline_id);
self.clip_scroll_tree.discard_frame_state_for_pipeline(pipeline_id);
}
pub fn create(&mut self, scene: &Scene) {
@ -256,9 +256,9 @@ impl Frame {
}
};
self.scroll_tree.establish_root(root_pipeline_id,
&root_pipeline.viewport_size,
&root_clip.main.size);
self.clip_scroll_tree.establish_root(root_pipeline_id,
&root_pipeline.viewport_size,
&root_clip.main.size);
let background_color = root_pipeline.background_color.and_then(|color| {
if color.a > 0.0 {
@ -279,20 +279,20 @@ impl Frame {
};
let mut traversal = DisplayListTraversal::new_skipping_first(display_list);
let reference_frame_id = self.scroll_tree.root_reference_frame_id();
let topmost_scroll_layer_id = self.scroll_tree.topmost_scroll_layer_id();
let reference_frame_id = self.clip_scroll_tree.root_reference_frame_id();
let topmost_scroll_layer_id = self.clip_scroll_tree.topmost_scroll_layer_id();
debug_assert!(reference_frame_id != topmost_scroll_layer_id);
let viewport_rect = LayerRect::new(LayerPoint::zero(), root_pipeline.viewport_size);
let clip = ClipRegion::simple(&viewport_rect);
context.builder.push_scroll_layer(reference_frame_id,
&clip,
&LayerPoint::zero(),
&root_pipeline.viewport_size);
context.builder.push_scroll_layer(topmost_scroll_layer_id,
&clip,
&LayerPoint::zero(),
&root_clip.main.size);
context.builder.push_clip_scroll_node(reference_frame_id,
&clip,
&LayerPoint::zero(),
&root_pipeline.viewport_size);
context.builder.push_clip_scroll_node(topmost_scroll_layer_id,
&clip,
&LayerPoint::zero(),
&root_clip.main.size);
self.flatten_stacking_context(&mut traversal,
root_pipeline_id,
@ -304,12 +304,12 @@ impl Frame {
&root_stacking_context,
root_clip);
context.builder.pop_scroll_layer();
context.builder.pop_scroll_layer();
context.builder.pop_clip_scroll_node();
context.builder.pop_clip_scroll_node();
}
self.frame_builder = Some(frame_builder);
self.scroll_tree.finalize_and_apply_pending_scroll_offsets(old_scrolling_states);
self.clip_scroll_tree.finalize_and_apply_pending_scroll_offsets(old_scrolling_states);
}
fn flatten_scroll_layer<'a>(&mut self,
@ -330,12 +330,15 @@ impl Frame {
}
let clip_rect = clip.main;
let layer = Layer::new(&clip_rect, *content_size, &layer_relative_transform, pipeline_id);
self.scroll_tree.add_layer(layer, new_scroll_layer_id, parent_scroll_layer_id);
context.builder.push_scroll_layer(new_scroll_layer_id,
clip,
&clip_rect.origin,
&content_size);
let node = ClipScrollNode::new(&clip_rect,
*content_size,
&layer_relative_transform,
pipeline_id);
self.clip_scroll_tree.add_node(node, new_scroll_layer_id, parent_scroll_layer_id);
context.builder.push_clip_scroll_node(new_scroll_layer_id,
clip,
&clip_rect.origin,
&content_size);
self.flatten_items(traversal,
pipeline_id,
@ -345,7 +348,7 @@ impl Frame {
LayerToScrollTransform::identity(),
level);
context.builder.pop_scroll_layer();
context.builder.pop_clip_scroll_node();
}
fn flatten_stacking_context<'a>(&mut self,
@ -399,10 +402,10 @@ impl Frame {
// that fixed position stacking contexts are positioned relative to us.
if stacking_context_transform != LayoutTransform::identity() ||
stacking_context.perspective != LayoutTransform::identity() {
scroll_layer_id = self.scroll_tree.add_reference_frame(clip_region.main,
transform,
pipeline_id,
scroll_layer_id);
scroll_layer_id = self.clip_scroll_tree.add_reference_frame(clip_region.main,
transform,
pipeline_id,
scroll_layer_id);
reference_frame_id = scroll_layer_id;
transform = LayerToScrollTransform::identity();
}
@ -420,7 +423,7 @@ impl Frame {
CompositeOps::empty());
//Note: we don't use the original clip region here,
// it's already processed by the layer we just pushed.
// it's already processed by the node we just pushed.
context.builder.add_solid_rectangle(&clip_region.main,
&no_clip,
&bg_color,
@ -452,7 +455,7 @@ impl Frame {
&scrollbar_rect,
&ClipRegion::simple(&scrollbar_rect),
&DEFAULT_SCROLLBAR_COLOR,
PrimitiveFlags::Scrollbar(self.scroll_tree.topmost_scroll_layer_id, 4.0));
PrimitiveFlags::Scrollbar(self.clip_scroll_tree.topmost_scroll_layer_id(), 4.0));
}
context.builder.pop_stacking_context();
@ -491,27 +494,27 @@ impl Frame {
bounds.origin.y,
0.0);
let iframe_reference_frame_id =
self.scroll_tree.add_reference_frame(iframe_rect,
transform,
pipeline_id,
current_scroll_layer_id);
self.clip_scroll_tree.add_reference_frame(iframe_rect,
transform,
pipeline_id,
current_scroll_layer_id);
let iframe_scroll_layer_id = ScrollLayerId::root_scroll_layer(pipeline_id);
let layer = Layer::new(&LayerRect::new(LayerPoint::zero(), iframe_rect.size),
iframe_clip.main.size,
&LayerToScrollTransform::identity(),
pipeline_id);
self.scroll_tree.add_layer(layer.clone(),
iframe_scroll_layer_id,
iframe_reference_frame_id);
let node = ClipScrollNode::new(&LayerRect::new(LayerPoint::zero(), iframe_rect.size),
iframe_clip.main.size,
&LayerToScrollTransform::identity(),
pipeline_id);
self.clip_scroll_tree.add_node(node.clone(),
iframe_scroll_layer_id,
iframe_reference_frame_id);
context.builder.push_scroll_layer(iframe_reference_frame_id,
iframe_clip,
&LayerPoint::zero(),
&iframe_rect.size);
context.builder.push_scroll_layer(iframe_scroll_layer_id,
iframe_clip,
&LayerPoint::zero(),
&iframe_clip.main.size);
context.builder.push_clip_scroll_node(iframe_reference_frame_id,
iframe_clip,
&LayerPoint::zero(),
&iframe_rect.size);
context.builder.push_clip_scroll_node(iframe_scroll_layer_id,
iframe_clip,
&LayerPoint::zero(),
&iframe_clip.main.size);
let mut traversal = DisplayListTraversal::new_skipping_first(display_list);
@ -525,8 +528,8 @@ impl Frame {
&iframe_stacking_context,
iframe_clip);
context.builder.pop_scroll_layer();
context.builder.pop_scroll_layer();
context.builder.pop_clip_scroll_node();
context.builder.pop_clip_scroll_node();
}
fn flatten_items<'a>(&mut self,
@ -548,6 +551,7 @@ impl Frame {
&item.clip,
&info.stretch_size,
&info.tile_spacing,
None,
info.image_key,
info.image_rendering);
}
@ -647,7 +651,7 @@ impl Frame {
auxiliary_lists_map: &AuxiliaryListsMap,
device_pixel_ratio: f32)
-> RendererFrame {
self.scroll_tree.update_all_layer_transforms();
self.clip_scroll_tree.update_all_node_transforms();
let frame = self.build_frame(resource_cache,
auxiliary_lists_map,
device_pixel_ratio);
@ -663,13 +667,13 @@ impl Frame {
let frame = frame_builder.as_mut().map(|builder|
builder.build(resource_cache,
self.id,
&self.scroll_tree,
&self.clip_scroll_tree,
auxiliary_lists_map,
device_pixel_ratio)
);
self.frame_builder = frame_builder;
let layers_bouncing_back = self.scroll_tree.collect_layers_bouncing_back();
RendererFrame::new(self.pipeline_epoch_map.clone(), layers_bouncing_back, frame)
let nodes_bouncing_back = self.clip_scroll_tree.collect_nodes_bouncing_back();
RendererFrame::new(self.pipeline_epoch_map.clone(), nodes_bouncing_back, frame)
}
}

Diff not shown because of its large size.

View file

@ -52,6 +52,8 @@ extern crate bitflags;
extern crate thread_profiler;
mod batch_builder;
mod clip_scroll_node;
mod clip_scroll_tree;
mod debug_colors;
mod debug_font_data;
mod debug_render;
@ -62,7 +64,6 @@ mod freelist;
mod geometry;
mod gpu_store;
mod internal_types;
mod layer;
mod mask_cache;
mod prim_store;
mod profiler;
@ -71,7 +72,6 @@ mod render_backend;
mod render_task;
mod resource_cache;
mod scene;
mod scroll_tree;
mod spring;
mod texture_cache;
mod tiling;

View file

@ -82,18 +82,24 @@ fn dwrite_render_mode(font_face: &dwrote::FontFace,
fn get_glyph_dimensions_with_analysis(analysis: dwrote::GlyphRunAnalysis,
texture_type: dwrote::DWRITE_TEXTURE_TYPE)
-> GlyphDimensions {
-> Option<GlyphDimensions> {
let bounds = analysis.get_alpha_texture_bounds(texture_type);
let width = (bounds.right - bounds.left) as u32;
let height = (bounds.bottom - bounds.top) as u32;
assert!(width > 0 && height > 0);
GlyphDimensions {
// Alpha texture bounds can sometimes return an empty rect
// Such as for spaces
if width == 0 || height == 0 {
return None
}
Some(GlyphDimensions {
left: bounds.left,
top: -bounds.top,
width: width,
height: height,
}
})
}
impl FontContext {
@ -203,7 +209,7 @@ impl FontContext {
let analysis = self.create_glyph_analysis(key, render_mode, None);
let texture_type = dwrite_texture_type(render_mode);
Some(get_glyph_dimensions_with_analysis(analysis, texture_type))
get_glyph_dimensions_with_analysis(analysis, texture_type)
}
// DWRITE gives us values in RGB. WR doesn't really touch it after. Note, CG returns in BGR
@ -266,6 +272,10 @@ impl FontContext {
let width = (bounds.right - bounds.left) as usize;
let height = (bounds.bottom - bounds.top) as usize;
// We should not get here since glyph_dimensions would return
// None for empty glyphs.
assert!(width > 0 && height > 0);
let mut pixels = analysis.create_alpha_texture(texture_type, bounds);
let lut_correction = match glyph_options {
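get_glyph_dimensions_with_analysis now returns Option<GlyphDimensions> and yields None when DirectWrite reports empty alpha-texture bounds (as it does for spaces), instead of asserting. A standalone Rust sketch of that pattern with simplified bounds and dimension types (not the dwrote API):

// Empty bounds (e.g. a space glyph) produce None instead of a panic.
struct Bounds { left: i32, top: i32, right: i32, bottom: i32 }

struct GlyphDimensions { left: i32, top: i32, width: u32, height: u32 }

fn glyph_dimensions(bounds: Bounds) -> Option<GlyphDimensions> {
    let width = (bounds.right - bounds.left) as u32;
    let height = (bounds.bottom - bounds.top) as u32;
    if width == 0 || height == 0 {
        return None; // nothing to rasterize
    }
    Some(GlyphDimensions { left: bounds.left, top: -bounds.top, width, height })
}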

View file

@ -30,7 +30,7 @@ pub const MASK_DATA_GPU_SIZE: usize = 1;
/// may grow. Storing them as texel coords and normalizing
/// the UVs in the vertex shader means nothing needs to be
/// updated on the CPU when the texture size changes.
#[derive(Clone)]
#[derive(Copy, Clone, Debug)]
pub struct TexelRect {
pub uv0: DevicePoint,
pub uv1: DevicePoint,
@ -45,6 +45,15 @@ impl Default for TexelRect {
}
}
impl TexelRect {
pub fn new(u0: u32, v0: u32, u1: u32, v1: u32) -> TexelRect {
TexelRect {
uv0: DevicePoint::new(u0 as f32, v0 as f32),
uv1: DevicePoint::new(u1 as f32, v1 as f32),
}
}
}
/// For external images, it's not possible to know the
/// UV coords of the image (or the image data itself)
/// until the render thread receives the frame and issues
@ -136,6 +145,7 @@ pub struct ImagePrimitiveCpu {
pub kind: ImagePrimitiveKind,
pub color_texture_id: SourceTexture,
pub resource_address: GpuStoreAddress,
pub sub_rect: Option<TexelRect>,
}
#[derive(Debug, Clone)]
@ -920,8 +930,18 @@ impl PrimitiveStore {
if let Some(cache_item) = cache_item {
let resource_rect = self.gpu_resource_rects.get_mut(image_cpu.resource_address);
resource_rect.uv0 = cache_item.uv0;
resource_rect.uv1 = cache_item.uv1;
match image_cpu.sub_rect {
Some(sub_rect) => {
resource_rect.uv0.x = cache_item.uv0.x + sub_rect.uv0.x;
resource_rect.uv0.y = cache_item.uv0.y + sub_rect.uv0.y;
resource_rect.uv1.x = cache_item.uv0.x + sub_rect.uv1.x;
resource_rect.uv1.y = cache_item.uv0.y + sub_rect.uv1.y;
}
None => {
resource_rect.uv0 = cache_item.uv0;
resource_rect.uv1 = cache_item.uv1;
}
}
}
image_cpu.color_texture_id = texture_id;
}
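ImagePrimitiveCpu gains an optional sub_rect, and when it is present the resource rect's UVs become offsets from the cached item's uv0 rather than the cached item's own uv0/uv1. A standalone Rust sketch of that adjustment with simplified point and rect types:

// Resolve final UVs: either the cached rect itself, or a sub-rect
// expressed as offsets from the cached item's uv0.
#[derive(Clone, Copy)]
struct Point { x: f32, y: f32 }

#[derive(Clone, Copy)]
struct TexelRect { uv0: Point, uv1: Point }

fn resolve_uvs(cache: TexelRect, sub_rect: Option<TexelRect>) -> TexelRect {
    match sub_rect {
        Some(sub) => TexelRect {
            uv0: Point { x: cache.uv0.x + sub.uv0.x, y: cache.uv0.y + sub.uv0.y },
            uv1: Point { x: cache.uv0.x + sub.uv1.x, y: cache.uv0.y + sub.uv1.y },
        },
        None => cache,
    }
}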

View file

@ -2,8 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use bincode::serde::serialize;
use bincode;
use bincode::{SizeLimit, serialize};
use std::fmt::Debug;
use std::mem;
use std::any::TypeId;
@ -51,7 +50,7 @@ impl BinaryRecorder {
impl ApiRecordingReceiver for BinaryRecorder {
fn write_msg(&mut self, _: u32, msg: &ApiMsg) {
if should_record_msg(msg) {
let buf = serialize(msg, bincode::SizeLimit::Infinite).unwrap();
let buf = serialize(msg, SizeLimit::Infinite).unwrap();
self.write_length_and_data(&buf);
}
}

View file

@ -21,7 +21,7 @@ use threadpool::ThreadPool;
use webrender_traits::{ApiMsg, AuxiliaryLists, BuiltDisplayList, IdNamespace, ImageData};
use webrender_traits::{PipelineId, RenderNotifier, RenderDispatcher, WebGLCommand, WebGLContextId};
use webrender_traits::channel::{PayloadHelperMethods, PayloadReceiver, PayloadSender, MsgReceiver};
use webrender_traits::{VRCompositorCommand, VRCompositorHandler};
use webrender_traits::{BlobImageRenderer, VRCompositorCommand, VRCompositorHandler};
use offscreen_gl_context::GLContextDispatcher;
/// The render backend is responsible for transforming high level display lists into
@ -68,9 +68,10 @@ impl RenderBackend {
config: FrameBuilderConfig,
recorder: Option<Box<ApiRecordingReceiver>>,
main_thread_dispatcher: Arc<Mutex<Option<Box<RenderDispatcher>>>>,
blob_image_renderer: Option<Box<BlobImageRenderer>>,
vr_compositor_handler: Arc<Mutex<Option<Box<VRCompositorHandler>>>>) -> RenderBackend {
let resource_cache = ResourceCache::new(texture_cache, workers, enable_aa);
let resource_cache = ResourceCache::new(texture_cache, workers, blob_image_renderer, enable_aa);
register_thread_with_profiler("Backend".to_string());
@ -231,7 +232,7 @@ impl RenderBackend {
ApiMsg::ScrollLayersWithScrollId(origin, pipeline_id, scroll_root_id) => {
profile_scope!("ScrollLayersWithScrollId");
let frame = profile_counters.total_time.profile(|| {
if self.frame.scroll_layers(origin, pipeline_id, scroll_root_id) {
if self.frame.scroll_nodes(origin, pipeline_id, scroll_root_id) {
Some(self.render())
} else {
None
@ -261,7 +262,7 @@ impl RenderBackend {
}
ApiMsg::GetScrollLayerState(tx) => {
profile_scope!("GetScrollLayerState");
tx.send(self.frame.get_scroll_layer_state())
tx.send(self.frame.get_scroll_node_state())
.unwrap()
}
ApiMsg::RequestWebGLContext(size, attributes, tx) => {

View file

@ -6,8 +6,8 @@ use internal_types::{HardwareCompositeOp, LowLevelFilterOp};
use mask_cache::MaskCacheInfo;
use prim_store::{PrimitiveCacheKey, PrimitiveIndex};
use std::{cmp, f32, i32, mem, usize};
use tiling::{PackedLayerIndex, RenderPass, RenderTargetIndex, ScrollLayerIndex};
use tiling::StackingContextIndex;
use tiling::{ClipScrollGroupIndex, PackedLayerIndex, RenderPass, RenderTargetIndex};
use tiling::{ScrollLayerIndex, StackingContextIndex};
use webrender_traits::{DeviceIntLength, DeviceIntPoint, DeviceIntRect, DeviceIntSize};
use webrender_traits::MixBlendMode;
@ -51,7 +51,7 @@ pub enum RenderTaskLocation {
#[derive(Debug, Clone)]
pub enum AlphaRenderItem {
Primitive(StackingContextIndex, PrimitiveIndex, i32),
Primitive(ClipScrollGroupIndex, PrimitiveIndex, i32),
Blend(StackingContextIndex, RenderTaskId, LowLevelFilterOp, i32),
Composite(StackingContextIndex, RenderTaskId, RenderTaskId, MixBlendMode, i32),
HardwareComposite(StackingContextIndex, RenderTaskId, HardwareCompositeOp, i32),

View file

@ -48,7 +48,7 @@ use util::TransformedRectKind;
use webrender_traits::{ColorF, Epoch, PipelineId, RenderNotifier, RenderDispatcher};
use webrender_traits::{ExternalImageId, ImageData, ImageFormat, RenderApiSender, RendererKind};
use webrender_traits::{DeviceIntRect, DevicePoint, DeviceIntPoint, DeviceIntSize, DeviceUintSize};
use webrender_traits::ImageDescriptor;
use webrender_traits::{ImageDescriptor, BlobImageRenderer};
use webrender_traits::channel;
use webrender_traits::VRCompositorHandler;
@ -463,6 +463,11 @@ pub struct Renderer {
/// use a hashmap, and allows a flat vector for performance.
cache_texture_id_map: Vec<TextureId>,
/// A special 1x1 dummy cache texture used for shaders that expect to work
/// with the cache but are actually running in the first pass
/// when no target is yet provided as a cache texture input.
dummy_cache_texture_id: TextureId,
/// Optional trait object that allows the client
/// application to provide external buffers for image data.
external_image_handler: Option<Box<ExternalImageHandler>>,
@ -675,7 +680,10 @@ impl Renderer {
options.precache_shaders)
};
let mut texture_cache = TextureCache::new();
let device_max_size = device.max_texture_size();
let max_texture_size = cmp::min(device_max_size, options.max_texture_size.unwrap_or(device_max_size));
let mut texture_cache = TextureCache::new(max_texture_size);
let white_pixels: Vec<u8> = vec![
0xff, 0xff, 0xff, 0xff,
@ -712,6 +720,15 @@ impl Renderer {
TextureFilter::Linear,
ImageData::Raw(Arc::new(mask_pixels)));
let dummy_cache_texture_id = device.create_texture_ids(1, TextureTarget::Array)[0];
device.init_texture(dummy_cache_texture_id,
1,
1,
ImageFormat::RGBA8,
TextureFilter::Linear,
RenderTargetMode::LayerRenderTarget(1),
None);
let debug_renderer = DebugRenderer::new(&mut device);
let gpu_data_textures = [
@ -780,6 +797,8 @@ impl Renderer {
// TODO(gw): Use a heuristic to select best # of worker threads.
Arc::new(Mutex::new(ThreadPool::new_with_name("WebRender:Worker".to_string(), 4)))
});
let blob_image_renderer = options.blob_image_renderer.take();
try!{ thread::Builder::new().name("RenderBackend".to_string()).spawn(move || {
let mut backend = RenderBackend::new(api_rx,
payload_rx,
@ -794,6 +813,7 @@ impl Renderer {
config,
recorder,
backend_main_thread_dispatcher,
blob_image_renderer,
backend_vr_compositor);
backend.run();
})};
@ -844,6 +864,7 @@ impl Renderer {
pipeline_epoch_map: HashMap::with_hasher(Default::default()),
main_thread_dispatcher: main_thread_dispatcher,
cache_texture_id_map: Vec::new(),
dummy_cache_texture_id: dummy_cache_texture_id,
external_image_handler: None,
external_images: HashMap::with_hasher(Default::default()),
vr_compositor_handler: vr_compositor
@ -1163,7 +1184,7 @@ impl Renderer {
batch: &PrimitiveBatch,
projection: &Matrix4D<f32>,
render_task_data: &Vec<RenderTaskData>,
cache_texture: Option<TextureId>,
cache_texture: TextureId,
render_target: Option<(TextureId, i32)>,
target_dimensions: DeviceUintSize) {
let transform_kind = batch.key.flags.transform_kind();
@ -1256,8 +1277,7 @@ impl Renderer {
// Before submitting the composite batch, do the
// framebuffer readbacks that are needed for each
// composite operation in this batch.
let cache_texture_id = cache_texture.unwrap();
let cache_texture_dimensions = self.device.get_texture_dimensions(cache_texture_id);
let cache_texture_dimensions = self.device.get_texture_dimensions(cache_texture);
let backdrop = &render_task_data[instance.task_index as usize];
let readback = &render_task_data[instance.user_data[0] as usize];
@ -1267,7 +1287,7 @@ impl Renderer {
// Called per-instance in case the layer (and therefore FBO)
// changes. The device will skip the GL call if the requested
// target is already bound.
let cache_draw_target = (cache_texture_id, readback.data[4] as i32);
let cache_draw_target = (cache_texture, readback.data[4] as i32);
self.device.bind_draw_target(Some(cache_draw_target), Some(cache_texture_dimensions));
let src_x = backdrop.data[0] - backdrop.data[4] + source.data[4];
@ -1314,7 +1334,7 @@ impl Renderer {
render_target: Option<(TextureId, i32)>,
target: &RenderTarget,
target_size: DeviceUintSize,
cache_texture: Option<TextureId>,
cache_texture: TextureId,
should_clear: bool,
background_color: Option<ColorF>,
render_task_data: &Vec<RenderTaskData>) {
@ -1327,9 +1347,7 @@ impl Renderer {
self.device.set_blend(false);
self.device.set_blend_mode_alpha();
if let Some(cache_texture) = cache_texture {
self.device.bind_texture(TextureSampler::Cache, cache_texture);
}
self.device.bind_texture(TextureSampler::Cache, cache_texture);
let (color, projection) = match render_target {
Some(..) => (
@ -1632,7 +1650,7 @@ impl Renderer {
self.gpu_data_textures[self.gdt_index].init_frame(&mut self.device, frame);
self.gdt_index = (self.gdt_index + 1) % GPU_DATA_TEXTURE_POOL;
let mut src_id = None;
let mut src_id = self.dummy_cache_texture_id;
for (pass_index, pass) in frame.passes.iter().enumerate() {
let (do_clear, size, target_id) = if pass.is_framebuffer {
@ -1657,7 +1675,7 @@ impl Renderer {
}
src_id = target_id;
src_id = target_id.unwrap_or(self.dummy_cache_texture_id);
}
self.draw_render_target_debug(framebuffer_size);
@ -1716,6 +1734,14 @@ impl Renderer {
}
}
}
// De-initialize the Renderer safely, assuming the GL is still alive and active.
pub fn deinit(mut self) {
// Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame.
self.device.begin_frame(1.0);
self.device.deinit_texture(self.dummy_cache_texture_id);
self.device.end_frame();
}
}
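A hedged sketch of the teardown order this enables (the shutdown function and its comments are hypothetical; only Renderer::deinit comes from this patch):

    fn shutdown(renderer: Renderer) {
        // Stop submitting display lists and drop the RenderApi before this point.
        // deinit() must run while the GL context the Renderer was created with is still
        // current, because it deletes the dummy cache texture inside a throwaway frame.
        renderer.deinit();
        // Only after this returns is it safe to tear down the GL context.
    }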
pub enum ExternalImageSource<'a> {
@ -1756,7 +1782,6 @@ pub trait ExternalImageHandler {
fn release(&mut self, key: ExternalImageId);
}
#[derive(Debug)]
pub struct RendererOptions {
pub device_pixel_ratio: f32,
pub resource_override_path: Option<PathBuf>,
@ -1770,7 +1795,9 @@ pub struct RendererOptions {
pub clear_framebuffer: bool,
pub clear_color: ColorF,
pub render_target_debug: bool,
pub max_texture_size: Option<u32>,
pub workers: Option<Arc<Mutex<ThreadPool>>>,
pub blob_image_renderer: Option<Box<BlobImageRenderer>>,
pub recorder: Option<Box<ApiRecordingReceiver>>,
}
@ -1789,7 +1816,9 @@ impl Default for RendererOptions {
clear_framebuffer: true,
clear_color: ColorF::new(1.0, 1.0, 1.0, 1.0),
render_target_debug: false,
max_texture_size: None,
workers: None,
blob_image_renderer: None,
recorder: None,
}
}

Просмотреть файл

@ -24,6 +24,7 @@ use webrender_traits::{Epoch, FontKey, GlyphKey, ImageKey, ImageFormat, ImageRen
use webrender_traits::{FontRenderMode, ImageData, GlyphDimensions, WebGLContextId};
use webrender_traits::{DevicePoint, DeviceIntSize, ImageDescriptor, ColorF};
use webrender_traits::{ExternalImageId, GlyphOptions, GlyphInstance};
use webrender_traits::{BlobImageRenderer, BlobImageDescriptor, BlobImageError};
use threadpool::ThreadPool;
use euclid::Point2D;
@ -209,11 +210,15 @@ pub struct ResourceCache {
glyph_cache_tx: Sender<GlyphCacheMsg>,
glyph_cache_result_queue: Receiver<GlyphCacheResultMsg>,
pending_external_image_update_list: ExternalImageUpdateList,
blob_image_renderer: Option<Box<BlobImageRenderer>>,
blob_image_requests: HashSet<ImageRequest>,
}
impl ResourceCache {
pub fn new(texture_cache: TextureCache,
workers: Arc<Mutex<ThreadPool>>,
blob_image_renderer: Option<Box<BlobImageRenderer>>,
enable_aa: bool) -> ResourceCache {
let (glyph_cache_tx, glyph_cache_result_queue) = spawn_glyph_cache_thread(workers);
@ -232,9 +237,16 @@ impl ResourceCache {
glyph_cache_tx: glyph_cache_tx,
glyph_cache_result_queue: glyph_cache_result_queue,
pending_external_image_update_list: ExternalImageUpdateList::new(),
blob_image_renderer: blob_image_renderer,
blob_image_requests: HashSet::new(),
}
}
pub fn max_texture_size(&self) -> u32 {
self.texture_cache.max_texture_size()
}
pub fn add_font_template(&mut self, font_key: FontKey, template: FontTemplate) {
// Push the new font to the glyph cache thread, and also store
// it locally for glyph metric requests.
@ -248,6 +260,12 @@ impl ResourceCache {
image_key: ImageKey,
descriptor: ImageDescriptor,
data: ImageData) {
if descriptor.width > self.max_texture_size() || descriptor.height > self.max_texture_size() {
// TODO: we need to handle this case gracefully, cf. issue #620.
println!("Warning: texture size ({} {}) larger than the maximum size",
descriptor.width, descriptor.height);
}
let resource = ImageResource {
descriptor: descriptor,
data: data,
@ -321,14 +339,38 @@ impl ResourceCache {
webgl_texture.size = size;
}
pub fn request_image(&mut self,
key: ImageKey,
rendering: ImageRendering) {
pub fn request_image(&mut self, key: ImageKey, rendering: ImageRendering) {
debug_assert!(self.state == State::AddResources);
self.pending_image_requests.push(ImageRequest {
let request = ImageRequest {
key: key,
rendering: rendering,
});
};
let template = self.image_templates.get(&key).unwrap();
if let ImageData::Blob(ref data) = template.data {
if let Some(ref mut renderer) = self.blob_image_renderer {
let same_epoch = match self.cached_images.resources.get(&request) {
Some(entry) => entry.epoch == template.epoch,
None => false,
};
if !same_epoch && self.blob_image_requests.insert(request) {
renderer.request_blob_image(
key,
data.clone(),
&BlobImageDescriptor {
width: template.descriptor.width,
height: template.descriptor.height,
format: template.descriptor.format,
// TODO(nical): figure out the scale factor (should change with zoom).
scale_factor: 1.0,
},
);
}
}
} else {
self.pending_image_requests.push(request);
}
}
pub fn request_glyphs(&mut self,
@ -453,7 +495,7 @@ impl ResourceCache {
let external_id = match image_template.data {
ImageData::ExternalHandle(id) => Some(id),
// Raw and ExternalBuffer both use the resource_cache.
ImageData::Raw(..) | ImageData::ExternalBuffer(..) => None,
ImageData::Raw(..) | ImageData::ExternalBuffer(..) | ImageData::Blob(..) => None,
};
ImageProperties {
@ -539,51 +581,85 @@ impl ResourceCache {
}
}
for request in self.pending_image_requests.drain(..) {
let cached_images = &mut self.cached_images;
let image_template = &self.image_templates[&request.key];
let image_data = image_template.data.clone();
let mut image_requests = mem::replace(&mut self.pending_image_requests, Vec::new());
for request in image_requests.drain(..) {
self.finalize_image_request(request, None);
}
match image_template.data {
ImageData::ExternalHandle(..) => {
// external handle doesn't need to update the texture_cache.
let mut blob_image_requests = mem::replace(&mut self.blob_image_requests, HashSet::new());
if self.blob_image_renderer.is_some() {
for request in blob_image_requests.drain() {
match self.blob_image_renderer.as_mut().unwrap()
.resolve_blob_image(request.key) {
Ok(image) => {
self.finalize_image_request(request, Some(ImageData::new(image.data)));
}
// TODO(nical): I think that we should handle these somewhat gracefully,
// at least in the out-of-memory scenario.
Err(BlobImageError::Oom) => {
// This one should be recoverable-ish.
panic!("Failed to render a vector image (OOM)");
}
Err(BlobImageError::InvalidKey) => {
panic!("Invalid vector image key");
}
Err(BlobImageError::InvalidData) => {
// TODO(nical): If we run into this we should kill the content process.
panic!("Invalid vector image data");
}
Err(BlobImageError::Other(msg)) => {
panic!("Vector image error {}", msg);
}
}
ImageData::Raw(..) | ImageData::ExternalBuffer(..) => {
match cached_images.entry(request.clone(), self.current_frame_id) {
Occupied(entry) => {
let image_id = entry.get().texture_cache_id;
}
}
}
if entry.get().epoch != image_template.epoch {
self.texture_cache.update(image_id,
image_template.descriptor,
image_data);
fn finalize_image_request(&mut self, request: ImageRequest, image_data: Option<ImageData>) {
let image_template = &self.image_templates[&request.key];
let image_data = image_data.unwrap_or_else(||{
image_template.data.clone()
});
// Update the cached epoch
*entry.into_mut() = CachedImageInfo {
texture_cache_id: image_id,
epoch: image_template.epoch,
};
}
}
Vacant(entry) => {
let image_id = self.texture_cache.new_item_id();
match image_template.data {
ImageData::ExternalHandle(..) => {
// external handle doesn't need to update the texture_cache.
}
ImageData::Raw(..) | ImageData::ExternalBuffer(..) | ImageData::Blob(..) => {
match self.cached_images.entry(request.clone(), self.current_frame_id) {
Occupied(entry) => {
let image_id = entry.get().texture_cache_id;
let filter = match request.rendering {
ImageRendering::Pixelated => TextureFilter::Nearest,
ImageRendering::Auto | ImageRendering::CrispEdges => TextureFilter::Linear,
};
self.texture_cache.insert(image_id,
if entry.get().epoch != image_template.epoch {
self.texture_cache.update(image_id,
image_template.descriptor,
filter,
image_data);
entry.insert(CachedImageInfo {
// Update the cached epoch
*entry.into_mut() = CachedImageInfo {
texture_cache_id: image_id,
epoch: image_template.epoch,
});
};
}
}
Vacant(entry) => {
let image_id = self.texture_cache.new_item_id();
let filter = match request.rendering {
ImageRendering::Pixelated => TextureFilter::Nearest,
ImageRendering::Auto | ImageRendering::CrispEdges => TextureFilter::Linear,
};
self.texture_cache.insert(image_id,
image_template.descriptor,
filter,
image_data);
entry.insert(CachedImageInfo {
texture_cache_id: image_id,
epoch: image_template.epoch,
});
}
}
}
}

Просмотреть файл

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use device::{MAX_TEXTURE_SIZE, TextureFilter};
use device::TextureFilter;
use fnv::FnvHasher;
use freelist::{FreeList, FreeListItem, FreeListItemId};
use internal_types::{TextureUpdate, TextureUpdateOp};
@ -28,9 +28,6 @@ const MAX_RGBA_PIXELS_PER_TEXTURE: u32 = MAX_BYTES_PER_TEXTURE / 4;
/// The desired initial size of each texture, in pixels.
const INITIAL_TEXTURE_SIZE: u32 = 1024;
/// The desired initial area of each texture, in pixels squared.
const INITIAL_TEXTURE_AREA: u32 = INITIAL_TEXTURE_SIZE * INITIAL_TEXTURE_SIZE;
/// The square root of the number of RGBA pixels we're allowed to use for a texture, rounded down
/// to the next power of two.
const SQRT_MAX_RGBA_PIXELS_PER_TEXTURE: u32 = 8192;
@ -330,9 +327,8 @@ impl TexturePage {
self.texture_size = new_texture_size
}
fn can_grow(&self) -> bool {
self.texture_size.width < max_texture_size() ||
self.texture_size.height < max_texture_size()
fn can_grow(&self, max_size: u32) -> bool {
self.texture_size.width < max_size || self.texture_size.height < max_size
}
}
@ -541,6 +537,7 @@ pub struct TextureCache {
items: FreeList<TextureCacheItem>,
arena: TextureCacheArena,
pending_updates: TextureUpdateList,
max_texture_size: u32,
}
#[derive(PartialEq, Eq, Debug)]
@ -556,16 +553,25 @@ pub struct AllocationResult {
}
impl TextureCache {
pub fn new() -> TextureCache {
pub fn new(mut max_texture_size: u32) -> TextureCache {
if max_texture_size * max_texture_size > MAX_RGBA_PIXELS_PER_TEXTURE {
max_texture_size = SQRT_MAX_RGBA_PIXELS_PER_TEXTURE;
}
TextureCache {
cache_id_list: CacheTextureIdList::new(),
free_texture_levels: HashMap::with_hasher(Default::default()),
items: FreeList::new(),
pending_updates: TextureUpdateList::new(),
arena: TextureCacheArena::new(),
max_texture_size: max_texture_size,
}
}
pub fn max_texture_size(&self) -> u32 {
self.max_texture_size
}
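A small illustration of the clamping in new(), assuming the constants defined earlier in this file; the 16384 below is a hypothetical device limit:

    // 16384 x 16384 exceeds MAX_RGBA_PIXELS_PER_TEXTURE, so the cache falls back
    // to the SQRT_MAX_RGBA_PIXELS_PER_TEXTURE value (8192) defined above.
    let cache = TextureCache::new(16384);
    assert_eq!(cache.max_texture_size(), 8192);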
pub fn pending_updates(&mut self) -> TextureUpdateList {
mem::replace(&mut self.pending_updates, TextureUpdateList::new())
}
@ -626,8 +632,8 @@ impl TextureCache {
};
// TODO(gw): Handle this sensibly (support failing to render items that can't fit?)
assert!(requested_size.width < max_texture_size());
assert!(requested_size.height < max_texture_size());
assert!(requested_size.width < self.max_texture_size);
assert!(requested_size.height < self.max_texture_size);
// Loop until an allocation succeeds, growing or adding new
// texture pages as required.
@ -651,11 +657,11 @@ impl TextureCache {
}
}
if !page_list.is_empty() && page_list.last().unwrap().can_grow() {
if !page_list.is_empty() && page_list.last().unwrap().can_grow(self.max_texture_size) {
let last_page = page_list.last_mut().unwrap();
// Grow the texture.
let new_width = cmp::min(last_page.texture_size.width * 2, max_texture_size());
let new_height = cmp::min(last_page.texture_size.height * 2, max_texture_size());
let new_width = cmp::min(last_page.texture_size.width * 2, self.max_texture_size);
let new_height = cmp::min(last_page.texture_size.height * 2, self.max_texture_size);
let texture_size = DeviceUintSize::new(new_width, new_height);
self.pending_updates.push(TextureUpdate {
id: last_page.texture_id,
@ -673,7 +679,7 @@ impl TextureCache {
}
// We need a new page.
let texture_size = initial_texture_size();
let texture_size = initial_texture_size(self.max_texture_size);
let free_texture_levels_entry = self.free_texture_levels.entry(format);
let mut free_texture_levels = match free_texture_levels_entry {
Entry::Vacant(entry) => entry.insert(Vec::new()),
@ -714,6 +720,9 @@ impl TextureCache {
ImageData::ExternalHandle(..) | ImageData::ExternalBuffer(..)=> {
panic!("Doesn't support Update() for external image.");
}
ImageData::Blob(..) => {
panic!("The vector image should have been rasterized into a raw image.");
}
ImageData::Raw(bytes) => {
TextureUpdateOp::Update {
page_pos_x: existing_item.allocated_rect.origin.x,
@ -739,6 +748,10 @@ impl TextureCache {
descriptor: ImageDescriptor,
filter: TextureFilter,
data: ImageData) {
if let ImageData::Blob(..) = data {
panic!("must rasterize the vector image before adding to the cache");
}
let width = descriptor.width;
let height = descriptor.height;
let format = descriptor.format;
@ -756,6 +769,9 @@ impl TextureCache {
ImageData::ExternalHandle(..) => {
panic!("External handle should not go through texture_cache.");
}
ImageData::Blob(..) => {
panic!("The vector image should have been rasterized.");
}
ImageData::Raw(bytes) => {
let update_op = TextureUpdate {
id: result.item.texture_id,
@ -877,22 +893,7 @@ pub struct FreeTextureLevel {
}
/// Returns the number of pixels on a side we start out with for our texture atlases.
fn initial_texture_size() -> DeviceUintSize {
let max_hardware_texture_size = *MAX_TEXTURE_SIZE as u32;
let initial_size = if max_hardware_texture_size * max_hardware_texture_size > INITIAL_TEXTURE_AREA {
INITIAL_TEXTURE_SIZE
} else {
max_hardware_texture_size
};
fn initial_texture_size(max_texture_size: u32) -> DeviceUintSize {
let initial_size = cmp::min(max_texture_size, INITIAL_TEXTURE_SIZE);
DeviceUintSize::new(initial_size, initial_size)
}
/// Returns the number of pixels on a side we're allowed to use for our texture atlases.
fn max_texture_size() -> u32 {
let max_hardware_texture_size = *MAX_TEXTURE_SIZE as u32;
if max_hardware_texture_size * max_hardware_texture_size > MAX_RGBA_PIXELS_PER_TEXTURE {
SQRT_MAX_RGBA_PIXELS_PER_TEXTURE
} else {
max_hardware_texture_size
}
}

Просмотреть файл

@ -534,12 +534,13 @@ impl AlphaBatcher {
for item in &task.alpha_items {
let (batch_key, item_bounding_rect) = match item {
&AlphaRenderItem::Blend(stacking_context_index, ..) => {
let stacking_context = &ctx.stacking_context_store[stacking_context_index.0];
let stacking_context =
&ctx.stacking_context_store[stacking_context_index.0];
(AlphaBatchKey::new(AlphaBatchKind::Blend,
AlphaBatchKeyFlags::empty(),
BlendMode::Alpha,
BatchTextures::no_texture()),
&stacking_context.xf_rect.as_ref().unwrap().bounding_rect)
&stacking_context.bounding_rect)
}
&AlphaRenderItem::HardwareComposite(stacking_context_index, _, composite_op, ..) => {
let stacking_context = &ctx.stacking_context_store[stacking_context_index.0];
@ -547,7 +548,7 @@ impl AlphaBatcher {
AlphaBatchKeyFlags::empty(),
composite_op.to_blend_mode(),
BatchTextures::no_texture()),
&stacking_context.xf_rect.as_ref().unwrap().bounding_rect)
&stacking_context.bounding_rect)
}
&AlphaRenderItem::Composite(stacking_context_index,
backdrop_id,
@ -567,11 +568,10 @@ impl AlphaBatcher {
alpha_batches.push(batch);
continue;
}
&AlphaRenderItem::Primitive(stacking_context_index, prim_index, _) => {
let stacking_context =
&ctx.stacking_context_store[stacking_context_index.0];
&AlphaRenderItem::Primitive(clip_scroll_group_index, prim_index, _) => {
let group = &ctx.clip_scroll_group_store[clip_scroll_group_index.0];
let prim_metadata = ctx.prim_store.get_metadata(prim_index);
let transform_kind = stacking_context.xf_rect.as_ref().unwrap().kind;
let transform_kind = group.xf_rect.as_ref().unwrap().kind;
let needs_clipping = prim_metadata.clip_task.is_some();
let needs_blending = transform_kind == TransformedRectKind::Complex ||
!prim_metadata.is_opaque ||
@ -616,11 +616,7 @@ impl AlphaBatcher {
PrimitiveBatchItem::StackingContext(stacking_context_index) => {
let stacking_context =
&ctx.stacking_context_store[stacking_context_index.0];
stacking_context.xf_rect
.as_ref()
.unwrap()
.bounding_rect
.intersects(item_bounding_rect)
stacking_context.bounding_rect.intersects(item_bounding_rect)
}
PrimitiveBatchItem::Primitive(prim_index) => {
let bounding_rect = &ctx.prim_store.cpu_bounding_rects[prim_index.0];
@ -665,18 +661,19 @@ impl AlphaBatcher {
z);
}
&AlphaRenderItem::HardwareComposite(stacking_context_index, src_id, _, z) => {
ctx.prim_store.add_hardware_composite_to_batch(stacking_context_index,
batch,
task_index,
render_tasks.get_static_task_index(&src_id),
z);
ctx.prim_store.add_hardware_composite_to_batch(
stacking_context_index,
batch,
task_index,
render_tasks.get_static_task_index(&src_id),
z);
}
&AlphaRenderItem::Primitive(stacking_context_index, prim_index, z) => {
let stacking_context =
&ctx.stacking_context_store[stacking_context_index.0];
&AlphaRenderItem::Primitive(clip_scroll_group_index, prim_index, z) => {
let packed_layer = ctx.clip_scroll_group_store[clip_scroll_group_index.0]
.packed_layer_index;
ctx.prim_store.add_prim_to_batch(prim_index,
batch,
stacking_context.packed_layer_index,
packed_layer,
task_index,
render_tasks,
child_pass_index,
@ -690,10 +687,10 @@ impl AlphaBatcher {
&AlphaRenderItem::Composite(..) => unreachable!(),
&AlphaRenderItem::Blend(..) => unreachable!(),
&AlphaRenderItem::HardwareComposite(..) => unreachable!(),
&AlphaRenderItem::Primitive(stacking_context_index, prim_index, _) => {
let stacking_context = &ctx.stacking_context_store[stacking_context_index.0];
&AlphaRenderItem::Primitive(clip_scroll_group_index, prim_index, _) => {
let group = &ctx.clip_scroll_group_store[clip_scroll_group_index.0];
let transform_kind = group.xf_rect.as_ref().unwrap().kind;
let prim_metadata = ctx.prim_store.get_metadata(prim_index);
let transform_kind = stacking_context.xf_rect.as_ref().unwrap().kind;
let needs_clipping = prim_metadata.clip_task.is_some();
let needs_blending = transform_kind == TransformedRectKind::Complex ||
!prim_metadata.is_opaque ||
@ -745,12 +742,13 @@ impl AlphaBatcher {
&AlphaRenderItem::Composite(..) => unreachable!(),
&AlphaRenderItem::Blend(..) => unreachable!(),
&AlphaRenderItem::HardwareComposite(..) => unreachable!(),
&AlphaRenderItem::Primitive(stacking_context_index, prim_index, z) => {
let stacking_context =
&ctx.stacking_context_store[stacking_context_index.0];
&AlphaRenderItem::Primitive(clip_scroll_group_index, prim_index, z) => {
let packed_layer_index =
ctx.clip_scroll_group_store[clip_scroll_group_index.0]
.packed_layer_index;
ctx.prim_store.add_prim_to_batch(prim_index,
batch,
stacking_context.packed_layer_index,
packed_layer_index,
task_index,
render_tasks,
child_pass_index,
@ -848,6 +846,7 @@ impl ClipBatcher {
pub struct RenderTargetContext<'a> {
pub stacking_context_store: &'a [StackingContext],
pub clip_scroll_group_store: &'a [ClipScrollGroup],
pub prim_store: &'a PrimitiveStore,
pub resource_cache: &'a ResourceCache,
}
@ -1299,14 +1298,63 @@ pub struct PackedLayerIndex(pub usize);
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct StackingContextIndex(pub usize);
#[derive(Debug)]
pub struct StackingContext {
pub pipeline_id: PipelineId,
pub local_transform: LayerToScrollTransform,
pub local_rect: LayerRect,
pub scroll_layer_id: ScrollLayerId,
pub xf_rect: Option<TransformedRect>,
pub bounding_rect: DeviceIntRect,
pub composite_ops: CompositeOps,
pub clip_scroll_groups: Vec<ClipScrollGroupIndex>,
pub is_visible: bool,
}
impl StackingContext {
pub fn new(pipeline_id: PipelineId,
local_transform: LayerToScrollTransform,
local_rect: LayerRect,
composite_ops: CompositeOps,
clip_scroll_group_index: ClipScrollGroupIndex)
-> StackingContext {
StackingContext {
pipeline_id: pipeline_id,
local_transform: local_transform,
local_rect: local_rect,
bounding_rect: DeviceIntRect::zero(),
composite_ops: composite_ops,
clip_scroll_groups: vec![clip_scroll_group_index],
is_visible: false,
}
}
pub fn clip_scroll_group(&self) -> ClipScrollGroupIndex {
// Currently there is only one scrolled stacking context per context,
// but eventually this will be selected from the vector based on the
// scroll layer of this primitive.
self.clip_scroll_groups[0]
}
pub fn can_contribute_to_scene(&self) -> bool {
!self.composite_ops.will_make_invisible()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct ClipScrollGroupIndex(pub usize);
#[derive(Debug)]
pub struct ClipScrollGroup {
pub stacking_context_index: StackingContextIndex,
pub scroll_layer_id: ScrollLayerId,
pub packed_layer_index: PackedLayerIndex,
pub pipeline_id: PipelineId,
pub xf_rect: Option<TransformedRect>,
}
impl ClipScrollGroup {
pub fn is_visible(&self) -> bool {
self.xf_rect.is_some()
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
@ -1386,16 +1434,6 @@ impl CompositeOps {
}
}
impl StackingContext {
pub fn is_visible(&self) -> bool {
self.xf_rect.is_some()
}
pub fn can_contribute_to_scene(&self) -> bool {
!self.composite_ops.will_make_invisible()
}
}
/// A rendering-oriented representation of frame::Frame built by the render backend
/// and presented to the renderer.
pub struct Frame {

Просмотреть файл

@ -5,15 +5,15 @@ authors = ["The Mozilla Project Developers"]
license = "MPL-2.0"
[dependencies]
webrender_traits = {path = "../webrender_traits", version = "0.14"}
euclid = "0.10"
app_units = "0.3"
webrender_traits = {path = "../webrender_traits", version = "0.19"}
euclid = "0.11"
app_units = "0.4"
gleam = "0.2"
fnv="1.0"
[dependencies.webrender]
path = "../webrender"
version = "0.15"
version = "0.19"
default-features = false
features = ["codegen"]

Просмотреть файл

@ -18,11 +18,13 @@ class NewRenderer : public RendererEvent
public:
NewRenderer(WrAPI** aApi, layers::CompositorBridgeParentBase* aBridge,
GLint* aMaxTextureSize,
bool* aUseANGLE,
RefPtr<widget::CompositorWidget>&& aWidget,
layers::SynchronousTask* aTask,
bool aEnableProfiler)
: mWrApi(aApi)
, mMaxTextureSize(aMaxTextureSize)
, mUseANGLE(aUseANGLE)
, mBridge(aBridge)
, mCompositorWidget(Move(aWidget))
, mTask(aTask)
@ -46,6 +48,7 @@ public:
}
gl->fGetIntegerv(LOCAL_GL_MAX_TEXTURE_SIZE, mMaxTextureSize);
*mUseANGLE = gl->IsANGLE();
WrRenderer* wrRenderer = nullptr;
if (!wr_window_new(aWindowId, gl.get(), this->mEnableProfiler, nullptr, mWrApi, &wrRenderer)) {
@ -67,6 +70,7 @@ public:
private:
WrAPI** mWrApi;
GLint* mMaxTextureSize;
bool* mUseANGLE;
layers::CompositorBridgeParentBase* mBridge;
RefPtr<widget::CompositorWidget> mCompositorWidget;
layers::SynchronousTask* mTask;
@ -112,12 +116,13 @@ WebRenderAPI::Create(bool aEnableProfiler,
WrAPI* wrApi = nullptr;
GLint maxTextureSize = 0;
bool useANGLE = false;
// Dispatch a synchronous task because the WrApi object needs to be created
// on the render thread. If need be we could delay waiting on this task until
// the next time we need to access the WrApi object.
layers::SynchronousTask task("Create Renderer");
auto event = MakeUnique<NewRenderer>(&wrApi, aBridge, &maxTextureSize,
auto event = MakeUnique<NewRenderer>(&wrApi, aBridge, &maxTextureSize, &useANGLE,
Move(aWidget), &task, aEnableProfiler);
RenderThread::Get()->RunEvent(id, Move(event));
@ -127,7 +132,7 @@ WebRenderAPI::Create(bool aEnableProfiler,
return nullptr;
}
return RefPtr<WebRenderAPI>(new WebRenderAPI(wrApi, id, maxTextureSize)).forget();
return RefPtr<WebRenderAPI>(new WebRenderAPI(wrApi, id, maxTextureSize, useANGLE)).forget();
}
WebRenderAPI::~WebRenderAPI()

Просмотреть файл

@ -71,12 +71,14 @@ public:
void Readback(gfx::IntSize aSize, uint8_t *aBuffer, uint32_t aBufferSize);
GLint GetMaxTextureSize() const { return mMaxTextureSize; }
bool GetUseANGLE() const { return mUseANGLE; }
protected:
WebRenderAPI(WrAPI* aRawApi, wr::WindowId aId, GLint aMaxTextureSize)
WebRenderAPI(WrAPI* aRawApi, wr::WindowId aId, GLint aMaxTextureSize, bool aUseANGLE)
: mWrApi(aRawApi)
, mId(aId)
, mMaxTextureSize(aMaxTextureSize)
, mUseANGLE(aUseANGLE)
{}
~WebRenderAPI();
@ -84,6 +86,7 @@ protected:
WrAPI* mWrApi;
wr::WindowId mId;
GLint mMaxTextureSize;
bool mUseANGLE;
friend class DisplayListBuilder;
};

Просмотреть файл

@ -227,6 +227,14 @@ static inline WrRect ToWrRect(const gfx::IntRectTyped<T>& rect)
return ToWrRect(IntRectToRect(rect));
}
static inline WrPoint ToWrPoint(const gfx::Point& point)
{
WrPoint p;
p.x = point.x;
p.y = point.y;
return p;
}
struct ByteBuffer
{
ByteBuffer(size_t aLength, uint8_t* aData)

Просмотреть файл

@ -3,7 +3,7 @@ use std::{mem, slice};
use std::path::PathBuf;
use std::os::raw::{c_void, c_char};
use gleam::gl;
use webrender_traits::{BorderSide, BorderStyle, BorderRadius};
use webrender_traits::{BorderSide, BorderStyle, BorderRadius, BorderWidths, BorderDetails, NormalBorder};
use webrender_traits::{PipelineId, ClipRegion, PropertyBinding};
use webrender_traits::{Epoch, ExtendMode, ColorF, GlyphInstance, GradientStop, ImageDescriptor};
use webrender_traits::{FilterOp, ImageData, ImageFormat, ImageKey, ImageMask, ImageRendering, RendererKind, MixBlendMode};
@ -189,20 +189,10 @@ pub extern fn wr_window_new(window_id: WrWindowId,
println!("WebRender - OpenGL version new {}", version);
let opts = RendererOptions {
device_pixel_ratio: 1.0,
resource_override_path: None,
enable_aa: false,
enable_subpixel_aa: false,
enable_profiler: enable_profiler,
enable_scrollbars: false,
precache_shaders: false,
renderer_kind: RendererKind::Native,
debug: false,
clear_framebuffer: true,
render_target_debug: false,
clear_color: ColorF::new(1.0, 1.0, 1.0, 1.0),
recorder: recorder,
workers: None,
.. Default::default()
};
let (mut renderer, sender) = match Renderer::new(opts) {
@ -605,7 +595,9 @@ pub extern fn wr_api_set_root_pipeline(api: &mut RenderApi, pipeline_id: Pipelin
pub extern fn wr_api_add_image(api: &mut RenderApi, descriptor: &WrImageDescriptor, bytes: * const u8, size: usize) -> ImageKey {
assert!( unsafe { is_in_compositor_thread() });
let bytes = unsafe { slice::from_raw_parts(bytes, size).to_owned() };
return api.add_image(
let image_key = api.generate_image_key();
api.add_image(
image_key,
ImageDescriptor {
width: descriptor.width,
height: descriptor.height,
@ -615,6 +607,7 @@ pub extern fn wr_api_add_image(api: &mut RenderApi, descriptor: &WrImageDescript
},
ImageData::new(bytes)
);
image_key
}
#[no_mangle]
@ -690,14 +683,24 @@ pub extern fn wr_dp_push_border(state: &mut WrState, rect: WrRect, clip: WrRect,
radius: WrBorderRadius) {
assert!( unsafe { is_in_compositor_thread() });
let clip_region = state.frame_builder.dl_builder.new_clip_region(&clip.to_rect(), Vec::new(), None);
let border_widths = BorderWidths {
left: left.width,
top: top.width,
right: right.width,
bottom: bottom.width
};
let border_details = BorderDetails::Normal(NormalBorder {
left: left.to_border_side(),
right: right.to_border_side(),
top: top.to_border_side(),
bottom: bottom.to_border_side(),
radius: radius.to_border_radius(),
});
state.frame_builder.dl_builder.push_border(
rect.to_rect(),
clip_region,
left.to_border_side(),
top.to_border_side(),
right.to_border_side(),
bottom.to_border_side(),
radius.to_border_radius());
border_widths,
border_details);
}
#[no_mangle]
@ -803,7 +806,7 @@ impl WrBorderSide
{
pub fn to_border_side(&self) -> BorderSide
{
BorderSide { width: self.width, color: self.color.to_color(), style: self.style }
BorderSide { color: self.color.to_color(), style: self.style }
}
}
@ -929,7 +932,9 @@ pub extern fn wr_api_add_raw_font(api: &mut RenderApi,
let mut font_vector = Vec::new();
font_vector.extend_from_slice(font_slice);
return api.add_raw_font(font_vector);
let font_key = api.generate_font_key();
api.add_raw_font(font_key, font_vector);
font_key
}
#[no_mangle]

Просмотреть файл

@ -283,6 +283,7 @@ struct WrPoint
return x == aRhs.x && y == aRhs.y;
}
operator mozilla::gfx::Point() const { return mozilla::gfx::Point(x, y); }
};
struct WrImageMask

Просмотреть файл

@ -1,6 +1,6 @@
[package]
name = "webrender_traits"
version = "0.14.0"
version = "0.19.0"
authors = ["Glenn Watson <gw@intuitionlibrary.com>"]
license = "MPL-2.0"
repository = "https://github.com/servo/webrender"
@ -13,23 +13,23 @@ codegen = ["serde_codegen", "serde_codegen/with-syntex"]
ipc = ["ipc-channel"]
[dependencies]
app_units = "0.3.0"
app_units = "0.4"
byteorder = "1.0"
euclid = "0.10.3"
euclid = "0.11"
gleam = "0.2"
heapsize = "0.3.6"
offscreen_gl_context = {version = "0.5.0", features = ["serde_serialization"]}
serde = "0.8"
serde_derive = {version = "0.8", optional = true}
ipc-channel = { version = "0.5.0", optional = true }
ipc-channel = {version = "0.7", optional = true}
offscreen_gl_context = {version = "0.6", features = ["serde"]}
serde = "0.9"
serde_derive = {version = "0.9", optional = true}
[target.'cfg(target_os = "macos")'.dependencies]
core-graphics = "0.6"
core-graphics = "0.7"
[target.'cfg(target_os = "windows")'.dependencies]
dwrote = "0.1.1"
servo-dwrote = "0.2"
[build-dependencies.serde_codegen]
version = "0.8"
version = "0.9"
default_features = false
optional = true

Просмотреть файл

@ -54,20 +54,19 @@ impl RenderApi {
RenderApiSender::new(self.api_sender.clone(), self.payload_sender.clone())
}
pub fn add_raw_font(&self, bytes: Vec<u8>) -> FontKey {
pub fn generate_font_key(&self) -> FontKey {
let new_id = self.next_unique_id();
let key = FontKey::new(new_id.0, new_id.1);
let msg = ApiMsg::AddRawFont(key, bytes);
self.api_sender.send(msg).unwrap();
key
FontKey::new(new_id.0, new_id.1)
}
pub fn add_native_font(&self, native_font_handle: NativeFontHandle) -> FontKey {
let new_id = self.next_unique_id();
let key = FontKey::new(new_id.0, new_id.1);
pub fn add_raw_font(&self, key: FontKey, bytes: Vec<u8>) {
let msg = ApiMsg::AddRawFont(key, bytes);
self.api_sender.send(msg).unwrap();
}
pub fn add_native_font(&self, key: FontKey, native_font_handle: NativeFontHandle) {
let msg = ApiMsg::AddNativeFont(key, native_font_handle);
self.api_sender.send(msg).unwrap();
key
}
/// Gets the dimensions for the supplied glyph keys
@ -84,19 +83,18 @@ impl RenderApi {
}
/// Creates an `ImageKey`.
pub fn alloc_image(&self) -> ImageKey {
pub fn generate_image_key(&self) -> ImageKey {
let new_id = self.next_unique_id();
ImageKey::new(new_id.0, new_id.1)
}
/// Adds an image and returns the corresponding `ImageKey`.
/// Adds an image identified by the `ImageKey`.
pub fn add_image(&self,
key: ImageKey,
descriptor: ImageDescriptor,
data: ImageData) -> ImageKey {
let key = self.alloc_image();
data: ImageData) {
let msg = ApiMsg::AddImage(key, descriptor, data);
self.api_sender.send(msg).unwrap();
key
}
/// Updates a specific image.
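Callers now allocate a key first and then attach the resource to it. A hedged sketch of the new call sequence (variable names are invented; the methods are the ones defined above):

    // Fonts: generate the key, then register the bytes against it.
    let font_key = api.generate_font_key();
    api.add_raw_font(font_key, font_bytes);

    // Images follow the same pattern; ImageData::new() wraps a raw pixel buffer.
    let image_key = api.generate_image_key();
    api.add_image(image_key, descriptor, ImageData::new(pixels));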

Просмотреть файл

@ -67,26 +67,26 @@ pub fn msg_channel<T>() -> Result<(MsgSender<T>, MsgReceiver<T>), Error> {
///
impl<T> Serialize for MsgReceiver<T> {
fn serialize<S: Serializer>(&self, _: &mut S) -> Result<(), S::Error> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<T> Serialize for MsgSender<T> {
fn serialize<S: Serializer>(&self, _: &mut S) -> Result<(), S::Error> {
fn serialize<S: Serializer>(&self, _: S) -> Result<S::Ok, S::Error> {
unreachable!();
}
}
impl<T> Deserialize for MsgReceiver<T> {
fn deserialize<D>(_: &mut D) -> Result<MsgReceiver<T>, D::Error>
fn deserialize<D>(_: D) -> Result<MsgReceiver<T>, D::Error>
where D: Deserializer {
unreachable!();
}
}
impl<T> Deserialize for MsgSender<T> {
fn deserialize<D>(_: &mut D) -> Result<MsgSender<T>, D::Error>
fn deserialize<D>(_: D) -> Result<MsgSender<T>, D::Error>
where D: Deserializer {
unreachable!();
}

Просмотреть файл

@ -3,33 +3,11 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use display_list::AuxiliaryListsBuilder;
use {BorderRadius, BorderDisplayItem, ClipRegion, ColorF, ComplexClipRegion};
use {BorderRadius, ClipRegion, ColorF, ComplexClipRegion};
use {FontKey, ImageKey, PipelineId, ScrollLayerId, ScrollLayerInfo, ServoScrollRootId};
use {ImageMask, ItemRange};
use {LayoutSize, LayoutPoint, LayoutRect};
impl BorderDisplayItem {
pub fn top_left_inner_radius(&self) -> LayoutSize {
LayoutSize::new((self.radius.top_left.width - self.left.width).max(0.0),
(self.radius.top_left.height - self.top.width).max(0.0))
}
pub fn top_right_inner_radius(&self) -> LayoutSize {
LayoutSize::new((self.radius.top_right.width - self.right.width).max(0.0),
(self.radius.top_right.height - self.top.width).max(0.0))
}
pub fn bottom_left_inner_radius(&self) -> LayoutSize {
LayoutSize::new((self.radius.bottom_left.width - self.left.width).max(0.0),
(self.radius.bottom_left.height - self.bottom.width).max(0.0))
}
pub fn bottom_right_inner_radius(&self) -> LayoutSize {
LayoutSize::new((self.radius.bottom_right.width - self.right.width).max(0.0),
(self.radius.bottom_right.height - self.bottom.width).max(0.0))
}
}
impl BorderRadius {
pub fn zero() -> BorderRadius {
BorderRadius {
@ -49,7 +27,23 @@ impl BorderRadius {
}
}
pub fn is_uniform(&self) -> Option<LayoutSize> {
pub fn uniform_size(radius: LayoutSize) -> BorderRadius {
BorderRadius {
top_left: radius,
top_right: radius,
bottom_left: radius,
bottom_right: radius,
}
}
pub fn is_uniform(&self) -> Option<f32> {
match self.is_uniform_size() {
Some(radius) if radius.width == radius.height => Some(radius.width),
_ => None
}
}
pub fn is_uniform_size(&self) -> Option<LayoutSize> {
let uniform_radius = self.top_left;
if self.top_right == uniform_radius &&
self.bottom_left == uniform_radius &&
@ -62,7 +56,7 @@ impl BorderRadius {
pub fn is_zero(&self) -> bool {
if let Some(radius) = self.is_uniform() {
radius.width == 0.0 && radius.height == 0.0
radius == 0.0
} else {
false
}

Просмотреть файл

@ -5,8 +5,8 @@
use app_units::Au;
use std::mem;
use std::slice;
use {AuxiliaryLists, AuxiliaryListsDescriptor, BorderDisplayItem, BorderRadius};
use {BorderSide, BoxShadowClipMode, BoxShadowDisplayItem, BuiltDisplayList};
use {AuxiliaryLists, AuxiliaryListsDescriptor, BorderDisplayItem};
use {BoxShadowClipMode, BoxShadowDisplayItem, BuiltDisplayList};
use {BuiltDisplayListDescriptor, ClipRegion, ComplexClipRegion, ColorF};
use {DisplayItem, DisplayListMode, ExtendMode, FilterOp, YuvColorSpace};
use {FontKey, GlyphInstance, GradientDisplayItem, RadialGradientDisplayItem, GradientStop, IframeDisplayItem};
@ -15,7 +15,7 @@ use {PushScrollLayerItem, PushStackingContextDisplayItem, RectangleDisplayItem,
use {ScrollPolicy, ServoScrollRootId, SpecificDisplayItem, StackingContext, TextDisplayItem};
use {WebGLContextId, WebGLDisplayItem, YuvImageDisplayItem};
use {LayoutTransform, LayoutPoint, LayoutRect, LayoutSize};
use {GlyphOptions, PropertyBinding};
use {BorderDetails, BorderWidths, GlyphOptions, PropertyBinding};
impl BuiltDisplayListDescriptor {
pub fn size(&self) -> usize {
@ -186,17 +186,11 @@ impl DisplayListBuilder {
pub fn push_border(&mut self,
rect: LayoutRect,
clip: ClipRegion,
left: BorderSide,
top: BorderSide,
right: BorderSide,
bottom: BorderSide,
radius: BorderRadius) {
widths: BorderWidths,
details: BorderDetails) {
let item = BorderDisplayItem {
left: left,
top: top,
right: right,
bottom: bottom,
radius: radius,
details: details,
widths: widths,
};
let display_item = DisplayItem {

Просмотреть файл

@ -6,7 +6,7 @@
// for the serde implementations.
use app_units::Au;
use euclid::Point2D;
use euclid::{Point2D, SideOffsets2D};
use channel::{PayloadSender, MsgSender};
#[cfg(feature = "nightly")]
use core::nonzero::NonZero;
@ -175,7 +175,7 @@ pub struct AuxiliaryListsDescriptor {
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct BorderDisplayItem {
pub struct NormalBorder {
pub left: BorderSide,
pub right: BorderSide,
pub top: BorderSide,
@ -183,6 +183,42 @@ pub struct BorderDisplayItem {
pub radius: BorderRadius,
}
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub enum RepeatMode {
Stretch,
Repeat,
Round,
Space,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct NinePatchDescriptor {
pub width: u32,
pub height: u32,
pub slice: SideOffsets2D<u32>,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct ImageBorder {
pub image_key: ImageKey,
pub patch: NinePatchDescriptor,
pub outset: SideOffsets2D<f32>,
pub repeat_horizontal: RepeatMode,
pub repeat_vertical: RepeatMode,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub enum BorderDetails {
Normal(NormalBorder),
Image(ImageBorder),
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct BorderDisplayItem {
pub widths: BorderWidths,
pub details: BorderDetails,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct BorderRadius {
pub top_left: LayoutSize,
@ -191,10 +227,17 @@ pub struct BorderRadius {
pub bottom_right: LayoutSize,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct BorderWidths {
pub left: f32,
pub top: f32,
pub right: f32,
pub bottom: f32,
}
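A hedged sketch of filling in the image-border variant defined above; the key, sizes and slice values are invented for illustration, and `builder`, `rect`, `clip` and `widths` are assumed to already exist:

    let details = BorderDetails::Image(ImageBorder {
        image_key: nine_patch_key, // an ImageKey previously registered via add_image()
        patch: NinePatchDescriptor {
            width: 30,
            height: 30,
            slice: SideOffsets2D::new(10, 10, 10, 10), // top, right, bottom, left
        },
        outset: SideOffsets2D::new(0.0, 0.0, 0.0, 0.0),
        repeat_horizontal: RepeatMode::Stretch,
        repeat_vertical: RepeatMode::Stretch,
    });
    builder.push_border(rect, clip, widths, details);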
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct BorderSide {
pub width: f32,
pub color: ColorF,
pub style: BorderStyle,
}
@ -605,9 +648,44 @@ pub enum YuvColorSpace {
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ExternalImageId(pub u64);
pub trait BlobImageRenderer: Send {
fn request_blob_image(&mut self,
key: ImageKey,
data: Arc<BlobImageData>,
descriptor: &BlobImageDescriptor);
fn resolve_blob_image(&mut self, key: ImageKey) -> BlobImageResult;
}
pub type BlobImageData = Vec<u8>;
#[derive(Copy, Clone, Debug)]
pub struct BlobImageDescriptor {
pub width: u32,
pub height: u32,
pub format: ImageFormat,
pub scale_factor: f32,
}
pub struct RasterizedBlobImage {
pub width: u32,
pub height: u32,
pub data: Vec<u8>,
}
#[derive(Clone, Debug)]
pub enum BlobImageError {
Oom,
InvalidKey,
InvalidData,
Other(String),
}
pub type BlobImageResult = Result<RasterizedBlobImage, BlobImageError>;
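Since this trait is the main extension point added here, a minimal, hedged sketch of an implementation follows; the struct name and the solid-fill behaviour are assumptions (as is ImageKey implementing Hash/Eq, which its use as a map key in resource_cache suggests). Only the trait and the types above come from the patch; such an object would be handed to WebRender through RendererOptions::blob_image_renderer.

    use std::collections::HashMap;
    use std::sync::Arc;
    use webrender_traits::{BlobImageData, BlobImageDescriptor, BlobImageError,
                           BlobImageRenderer, BlobImageResult, ImageKey, RasterizedBlobImage};

    // Toy renderer: ignores the encoded commands and produces an opaque white image.
    struct SolidWhiteBlobRenderer {
        pending: HashMap<ImageKey, BlobImageDescriptor>,
    }

    impl BlobImageRenderer for SolidWhiteBlobRenderer {
        fn request_blob_image(&mut self,
                              key: ImageKey,
                              _data: Arc<BlobImageData>,
                              descriptor: &BlobImageDescriptor) {
            // A real implementation would start rasterizing here, possibly on a worker thread.
            self.pending.insert(key, *descriptor);
        }

        fn resolve_blob_image(&mut self, key: ImageKey) -> BlobImageResult {
            let desc = match self.pending.remove(&key) {
                Some(desc) => desc,
                None => return Err(BlobImageError::InvalidKey),
            };
            let len = (desc.width * desc.height * 4) as usize;
            Ok(RasterizedBlobImage {
                width: desc.width,
                height: desc.height,
                data: vec![0xff; len],
            })
        }
    }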
#[derive(Clone, Serialize, Deserialize)]
pub enum ImageData {
Raw(Arc<Vec<u8>>),
Blob(Arc<BlobImageData>),
ExternalHandle(ExternalImageId),
ExternalBuffer(ExternalImageId),
}
@ -620,6 +698,14 @@ impl ImageData {
pub fn new_shared(bytes: Arc<Vec<u8>>) -> ImageData {
ImageData::Raw(bytes)
}
pub fn new_blob_image(commands: Vec<u8>) -> ImageData {
ImageData::Blob(Arc::new(commands))
}
pub fn new_shared_blob_image(commands: Arc<Vec<u8>>) -> ImageData {
ImageData::Blob(commands)
}
}
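And a hedged helper tying this to the RenderApi changes earlier in the diff; the function name and the meaning of `commands` are assumptions, while the individual calls are the ones added in this patch:

    // Registers a serialized vector drawing as a blob image and returns its key.
    fn add_vector_image(api: &RenderApi,
                        descriptor: ImageDescriptor,
                        commands: Vec<u8>) -> ImageKey {
        let key = api.generate_image_key();
        api.add_image(key, descriptor, ImageData::new_blob_image(commands));
        key
    }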
#[repr(C)]
@ -987,12 +1073,12 @@ macro_rules! define_resource_id {
define_resource_id_struct!($name);
impl ::serde::Deserialize for $name {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: ::serde::Deserializer
{
let id = try!(u32::deserialize(deserializer));
if id == 0 {
Err(::serde::Error::invalid_value("expected a non-zero value"))
Err(::serde::de::Error::custom("expected a non-zero value"))
} else {
Ok(unsafe { $name::new(id) })
}
@ -1000,7 +1086,7 @@ macro_rules! define_resource_id {
}
impl ::serde::Serialize for $name {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: ::serde::Serializer
{
self.get().serialize(serializer)

Просмотреть файл

@ -4717,10 +4717,10 @@ PresShell::RenderDocument(const nsRect& aRect, uint32_t aFlags,
if (view && view->GetWidget() &&
nsLayoutUtils::GetDisplayRootFrame(rootFrame) == rootFrame) {
LayerManager* layerManager = view->GetWidget()->GetLayerManager();
// ClientLayerManagers in content processes don't support
// taking snapshots.
// ClientLayerManagers or WebRenderLayerManagers in content processes
// don't support taking snapshots.
if (layerManager &&
(!layerManager->AsClientLayerManager() ||
(!layerManager->AsKnowsCompositor() ||
XRE_IsParentProcess())) {
flags |= PaintFrameFlags::PAINT_WIDGET_LAYERS;
}

Просмотреть файл

@ -1391,6 +1391,56 @@ nsCSSRendering::EndFrameTreesLocked()
}
}
bool
nsCSSRendering::HasBoxShadowNativeTheme(nsIFrame* aFrame,
bool& aMaybeHasBorderRadius)
{
const nsStyleDisplay* styleDisplay = aFrame->StyleDisplay();
nsITheme::Transparency transparency;
if (aFrame->IsThemed(styleDisplay, &transparency)) {
aMaybeHasBorderRadius = false;
// For opaque (rectangular) theme widgets we can take the generic
// border-box path with border-radius disabled.
return transparency != nsITheme::eOpaque;
}
aMaybeHasBorderRadius = true;
return false;
}
gfx::Color
nsCSSRendering::GetShadowColor(nsCSSShadowItem* aShadow,
nsIFrame* aFrame,
float aOpacity)
{
// Get the shadow color; if not specified, use the foreground color
nscolor shadowColor;
if (aShadow->mHasColor)
shadowColor = aShadow->mColor;
else
shadowColor = aFrame->StyleColor()->mColor;
Color color = Color::FromABGR(shadowColor);
color.a *= aOpacity;
return color;
}
nsRect
nsCSSRendering::GetShadowRect(const nsRect aFrameArea,
bool aNativeTheme,
nsIFrame* aForFrame)
{
nsRect frameRect = aNativeTheme ?
aForFrame->GetVisualOverflowRectRelativeToSelf() + aFrameArea.TopLeft() :
aFrameArea;
Sides skipSides = aForFrame->GetSkipSides();
frameRect = ::BoxDecorationRectForBorder(aForFrame, frameRect, skipSides);
// We explicitly do not need to account for the spread radius here;
// WebRender does it for us, and PaintBoxShadow will for the non-WR path.
return frameRect;
}
void
nsCSSRendering::PaintBoxShadowOuter(nsPresContext* aPresContext,
nsRenderingContext& aRenderingContext,
@ -1405,25 +1455,11 @@ nsCSSRendering::PaintBoxShadowOuter(nsPresContext* aPresContext,
return;
bool hasBorderRadius;
bool nativeTheme; // mutually exclusive with hasBorderRadius
// mutually exclusive with hasBorderRadius
bool nativeTheme = HasBoxShadowNativeTheme(aForFrame, hasBorderRadius);
const nsStyleDisplay* styleDisplay = aForFrame->StyleDisplay();
nsITheme::Transparency transparency;
if (aForFrame->IsThemed(styleDisplay, &transparency)) {
// We don't respect border-radius for native-themed widgets
hasBorderRadius = false;
// For opaque (rectangular) theme widgets we can take the generic
// border-box path with border-radius disabled.
nativeTheme = transparency != nsITheme::eOpaque;
} else {
nativeTheme = false;
hasBorderRadius = true; // we'll update this below
}
nsRect frameRect = nativeTheme ?
aForFrame->GetVisualOverflowRectRelativeToSelf() + aFrameArea.TopLeft() :
aFrameArea;
Sides skipSides = aForFrame->GetSkipSides();
frameRect = ::BoxDecorationRectForBorder(aForFrame, frameRect, skipSides);
nsRect frameRect = GetShadowRect(aFrameArea, nativeTheme, aForFrame);
// Get any border radius, since box-shadow must also have rounded corners if
// the frame does.
@ -1487,15 +1523,7 @@ nsCSSRendering::PaintBoxShadowOuter(nsPresContext* aPresContext,
shadowGfxRectPlusBlur.RoundOut();
MaybeSnapToDevicePixels(shadowGfxRectPlusBlur, aDrawTarget, true);
// Set the shadow color; if not specified, use the foreground color
nscolor shadowColor;
if (shadowItem->mHasColor)
shadowColor = shadowItem->mColor;
else
shadowColor = aForFrame->StyleColor()->mColor;
Color gfxShadowColor(Color::FromABGR(shadowColor));
gfxShadowColor.a *= aOpacity;
Color gfxShadowColor = GetShadowColor(shadowItem, aForFrame, aOpacity);
if (nativeTheme) {
nsContextBoxBlur blurringArea;
@ -1569,6 +1597,7 @@ nsCSSRendering::PaintBoxShadowOuter(nsPresContext* aPresContext,
// Clip the shadow so that we only get the part that applies to aForFrame.
nsRect fragmentClip = shadowRectPlusBlur;
Sides skipSides = aForFrame->GetSkipSides();
if (!skipSides.IsEmpty()) {
if (skipSides.Left()) {
nscoord xmost = fragmentClip.XMost();
@ -1762,11 +1791,7 @@ nsCSSRendering::PaintBoxShadowInner(nsPresContext* aPresContext,
Rect shadowGfxRect = NSRectToRect(paddingRect, twipsPerPixel);
shadowGfxRect.Round();
// Set the shadow color; if not specified, use the foreground color
Color shadowColor = Color::FromABGR(shadowItem->mHasColor ?
shadowItem->mColor :
aForFrame->StyleColor()->mColor);
Color shadowColor = GetShadowColor(shadowItem, aForFrame, 1.0);
renderContext->Save();
// This clips the outside border radius.

Просмотреть файл

@ -354,6 +354,7 @@ struct nsBackgroundLayerState {
};
struct nsCSSRendering {
typedef mozilla::gfx::Color Color;
typedef mozilla::gfx::CompositionOp CompositionOp;
typedef mozilla::gfx::DrawTarget DrawTarget;
typedef mozilla::gfx::Float Float;
@ -379,6 +380,17 @@ struct nsCSSRendering {
nsIFrame* aForFrame,
const nsRect& aFrameArea);
static nsRect GetShadowRect(const nsRect aFrameArea,
bool aNativeTheme,
nsIFrame* aForFrame);
static mozilla::gfx::Color GetShadowColor(nsCSSShadowItem* aShadow,
nsIFrame* aFrame,
float aOpacity);
// Returns whether the frame has a native theme.
// aMaybeHasBorderRadius returns false if we can detect early
// that there is no border radius.
static bool HasBoxShadowNativeTheme(nsIFrame* aFrame,
bool& aMaybeHasBorderRadius);
static void PaintBoxShadowOuter(nsPresContext* aPresContext,
nsRenderingContext& aRenderingContext,
nsIFrame* aForFrame,

Просмотреть файл

@ -4838,45 +4838,54 @@ nsDisplayBoxShadowOuter::CreateWebRenderCommands(nsTArray<WebRenderCommand>& aCo
if (!shadows)
return;
bool hasBorderRadius;
bool nativeTheme = nsCSSRendering::HasBoxShadowNativeTheme(mFrame,
hasBorderRadius);
// Everything here is in app units, change to device units.
for (uint32_t i = 0; i < rects.Length(); ++i) {
Rect clipRect = NSRectToRect(rects[i], appUnitsPerDevPixel);
Rect gfxBorderRect = NSRectToRect(borderRect, appUnitsPerDevPixel);
nsCSSShadowArray* shadows = mFrame->StyleEffects()->mBoxShadow;
Rect deviceClipRect = aLayer->RelativeToParent(clipRect);
Rect deviceBoxRect = aLayer->RelativeToParent(gfxBorderRect);
for (uint32_t j = shadows->Length(); j > 0; j--) {
nsCSSShadowItem* shadow = shadows->ShadowAt(j - 1);
// Unlike nsCSSRendering, we don't need the full size of the shadow rect
// here, since WR takes care of the blur and spread radius calculations.
nsRect shadowRect = nsCSSRendering::GetShadowRect(borderRect,
nativeTheme,
mFrame);
gfx::Color shadowColor = nsCSSRendering::GetShadowColor(shadow,
mFrame,
mOpacity);
shadowRect.MoveBy(shadow->mXOffset, shadow->mYOffset);
for (uint32_t j = shadows->Length(); j > 0; --j) {
nsCSSShadowItem* shadowItem = shadows->ShadowAt(j - 1);
nscoord blurRadius = shadowItem->mRadius;
float gfxBlurRadius = blurRadius / appUnitsPerDevPixel;
// Now translate everything to device pixels.
Point shadowOffset;
shadowOffset.x = (shadow->mXOffset / appUnitsPerDevPixel);
shadowOffset.y = (shadow->mYOffset / appUnitsPerDevPixel);
// TODO: Have to refactor our nsCSSRendering
// to get the actual rects correct.
nscolor shadowColor;
if (shadowItem->mHasColor)
shadowColor = shadowItem->mColor;
else
shadowColor = mFrame->StyleColor()->mColor;
Rect deviceBoxRect = NSRectToRect(shadowRect, appUnitsPerDevPixel);
deviceBoxRect = aLayer->RelativeToParent(deviceBoxRect);
Color gfxShadowColor(Color::FromABGR(shadowColor));
gfxShadowColor.a *= mOpacity;
Rect deviceClipRect = aLayer->RelativeToParent(clipRect + shadowOffset);
WrPoint offset;
offset.x = shadowItem->mXOffset;
offset.y = shadowItem->mYOffset;
float blurRadius = shadow->mRadius / appUnitsPerDevPixel;
// TODO: Calculate the border radius here.
float borderRadius = 0.0;
float spreadRadius = shadow->mSpread / appUnitsPerDevPixel;
aCommands.AppendElement(OpDPPushBoxShadow(
wr::ToWrRect(deviceBoxRect),
wr::ToWrRect(deviceClipRect),
wr::ToWrRect(deviceBoxRect),
offset,
wr::ToWrColor(gfxShadowColor),
gfxBlurRadius,
0,
0,
wr::ToWrPoint(shadowOffset),
wr::ToWrColor(shadowColor),
blurRadius,
spreadRadius,
borderRadius,
WrBoxShadowClipMode::Outset
));
));
}
}
}

Просмотреть файл

@ -326,9 +326,6 @@
'-Wimplicit-fallthrough',
'-Wthread-safety',
],
'cflags_mozilla': [
'-Wthread-safety',
],
}],
],
}],

Просмотреть файл

@ -1157,4 +1157,4 @@ static const TransportSecurityPreload kPublicKeyPinningPreloadList[] = {
static const int32_t kUnknownId = -1;
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1496246555187000);
static const PRTime kPreloadPKPinsExpirationTime = INT64_C(1496331215141000);

Просмотреть файл

@ -6,6 +6,7 @@
0x1337.eu: could not connect to host
0x44.net: did not receive HSTS header
0xa.in: could not connect to host
0xacab.org: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
0xb612.org: could not connect to host
0xf00.ch: did not receive HSTS header
100dayloans.com: max-age too low: 0
@ -44,7 +45,6 @@
3sreporting.com: did not receive HSTS header
3yearloans.com: max-age too low: 0
404.sh: max-age too low: 0
404404.info: could not connect to host
41844.de: could not connect to host
420dongstorm.com: could not connect to host
42ms.org: could not connect to host
@ -53,6 +53,7 @@
47ronin.com: did not receive HSTS header
4cclothing.com: could not connect to host
4elements.com: did not receive HSTS header
4eyes.ch: did not receive HSTS header
4mm.org: did not receive HSTS header
4sqsu.eu: could not connect to host
50millionablaze.org: did not receive HSTS header
@ -151,6 +152,7 @@ airbnb.com: did not receive HSTS header
aircomms.com: did not receive HSTS header
airproto.com: did not receive HSTS header
aishnair.com: could not connect to host
aisle3.space: could not connect to host
aiticon.de: did not receive HSTS header
aiw-thkoeln.online: could not connect to host
ajmahal.com: could not connect to host
@ -309,6 +311,9 @@ askfit.cz: did not receive HSTS header
asm-x.com: could not connect to host
asmui.ga: could not connect to host
asmui.ml: could not connect to host
asr.li: could not connect to host
asr.rocks: could not connect to host
asr.solar: could not connect to host
asrob.eu: did not receive HSTS header
ass.org.au: did not receive HSTS header
assdecoeur.org: could not connect to host
@ -327,7 +332,6 @@ athaliasoft.com: did not receive HSTS header
athenelive.com: could not connect to host
athul.xyz: did not receive HSTS header
atlex.nl: did not receive HSTS header
atlseccon.com: did not receive HSTS header
atomik.pro: could not connect to host
atop.io: could not connect to host
attimidesigns.com: did not receive HSTS header
@ -448,12 +452,11 @@ besixdouze.world: could not connect to host
bestbeards.ca: could not connect to host
bestcellular.com: did not receive HSTS header
betafive.net: could not connect to host
betaworx.de: did not receive HSTS header
betaworx.eu: did not receive HSTS header
betcafearena.ro: did not receive HSTS header
betnet.fr: could not connect to host
betplanning.it: did not receive HSTS header
bets.de: did not receive HSTS header
bettercrypto.org: could not connect to host
bettween.com: could not connect to host
betz.ro: did not receive HSTS header
bevapehappy.com: did not receive HSTS header
@ -541,14 +544,13 @@ bodyweightsolution.com: could not connect to host
boensou.com: did not receive HSTS header
bogosity.se: could not connect to host
bohan.life: could not connect to host
bombsquad.studio: could not connect to host
bonapp.restaurant: could not connect to host
bonfi.net: did not receive HSTS header
bonigo.de: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
bonitabrazilian.co.nz: did not receive HSTS header
bookcelerator.com: did not receive HSTS header
booked.holiday: could not connect to host
bookourdjs.com: could not connect to host
bookourdjs.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
boomerang.com: could not connect to host
boosterlearnpro.com: did not receive HSTS header
bootjp.me: did not receive HSTS header
@ -597,11 +599,12 @@ buhler.pro: did not receive HSTS header
buildci.asia: could not connect to host
buildsaver.co.za: did not receive HSTS header
built.by: did not receive HSTS header
builtritetrailerplans.com: could not connect to host
builtritetrailerplans.com: did not receive HSTS header
bulletpoint.cz: did not receive HSTS header
bulmafox.com: could not connect to host
bumarkamoda.com: could not connect to host
bunaken.asia: could not connect to host
bunbomenu.de: could not connect to host
burian-server.cz: could not connect to host
burrow.ovh: could not connect to host
burtrum.me: could not connect to host
@ -628,6 +631,7 @@ bysymphony.com: max-age too low: 0
byte.wtf: did not receive HSTS header
bytepark.de: did not receive HSTS header
bytesund.biz: could not connect to host
c0rn3j.com: could not connect to host
c1yd3i.me: could not connect to host
c3b.info: could not connect to host
cabarave.com: could not connect to host
@ -662,6 +666,7 @@ capturethepen.co.uk: could not connect to host
car-navi.ph: did not receive HSTS header
carano-service.de: did not receive HSTS header
caraudio69.cz: could not connect to host
carck.co.uk: did not receive HSTS header
cardloan-manual.net: could not connect to host
cardoni.net: did not receive HSTS header
cardstream.com: did not receive HSTS header
@ -734,7 +739,6 @@ cheesetart.my: could not connect to host
cheetah85.de: could not connect to host
chejianer.cn: did not receive HSTS header
chensir.net: could not connect to host
chepaofen.com: could not connect to host
cherysunzhang.com: max-age too low: 7776000
chihiro.xyz: could not connect to host
chijiokeindustries.co.uk: could not connect to host
@ -778,7 +782,6 @@ clara-baumert.de: could not connect to host
classicsandexotics.com: did not receive HSTS header
classicspublishing.com: could not connect to host
clcleaningco.com: could not connect to host
cldly.com: could not connect to host
cleanexperts.co.uk: could not connect to host
cleaningsquad.ca: could not connect to host
cleanmta.com: could not connect to host
@ -789,7 +792,7 @@ clickandgo.com: did not receive HSTS header
clickandshoot.nl: did not receive HSTS header
clickgram.biz: could not connect to host
clientsecure.me: could not connect to host
clint.id.au: max-age too low: 0
clint.id.au: could not connect to host
clintonbloodworth.com: could not connect to host
clintonbloodworth.io: could not connect to host
clintwilson.technology: max-age too low: 2592000
@ -801,7 +804,6 @@ cloudcy.net: could not connect to host
clouddesktop.co.nz: could not connect to host
cloudey.net: did not receive HSTS header
cloudflare.com: did not receive HSTS header
cloudily.com: could not connect to host
cloudimag.es: could not connect to host
cloudlink.club: could not connect to host
cloudns.com.au: could not connect to host
@ -824,6 +826,7 @@ cmsbattle.com: could not connect to host
cmscafe.ru: did not receive HSTS header
cn.search.yahoo.com: did not receive HSTS header
cni-certing.it: max-age too low: 0
cnwage.com: could not connect to host
co50.com: did not receive HSTS header
cocaine-import.agency: could not connect to host
cocktailfuture.fr: could not connect to host
@ -837,7 +840,6 @@ codeforce.io: could not connect to host
codelayer.ca: could not connect to host
codemonkeyrawks.net: did not receive HSTS header
codepoet.de: could not connect to host
codepult.com: could not connect to host
codepx.com: did not receive HSTS header
codewiththepros.org: could not connect to host
codiva.io: max-age too low: 2592000
@ -889,7 +891,6 @@ cordial-restaurant.com: did not receive HSTS header
core.mx: could not connect to host
core4system.de: could not connect to host
corenetworking.de: could not connect to host
corgicloud.com: did not receive HSTS header
cormactagging.ie: could not connect to host
cormilu.com.br: did not receive HSTS header
correctpaardbatterijnietje.nl: did not receive HSTS header
@ -931,14 +932,13 @@ cruzr.xyz: could not connect to host
crypt.guru: could not connect to host
crypticshell.co.uk: could not connect to host
cryptify.eu: could not connect to host
cryptobin.co: could not connect to host
cryptobin.org: could not connect to host
cryptojar.io: did not receive HSTS header
cryptoki.fr: max-age too low: 7776000
cryptolab.pro: could not connect to host
cryptolab.tk: could not connect to host
cryptopartyatx.org: could not connect to host
cryptopush.com: did not receive HSTS header
cryptopush.com: could not connect to host
crysadm.com: max-age too low: 1
crystalclassics.co.uk: did not receive HSTS header
csapak.com: max-age too low: 0
@ -1048,6 +1048,7 @@ deco.me: could not connect to host
dedicatutiempo.es: could not connect to host
deepcovelabs.net: could not connect to host
deepearth.uk: did not receive HSTS header
deetz.nl: could not connect to host
deetzen.de: did not receive HSTS header
defiler.tk: could not connect to host
degroetenvanrosaline.nl: did not receive HSTS header
@ -1090,7 +1091,6 @@ dewin.io: could not connect to host
dhpcs.com: did not receive HSTS header
dhpiggott.net: did not receive HSTS header
diablotine.rocks: could not connect to host
diamante.ro: could not connect to host
diarbag.us: did not receive HSTS header
diasp.cz: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
dick.red: could not connect to host
@ -1115,6 +1115,7 @@ diva-ey.com: could not connect to host
dizihocasi.com: could not connect to host
dizorg.net: could not connect to host
dj4et.de: could not connect to host
djlnetworks.co.uk: did not receive HSTS header
djz4music.com: did not receive HSTS header
dl.google.com: did not receive HSTS header (error ignored - included regardless)
dlc.viasinc.com: could not connect to host
@ -1157,9 +1158,12 @@ download.jitsi.org: did not receive HSTS header
downsouthweddings.com.au: did not receive HSTS header
doyoucheck.com: did not receive HSTS header
dpratt.de: did not receive HSTS header
dpsg-roden.de: could not connect to host
dragonisles.net: could not connect to host
dragons-of-highlands.cz: could not connect to host
dragontrainingmobilezoo.com.au: max-age too low: 0
dragontrainingmobilezoo.com.au: could not connect to host
drakeanddragon.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
drakefortreasurer.sexy: could not connect to host
draw.uy: could not connect to host
drdevil.ru: could not connect to host
drdim.ru: could not connect to host
@ -1181,16 +1185,15 @@ duesee.org: could not connect to host
dullsir.com: did not receive HSTS header
duria.de: max-age too low: 3600
dustri.org: did not receive HSTS header
dutchessuganda.com: did not receive HSTS header
dutchrank.com: could not connect to host
dwhd.org: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
dworzak.ch: could not connect to host
dycontrol.de: could not connect to host
dylanscott.com.au: did not receive HSTS header
dymersion.com: did not receive HSTS header
dynamic-innovations.net: could not connect to host
dzimejl.sk: did not receive HSTS header
dzlibs.io: could not connect to host
dzndk.net: could not connect to host
dzndk.org: could not connect to host
e-aut.net: did not receive HSTS header
e-deca2.org: did not receive HSTS header
@ -1198,7 +1201,7 @@ e-sa.com: did not receive HSTS header
e3amn2l.com: could not connect to host
earlybirdsnacks.com: could not connect to host
earthrise16.com: could not connect to host
easez.net: did not receive HSTS header
easez.net: could not connect to host
easychiller.org: could not connect to host
easyhaul.com: did not receive HSTS header
eatlowcarb.de: did not receive HSTS header
@ -1246,6 +1249,7 @@ elanguest.ru: did not receive HSTS header
elbetech.net: could not connect to host
electricianforum.co.uk: did not receive HSTS header
electromc.com: could not connect to host
elektronring.com: could not connect to host
elemenx.com: did not receive HSTS header
elemprendedor.com.ve: could not connect to host
elenag.ga: could not connect to host
@ -1278,7 +1282,6 @@ encode.space: did not receive HSTS header
encoder.pw: could not connect to host
encontrebarato.com.br: did not receive HSTS header
encrypted.google.com: did not receive HSTS header (error ignored - included regardless)
encryptedaudience.com: could not connect to host
end.pp.ua: could not connect to host
endlessdark.net: max-age too low: 600
endlessdiy.ca: could not connect to host
@ -1312,7 +1315,6 @@ equilibre-yoga-jennifer-will.com: could not connect to host
erawanarifnugroho.com: did not receive HSTS header
eressea.xyz: could not connect to host
eridanus.uk: could not connect to host
erikhubers.nl: could not connect to host
ernaehrungsberatung-rapperswil.ch: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
ernaehrungsberatung-zurich.ch: could not connect to host
ernesto.at: could not connect to host
@ -1361,7 +1363,6 @@ exfiles.cz: did not receive HSTS header
exgravitus.com: could not connect to host
exitus.jp: max-age too low: 0
exno.co: could not connect to host
expatads.com: could not connect to host
expertmile.com: did not receive HSTS header
expoundite.net: did not receive HSTS header
expressfinance.co.za: did not receive HSTS header
@ -1395,10 +1396,12 @@ fallenangelspirits.uk: could not connect to host
familie-sprink.de: could not connect to host
familie-zimmermann.at: could not connect to host
familjenm.se: could not connect to host
fantopia.club: could not connect to host
fanyl.cn: could not connect to host
farhadexchange.com: did not receive HSTS header
fashioncare.cz: did not receive HSTS header
fasset.jp: could not connect to host
fastconfirm.com: could not connect to host
fastograph.com: could not connect to host
fastopen.ml: could not connect to host
fatgeekflix.net: could not connect to host
@ -1425,7 +1428,6 @@ fexmen.com: could not connect to host
ffmradio.de: did not receive HSTS header
fhdhelp.de: could not connect to host
fhdhilft.de: could not connect to host
fierman.net: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
fifieldtech.com: could not connect to host
fiftyshadesofluca.ml: could not connect to host
fig.co: did not receive HSTS header
@ -1509,6 +1511,7 @@ foxtrot.pw: could not connect to host
fr33d0m.link: could not connect to host
francevpn.xyz: could not connect to host
frangor.info: did not receive HSTS header
frankwei.xyz: could not connect to host
franta.biz: did not receive HSTS header
franta.email: did not receive HSTS header
franzt.de: could not connect to host
@ -1544,7 +1547,6 @@ fuckgfw233.org: could not connect to host
fukushima-web.com: did not receive HSTS header
fundacionhijosdelsol.org: could not connect to host
funkyweddingideas.com.au: could not connect to host
funnyang.com: could not connect to host
funrun.com: did not receive HSTS header
furiffic.com: did not receive HSTS header
furnation.com: could not connect to host
@ -1692,7 +1694,7 @@ gogold-g.com: could not connect to host
gold24.in: did not receive HSTS header
goldendata.io: could not connect to host
golocal-media.de: did not receive HSTS header
gonzalosanchez.mx: could not connect to host
gonzalosanchez.mx: did not receive HSTS header
goodenough.nz: did not receive HSTS header
goodwin43.ru: could not connect to host
google: could not connect to host (error ignored - included regardless)
@ -1708,7 +1710,6 @@ gov.ax: could not connect to host
govillemo.ca: did not receive HSTS header
gozel.com.tr: did not receive HSTS header
gparent.org: did not receive HSTS header
gpfclan.de: could not connect to host
gpsfix.cz: could not connect to host
gpstuner.com: did not receive HSTS header
gracesofgrief.com: max-age too low: 86400
@ -1749,13 +1750,11 @@ gtraxapp.com: could not connect to host
gts-schulsoftware.de: did not receive HSTS header
guava.studio: did not receive HSTS header
guilde-vindicta.fr: did not receive HSTS header
guildgearscore.cf: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
gulenet.com: could not connect to host
gunnarhafdal.com: did not receive HSTS header
gurom.lv: could not connect to host
gurusupe.com: could not connect to host
guso.gq: could not connect to host
guso.ml: could not connect to host
guso.site: could not connect to host
guso.tech: could not connect to host
gussi.is: did not receive HSTS header
@ -1770,7 +1769,6 @@ gyboche.science: could not connect to host
gycis.me: could not connect to host
gypthecat.com: max-age too low: 604800
gyz.io: could not connect to host
gzitech.com: could not connect to host
h2check.org: could not connect to host
haarkliniek.com: did not receive HSTS header
habanaavenue.com: did not receive HSTS header
@ -1779,6 +1777,7 @@ hablemosdetecnologia.com.ve: could not connect to host
hack.cz: could not connect to host
hack.li: did not receive HSTS header
hacker.one: could not connect to host
hackerchai.com: could not connect to host
hackerforever.com: did not receive HSTS header
hackerone-ext-adroll.com: could not connect to host
hackest.org: did not receive HSTS header
@ -1806,11 +1805,9 @@ happyfabric.me: did not receive HSTS header
happygastro.com: could not connect to host
harabuhouse.com: did not receive HSTS header
harbor-light.net: could not connect to host
hardfalcon.net: could not connect to host
hardline.xyz: could not connect to host
haribosupermix.com: could not connect to host
harmonycosmetic.com: max-age too low: 300
harrisonsand.com: could not connect to host
harristony.com: could not connect to host
hartmancpa.com: did not receive HSTS header
harvestrenewal.org: did not receive HSTS header
@ -1896,6 +1893,7 @@ hosted-service.com: did not receive HSTS header
hostedtalkgadget.google.com: did not receive HSTS header (error ignored - included regardless)
hostgarou.com: did not receive HSTS header
hostinaus.com.au: could not connect to host
hostinghelp.guru: could not connect to host
hostisan.com: did not receive HSTS header
hotchillibox.com: max-age too low: 0
hotchoc.io: did not receive HSTS header
@ -1920,7 +1918,6 @@ humblefinances.com: could not connect to host
humeurs.net: could not connect to host
humpteedumptee.in: did not receive HSTS header
huntshomeinspections.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
hupp.se: could not connect to host
hurricanelabs.com: did not receive HSTS header
huskybutt.dog: could not connect to host
hydra.ws: could not connect to host
@ -1970,7 +1967,6 @@ ihrnationalrat.ch: could not connect to host
ihsbsd.me: could not connect to host
ihsbsd.tk: could not connect to host
ihuanmeng.com: did not receive HSTS header
ikkatsu-satei.jp: did not receive HSTS header
ikujii.com: max-age too low: 0
ikwilguidobellen.nl: did not receive HSTS header
ilbuongiorno.it: did not receive HSTS header
@ -2025,14 +2021,12 @@ inleaked.com: could not connect to host
inmyarea.com: max-age too low: 0
innophate-security.nl: could not connect to host
ins1gn1a.com: did not receive HSTS header
insane-bullets.com: could not connect to host
insane.zone: could not connect to host
insite-feedback.com: did not receive HSTS header
inspire-av.com: did not receive HSTS header
inspiroinc.com: could not connect to host
instacart.com: did not receive HSTS header
instantdev.io: could not connect to host
instela.com: did not receive HSTS header
institutoflordelavida.com: could not connect to host
intel.li: could not connect to host
intelldynamics.com: could not connect to host
@ -2051,7 +2045,7 @@ invite24.pro: could not connect to host
inwesttitle.com: max-age too low: 0
ionx.co.uk: did not receive HSTS header
iop.intuit.com: max-age too low: 86400
iosmods.com: could not connect to host
iosmods.com: did not receive HSTS header
iostips.ru: could not connect to host
iotsms.io: could not connect to host
ip6.im: did not receive HSTS header
@ -2133,7 +2127,7 @@ jartza.org: could not connect to host
jasmineconseil.com: could not connect to host
jasonrobinson.me: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
jasonroe.me: did not receive HSTS header
jasonsansone.com: could not connect to host
jasonsansone.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
jastoria.pl: could not connect to host
jayblock.com: did not receive HSTS header
jayschulman.com: did not receive HSTS header
@ -2191,7 +2185,6 @@ jonn.me: could not connect to host
joostbovee.nl: did not receive HSTS header
jordanhamilton.me: could not connect to host
joretapo.fr: did not receive HSTS header
jornadasciberdefensa2016.es: did not receive HSTS header
josahrens.me: could not connect to host
joshi.su: could not connect to host
joshstroup.me: could not connect to host
@ -2213,7 +2206,6 @@ junaos.xyz: did not receive HSTS header
junge-selbsthilfe.info: could not connect to host
junqtion.com: could not connect to host
jupp0r.de: did not receive HSTS header
justanothercompany.name: could not connect to host
justlikethat.hosting: did not receive HSTS header
justnaw.co.uk: could not connect to host
justudin.com: did not receive HSTS header
@ -2230,6 +2222,7 @@ kaela.design: could not connect to host
kahopoon.net: could not connect to host
kaisers.de: did not receive HSTS header
kalami.nl: did not receive HSTS header
kaleidomarketing.com: could not connect to host
kamikano.com: could not connect to host
kaneo-gmbh.de: did not receive HSTS header
kaplatz.is: could not connect to host
@ -2332,14 +2325,12 @@ krayx.com: could not connect to host
kreavis.com: did not receive HSTS header
kredite.sale: could not connect to host
kriegt.es: could not connect to host
krmela.com: could not connect to host
kroetenfuchs.de: could not connect to host
kropkait.pl: could not connect to host
krouzkyliduska.cz: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
krunut.com: did not receive HSTS header
krypteia.org: could not connect to host
kryptomech.com: could not connect to host
kschv-rdeck.de: did not receive HSTS header
ksfh-mail.de: could not connect to host
kstan.me: could not connect to host
kswriter.com: could not connect to host
@ -2451,7 +2442,6 @@ liaoshuma.com: could not connect to host
libertyrp.org: could not connect to host
library.linode.com: did not receive HSTS header
librechan.net: could not connect to host
libscode.com: could not connect to host
lidl-selection.at: could not connect to host
lidow.eu: could not connect to host
lifeguard.aecom.com: did not receive HSTS header
@ -2567,10 +2557,10 @@ maarten.nyc: did not receive HSTS header
maartenvandekamp.nl: did not receive HSTS header
mac-torrents.me: did not receive HSTS header
macchaberrycream.com: could not connect to host
macchedil.com: did not receive HSTS header
macdj.tk: could not connect to host
macgeneral.de: did not receive HSTS header
machon.biz: could not connect to host
macosxfilerecovery.com: did not receive HSTS header
madars.org: did not receive HSTS header
maddin.ga: could not connect to host
madebymagnitude.com: did not receive HSTS header
@ -2615,9 +2605,9 @@ mario.party: did not receive HSTS header
markaconnor.com: could not connect to host
markayapilandirma.com: could not connect to host
market.android.com: did not receive HSTS header (error ignored - included regardless)
markprof.ru: could not connect to host
markrego.com: could not connect to host
marktboten.de: did not receive HSTS header
marktissink.nl: did not receive HSTS header
markus-dev.com: did not receive HSTS header
markusweimar.de: did not receive HSTS header
marleyresort.com: did not receive HSTS header
@ -2658,7 +2648,6 @@ mccarty.io: could not connect to host
mccrackon.com: could not connect to host
mcdonalds.ru: did not receive HSTS header
mcga.media: did not receive HSTS header
mcjackk77.com: could not connect to host
mclab.su: could not connect to host
mdewendt.de: could not connect to host
mdfnet.se: did not receive HSTS header
@ -2786,7 +2775,6 @@ moebel-nagel.de: did not receive HSTS header
moelord.org: could not connect to host
moen.io: did not receive HSTS header
mogry.net: did not receive HSTS header
moho.kr: did not receive HSTS header
monarca.systems: could not connect to host
monasterialis.eu: could not connect to host
mondar.io: did not receive HSTS header
@ -2819,7 +2807,6 @@ moviesabout.net: could not connect to host
moy-gorod.od.ua: did not receive HSTS header
moy.cat: did not receive HSTS header
mp3juices.is: could not connect to host
mpintaamalabanna.it: could not connect to host
mqas.net: could not connect to host
mrdani.net: could not connect to host
mrettich.org: did not receive HSTS header
@ -2864,7 +2851,6 @@ mycollab.net: could not connect to host
mycoted.com: did not receive HSTS header
mydeos.com: could not connect to host
mydigipass.com: did not receive HSTS header
myg21.com: max-age too low: 0
mygate.at: could not connect to host
mygdut.com: did not receive HSTS header
mygov.scot: did not receive HSTS header
@ -2880,9 +2866,9 @@ myphonebox.de: could not connect to host
myraytech.net: did not receive HSTS header
mysecretrewards.com: did not receive HSTS header
mystery-science-theater-3000.de: did not receive HSTS header
mythslegendscollection.com: did not receive HSTS header
myvirtualserver.com: max-age too low: 2592000
myzone.com: did not receive HSTS header
mziulu.me: could not connect to host
n0psled.nl: could not connect to host
n2x.in: could not connect to host
n4l.pw: could not connect to host
@ -2893,7 +2879,7 @@ naiharngym.com: did not receive HSTS header
najedlo.sk: did not receive HSTS header
nakliyatsirketi.biz: did not receive HSTS header
nakuro.de: could not connect to host
nalifornia.com: did not receive HSTS header
nalifornia.com: could not connect to host
nallon.com.br: could not connect to host
namacindia.com: did not receive HSTS header
namaho.com: could not connect to host
@ -2903,7 +2889,6 @@ nan.zone: could not connect to host
nanogeneinc.com: could not connect to host
nanto.eu: could not connect to host
narada.com.ua: could not connect to host
nargileh.nl: could not connect to host
narindal.ch: did not receive HSTS header
natalia-fadeeva.ru: could not connect to host
natalia.io: could not connect to host
@ -2924,7 +2909,6 @@ ncpc.gov: could not connect to host
nct.org.uk: max-age too low: 1
nctx.co.uk: did not receive HSTS header
near.st: did not receive HSTS header
neel.ch: could not connect to host
neels.ch: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
neftaly.com: did not receive HSTS header
neilgreen.net: did not receive HSTS header
@ -2978,10 +2962,9 @@ nien.chat: could not connect to host
nightwinds.tk: could not connect to host
nightx.uk: could not connect to host
niho.jp: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
niklas.pw: could not connect to host
nikomo.fi: could not connect to host
ninchisho-online.com: did not receive HSTS header
ninhs.org: did not receive HSTS header
ninhs.org: could not connect to host
nippler.org: did not receive HSTS header
nippombashi.net: did not receive HSTS header
nipponcareers.com: did not receive HSTS header
@ -3022,6 +3005,7 @@ nozoe.jp: did not receive HSTS header
np.search.yahoo.com: did not receive HSTS header
npol.de: could not connect to host
nqesh.com: could not connect to host
nrizzio.me: could not connect to host
ntbs.pro: could not connect to host
ntse.xyz: could not connect to host
nu3.at: did not receive HSTS header
@ -3034,6 +3018,7 @@ nu3.fi: did not receive HSTS header
nu3.fr: did not receive HSTS header
nu3.no: did not receive HSTS header
nu3.se: did not receive HSTS header
nubella.com.au: did not receive HSTS header
nufla.de: could not connect to host
null-sec.ru: could not connect to host
null.cat: could not connect to host
@ -3343,7 +3328,7 @@ poleartschool.com: could not connect to host
policeiwitness.sg: could not connect to host
polimat.org: could not connect to host
politically-incorrect.xyz: could not connect to host
politologos.org: could not connect to host
politologos.org: did not receive HSTS header
polycoise.com: could not connect to host
polypho.nyc: could not connect to host
pompompoes.com: could not connect to host
@ -3353,6 +3338,7 @@ poon.tech: could not connect to host
portalplatform.net: did not receive HSTS header
poshpak.com: max-age too low: 86400
postcodewise.co.uk: did not receive HSTS header
posterspy.com: did not receive HSTS header
postscheduler.org: could not connect to host
posylka.de: did not receive HSTS header
poussinooz.fr: could not connect to host
@ -3461,6 +3447,7 @@ raajheshkannaa.com: could not connect to host
radicaleducation.net: could not connect to host
rafaelcz.de: could not connect to host
railgun.com.cn: could not connect to host
railjob.cn: could not connect to host
rainbowbarracuda.com: could not connect to host
ramonj.nl: could not connect to host
randomcage.com: did not receive HSTS header
@ -3472,7 +3459,6 @@ rapido.nu: did not receive HSTS header
rapidresearch.me: could not connect to host
rapidthunder.io: could not connect to host
rasing.me: did not receive HSTS header
raspass.me: could not connect to host
rastreador.com.es: did not receive HSTS header
ratajczak.fr: could not connect to host
rate-esport.de: could not connect to host
@ -3539,7 +3525,6 @@ respostas.com.br: did not receive HSTS header
restchart.com: did not receive HSTS header
retcor.net: could not connect to host
retrotracks.net: max-age too low: 0
reulitz.de: could not connect to host
revealdata.com: did not receive HSTS header
revello.org: did not receive HSTS header
reverie.pw: could not connect to host
@ -3581,7 +3566,6 @@ robteix.com: did not receive HSTS header
robtex.net: did not receive HSTS header
robtex.org: did not receive HSTS header
rochman.id: could not connect to host
rocketr.net: did not receive HSTS header
rocksberg.net: could not connect to host
rockstarloan.com: max-age too low: 0
roddis.net: did not receive HSTS header
@ -3610,6 +3594,7 @@ rouvray.org: could not connect to host
royalpub.net: did not receive HSTS header
rr.in.th: could not connect to host
rrke.cc: did not receive HSTS header
rrom.me: could not connect to host
rsajeey.info: could not connect to host
rsauget.fr: could not connect to host
rsf.io: could not connect to host
@ -3639,7 +3624,6 @@ rxv.cc: could not connect to host
ryansmithphotography.com: did not receive HSTS header
ryanteck.uk: did not receive HSTS header
s.how: did not receive HSTS header
s007.co: could not connect to host
safematix.com: could not connect to host
safewings-nh.nl: did not receive HSTS header
safic.net: could not connect to host
@ -3662,7 +3646,7 @@ sandrolittke.de: did not receive HSTS header
sandviks.com: did not receive HSTS header
sangwon.org: could not connect to host
sansemea.com: did not receive HSTS header
sansonehowell.com: could not connect to host
sansonehowell.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
sarah-beckett-harpist.com: did not receive HSTS header
sarahsweetlife.com: could not connect to host
sarakas.com: could not connect to host
@ -3689,6 +3673,7 @@ sazima.ru: did not receive HSTS header
sbox-archives.com: could not connect to host
sby.de: did not receive HSTS header
sc4le.com: could not connect to host
scannabi.com: could not connect to host
scepticism.com: could not connect to host
schadegarant.net: could not connect to host
schnell-gold.com: could not connect to host
@ -3765,6 +3750,7 @@ seomobo.com: could not connect to host
seowarp.net: did not receive HSTS header
sep23.ru: did not receive HSTS header
seq.tf: did not receive HSTS header
serathius.ovh: could not connect to host
serenitycreams.com: did not receive HSTS header
serfdom.io: did not receive HSTS header
serized.pw: could not connect to host
@ -3783,12 +3769,12 @@ setuid0.kr: could not connect to host
sexpay.net: could not connect to host
seyahatsagliksigortalari.com: could not connect to host
sfsltd.com: did not receive HSTS header
sh-network.de: could not connect to host
shadoom.com: did not receive HSTS header
shadowmorph.info: did not receive HSTS header
shadowsocks.net: could not connect to host
shakepeers.org: did not receive HSTS header
shanesage.com: could not connect to host
shanewadleigh.com: could not connect to host
shareimg.xyz: could not connect to host
sharepass.pw: could not connect to host
shauncrowley.co.uk: could not connect to host
@ -3802,7 +3788,7 @@ shiinko.com: could not connect to host
shinebijoux.com.br: could not connect to host
shinju.moe: could not connect to host
shinonome-lab.eu.org: could not connect to host
shiona.xyz: could not connect to host
shiona.xyz: did not receive HSTS header
shocksrv.com: did not receive HSTS header
shooshosha.com: did not receive HSTS header
shopontarget.com: did not receive HSTS header
@ -3850,7 +3836,6 @@ sitennisclub.com: did not receive HSTS header
siterip.org: could not connect to host
sites.google.com: did not receive HSTS header (error ignored - included regardless)
sitesten.com: did not receive HSTS header
sitsy.ru: did not receive HSTS header
sixtwentyten.com: did not receive HSTS header
skhosting.eu: did not receive HSTS header
skile.ru: could not connect to host
@ -3871,6 +3856,7 @@ slightfuture.click: could not connect to host
slix.io: could not connect to host
slope.haus: could not connect to host
slovakiana.sk: did not receive HSTS header
slowfood.es: could not connect to host
sluitkampzeist.nl: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
slycurity.de: could not connect to host
smart-mirror.de: did not receive HSTS header
@ -3893,7 +3879,6 @@ snakehosting.dk: did not receive HSTS header
snapappointments.com: did not receive HSTS header
snapappts.com: could not connect to host
snapworks.net: did not receive HSTS header
sneberger.cz: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
snel4u.nl: could not connect to host
snelwerk.be: could not connect to host
sng.my: could not connect to host
@ -3926,7 +3911,7 @@ somethingnew.xyz: could not connect to host
sonicrainboom.rocks: could not connect to host
soobi.org: did not receive HSTS header
soondy.com: did not receive HSTS header
sotiran.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
sotiran.com: did not receive HSTS header
sotor.de: did not receive HSTS header
soulema.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
soulfulglamour.uk: could not connect to host
@ -3938,6 +3923,7 @@ souyar.net: could not connect to host
souyar.us: could not connect to host
sovereignshare.com: could not connect to host
sown.dyndns.org: could not connect to host
sowncloud.de: did not receive HSTS header
spacehq.org: max-age too low: 0
spaggel.nl: could not connect to host
sparelib.com: max-age too low: 3650
@ -3986,7 +3972,6 @@ ssl.rip: could not connect to host
ssmato.me: could not connect to host
ssnc.org: max-age too low: 300
sss3s.com: did not receive HSTS header
st-news.de: could not connect to host
stabletoken.com: could not connect to host
stadjerspasonline.nl: could not connect to host
stahl.xyz: could not connect to host
@ -4033,6 +4018,8 @@ storecove.com: did not receive HSTS header
storefrontify.com: did not receive HSTS header
stormhub.org: could not connect to host
stpatricksguild.com: did not receive HSTS header
stpip.com: could not connect to host
stpip.net: could not connect to host
stqry.com: did not receive HSTS header
str0.at: did not receive HSTS header
strasweb.fr: did not receive HSTS header
@ -4157,7 +4144,7 @@ team-teasers.com: could not connect to host
teamsocial.co: did not receive HSTS header
teamzeus.cz: could not connect to host
tech55i.com: did not receive HSTS header
techassist.io: could not connect to host
techassist.io: did not receive HSTS header
techelements.co: could not connect to host
techhipster.net: could not connect to host
techhub.ml: could not connect to host
@ -4173,7 +4160,6 @@ tegelsensanitaironline.nl: did not receive HSTS header
tekshrek.com: did not receive HSTS header
telefonnummer.online: could not connect to host
telefoonnummerinfo.nl: could not connect to host
telling.xyz: could not connect to host
temehu.com: did not receive HSTS header
tempcraft.net: could not connect to host
tendertool.nl: could not connect to host
@ -4243,10 +4229,12 @@ therewill.be: could not connect to host
theseed.io: could not connect to host
thestack.xyz: could not connect to host
thestagchorleywood.co.uk: did not receive HSTS header
thetechnical.me: could not connect to host
thetomharling.com: max-age too low: 86400
theurbanyoga.com: did not receive HSTS header
thevintagenews.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
thewebfellas.com: did not receive HSTS header
theyosh.nl: could not connect to host
thezonders.com: did not receive HSTS header
thierfreund.de: could not connect to host
thingies.site: could not connect to host
@ -4278,7 +4266,6 @@ tikutiku.pl: max-age too low: 0
tildebot.com: could not connect to host
tilkah.com.au: could not connect to host
tillcraft.com: could not connect to host
tillseasyscore.com: could not connect to host
timbeilby.com: could not connect to host
timbuktutimber.com: did not receive HSTS header
timcamara.com: did not receive HSTS header
@ -4326,12 +4313,14 @@ tomharling.co.uk: max-age too low: 86400
tomharling.uk: max-age too low: 86400
tomharris.tech: did not receive HSTS header
tomlankhorst.nl: did not receive HSTS header
tomli.blog: could not connect to host
tommsy.com: did not receive HSTS header
tommyads.com: could not connect to host
tonburi.jp: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
tonyfantjr.com: could not connect to host
tonyw.xyz: could not connect to host
toomanypillows.com: could not connect to host
toool.org: did not receive HSTS header
top-stage.net: could not connect to host
topbargains.com.au: did not receive HSTS header
topdeskdev.net: could not connect to host
@ -4362,6 +4351,7 @@ trinityaffirmations.com: max-age too low: 0
trinitycore.org: max-age too low: 2592000
tripdelta.com: did not receive HSTS header
trixies-wish.nz: could not connect to host
trkpuls.tk: could not connect to host
trollme.me: could not connect to host
trunkjunk.co: did not receive HSTS header
trusitio.com: did not receive HSTS header
@ -4440,7 +4430,7 @@ unblocked.works: did not receive HSTS header
unblocked.world: did not receive HSTS header
unccdesign.club: could not connect to host
unclegen.xyz: could not connect to host
undernet.uy: did not receive HSTS header
undernet.uy: could not connect to host
unfiltered.nyc: did not receive HSTS header
unfuddle.cn: could not connect to host
uni-games.com: could not connect to host
@ -4477,7 +4467,7 @@ used-in.jp: did not receive HSTS header
usercare.com: did not receive HSTS header
userify.com: did not receive HSTS header
ustr.gov: max-age too low: 86400
utilitarianism.net: could not connect to host
utilitarianism.net: did not receive HSTS header
utleieplassen.no: could not connect to host
utopiagalaxy.space: could not connect to host
utopianhomespa.com: did not receive HSTS header
@ -4508,7 +4498,6 @@ vanitynailworkz.com: could not connect to host
vanlaanen.com: did not receive HSTS header
vansieleghem.com: could not connect to host
vasanth.org: did not receive HSTS header
vbazile.com: could not connect to host
vbulletin-russia.com: could not connect to host
vbulletinrussia.com: could not connect to host
vcdove.com: did not receive HSTS header
@ -4555,7 +4544,6 @@ viva-french.com: did not receive HSTS header
vlastimilburian.cz: did not receive HSTS header
vlora.city: could not connect to host
vm0.eu: did not receive HSTS header
vmc.co.id: could not connect to host
vmrdev.com: could not connect to host
voceinveste.com: did not receive HSTS header
vodpay.com: could not connect to host
@ -4728,7 +4716,7 @@ wufu.org: did not receive HSTS header
wuhengmin.com: did not receive HSTS header
wurzelzwerg.net: could not connect to host
wusx.club: could not connect to host
www.apollo-auto.com: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
www.apollo-auto.com: could not connect to host
www.braintreepayments.com: did not receive HSTS header
www.calyxinstitute.org: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
www.cueup.com: could not connect to host
@ -4753,7 +4741,9 @@ www.zenpayroll.com: did not receive HSTS header
www3.info: could not connect to host
wxukang.cn: could not connect to host
wyzphoto.nl: [Exception... "Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsISiteSecurityService.processHeader]" nsresult: "0x80004005 (NS_ERROR_FAILURE)" location: "JS frame :: /builds/slave/m-cen-l64-periodicupdate-00000/getHSTSPreloadList.js :: processStsHeader :: line 121" data: no]
x-case.de: max-age too low: 0
x2w.io: could not connect to host
x3led.com: could not connect to host
xa.search.yahoo.com: did not receive HSTS header
xandocs.com: could not connect to host
xatr0z.org: could not connect to host
@ -4883,6 +4873,7 @@ zentraler-kreditausschuss.de: did not receive HSTS header
zentralwolke.de: did not receive HSTS header
zera.com.au: could not connect to host
zerolab.org: could not connect to host
zertif.info: could not connect to host
zerudi.com: did not receive HSTS header
zett4.me: could not connect to host
zeytin.pro: could not connect to host

File diff not shown because it is too large.


@ -1 +0,0 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"12cc0f91b51fedf41ae1670d1624ee1d78a284bdb101645b60a06a12de16c069",".travis.yml":"6b96b2c6bfd7e1acef4b825a2813fc4277859eb9400a16800db8835c25e4087d","Cargo.toml":"789b93a48ce76901375209d5462408469c31809e09a98e71370c57187a4b0923","README.md":"9f048d969f9f8333cdcdb892744cd0816e4f2922c8817fa5e9e07f9472fe1050","src/app_unit.rs":"71b0ac2fa378427883649def1a03008ac9d4eb45addd084b7d9885867049551e","src/lib.rs":"2df7d863c47d8b22f9af66caeafa87e6a206ee713a8aeaa55c5a80a42a92513b"},"package":"636ee56f12e31dbc11dc0a1ac6004f08b04e6e6595963716fc8130e90d4e04cf"}

2
third_party/rust/app_units-0.3.0/.gitignore vendored

@ -1,2 +0,0 @@
target/
Cargo.lock


@ -1,8 +0,0 @@
language: rust
notifications:
webhooks: http://build.servo.org:54856/travis
rust:
- stable
- beta
- nightly

18
third_party/rust/app_units-0.3.0/Cargo.toml vendored

@ -1,18 +0,0 @@
[package]
name = "app_units"
version = "0.3.0"
authors = ["The Servo Project Developers"]
description = "Servo app units type (Au)"
documentation = "http://doc.servo.org/app_units/"
repository = "https://github.com/servo/app_units"
license = "MPL-2.0"
[features]
default = []
plugins = []
[dependencies]
heapsize = "0.3"
num-traits = "0.1.32"
rustc-serialize = "0.3"
serde = "0.8"

3
third_party/rust/app_units-0.3.0/README.md vendored

@ -1,3 +0,0 @@
# app-units
[Documentation](http://doc.servo.org/app_units/index.html)


@ -1,315 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use heapsize::HeapSizeOf;
use num_traits::Zero;
use rustc_serialize::{Encodable, Encoder};
use serde::de::{Deserialize, Deserializer};
use serde::ser::{Serialize, Serializer};
use std::default::Default;
use std::fmt;
use std::i32;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Rem, Sub, SubAssign};
/// The number of app units in a pixel.
pub const AU_PER_PX: i32 = 60;
#[derive(Clone, Copy, Hash, PartialEq, PartialOrd, Eq, Ord)]
pub struct Au(pub i32);
impl HeapSizeOf for Au {
fn heap_size_of_children(&self) -> usize { 0 }
}
impl Deserialize for Au {
fn deserialize<D: Deserializer>(deserializer: &mut D) -> Result<Au, D::Error> {
Ok(Au(try!(i32::deserialize(deserializer))))
}
}
impl Serialize for Au {
fn serialize<S: Serializer>(&self, serializer: &mut S) -> Result<(), S::Error> {
self.0.serialize(serializer)
}
}
impl Default for Au {
#[inline]
fn default() -> Au {
Au(0)
}
}
impl Zero for Au {
#[inline]
fn zero() -> Au {
Au(0)
}
#[inline]
fn is_zero(&self) -> bool {
self.0 == 0
}
}
pub const MIN_AU: Au = Au(i32::MIN);
pub const MAX_AU: Au = Au(i32::MAX);
impl Encodable for Au {
fn encode<S: Encoder>(&self, e: &mut S) -> Result<(), S::Error> {
e.emit_f64(self.to_f64_px())
}
}
impl fmt::Debug for Au {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}px", self.to_f64_px())
}
}
impl Add for Au {
type Output = Au;
#[inline]
fn add(self, other: Au) -> Au {
Au(self.0.wrapping_add(other.0))
}
}
impl Sub for Au {
type Output = Au;
#[inline]
fn sub(self, other: Au) -> Au {
Au(self.0.wrapping_sub(other.0))
}
}
impl Mul<i32> for Au {
type Output = Au;
#[inline]
fn mul(self, other: i32) -> Au {
Au(self.0.wrapping_mul(other))
}
}
impl Div<i32> for Au {
type Output = Au;
#[inline]
fn div(self, other: i32) -> Au {
Au(self.0 / other)
}
}
impl Rem<i32> for Au {
type Output = Au;
#[inline]
fn rem(self, other: i32) -> Au {
Au(self.0 % other)
}
}
impl Neg for Au {
type Output = Au;
#[inline]
fn neg(self) -> Au {
Au(-self.0)
}
}
impl AddAssign for Au {
#[inline]
fn add_assign(&mut self, other: Au) {
*self = *self + other;
}
}
impl SubAssign for Au {
#[inline]
fn sub_assign(&mut self, other: Au) {
*self = *self - other;
}
}
impl MulAssign<i32> for Au {
#[inline]
fn mul_assign(&mut self, other: i32) {
*self = *self * other;
}
}
impl DivAssign<i32> for Au {
#[inline]
fn div_assign(&mut self, other: i32) {
*self = *self / other;
}
}
impl Au {
/// FIXME(pcwalton): Workaround for lack of cross crate inlining of newtype structs!
#[inline]
pub fn new(value: i32) -> Au {
Au(value)
}
#[inline]
pub fn scale_by(self, factor: f32) -> Au {
Au(((self.0 as f32) * factor) as i32)
}
#[inline]
pub fn from_px(px: i32) -> Au {
Au((px * AU_PER_PX) as i32)
}
/// Rounds this app unit down to the pixel towards zero and returns it.
#[inline]
pub fn to_px(self) -> i32 {
self.0 / AU_PER_PX
}
/// Ceil this app unit to the appropriate pixel boundary and return it.
#[inline]
pub fn ceil_to_px(self) -> i32 {
((self.0 as f64) / (AU_PER_PX as f64)).ceil() as i32
}
#[inline]
pub fn to_nearest_px(self) -> i32 {
((self.0 as f64) / (AU_PER_PX as f64)).round() as i32
}
#[inline]
pub fn to_nearest_pixel(self, pixels_per_px: f32) -> f32 {
((self.0 as f32) / (AU_PER_PX as f32) * pixels_per_px).round() / pixels_per_px
}
#[inline]
pub fn to_f32_px(self) -> f32 {
(self.0 as f32) / (AU_PER_PX as f32)
}
#[inline]
pub fn to_f64_px(self) -> f64 {
(self.0 as f64) / (AU_PER_PX as f64)
}
#[inline]
pub fn from_f32_px(px: f32) -> Au {
Au((px * (AU_PER_PX as f32)) as i32)
}
#[inline]
pub fn from_f64_px(px: f64) -> Au {
Au((px * (AU_PER_PX as f64)) as i32)
}
}
#[test]
fn create() {
assert_eq!(Au::zero(), Au(0));
assert_eq!(Au::default(), Au(0));
assert_eq!(Au::new(7), Au(7));
}
#[test]
fn operations() {
assert_eq!(Au(7) + Au(5), Au(12));
assert_eq!(MAX_AU + Au(1), MIN_AU);
assert_eq!(Au(7) - Au(5), Au(2));
assert_eq!(MIN_AU - Au(1), MAX_AU);
assert_eq!(Au(7) * 5, Au(35));
assert_eq!(MAX_AU * -1, MIN_AU + Au(1));
assert_eq!(MIN_AU * -1, MIN_AU);
assert_eq!(Au(35) / 5, Au(7));
assert_eq!(Au(35) % 6, Au(5));
assert_eq!(-Au(7), Au(-7));
}
#[test]
#[should_panic]
fn overflowing_div() {
MIN_AU / -1;
}
#[test]
#[should_panic]
fn overflowing_rem() {
MIN_AU % -1;
}
#[test]
fn scale() {
assert_eq!(Au(12).scale_by(1.5), Au(18));
}
#[test]
fn convert() {
assert_eq!(Au::from_px(5), Au(300));
assert_eq!(Au(300).to_px(), 5);
assert_eq!(Au(330).to_px(), 5);
assert_eq!(Au(350).to_px(), 5);
assert_eq!(Au(360).to_px(), 6);
assert_eq!(Au(300).ceil_to_px(), 5);
assert_eq!(Au(310).ceil_to_px(), 6);
assert_eq!(Au(330).ceil_to_px(), 6);
assert_eq!(Au(350).ceil_to_px(), 6);
assert_eq!(Au(360).ceil_to_px(), 6);
assert_eq!(Au(300).to_nearest_px(), 5);
assert_eq!(Au(310).to_nearest_px(), 5);
assert_eq!(Au(330).to_nearest_px(), 6);
assert_eq!(Au(350).to_nearest_px(), 6);
assert_eq!(Au(360).to_nearest_px(), 6);
assert_eq!(Au(60).to_nearest_pixel(2.), 1.);
assert_eq!(Au(70).to_nearest_pixel(2.), 1.);
assert_eq!(Au(80).to_nearest_pixel(2.), 1.5);
assert_eq!(Au(90).to_nearest_pixel(2.), 1.5);
assert_eq!(Au(100).to_nearest_pixel(2.), 1.5);
assert_eq!(Au(110).to_nearest_pixel(2.), 2.);
assert_eq!(Au(120).to_nearest_pixel(2.), 2.);
assert_eq!(Au(300).to_f32_px(), 5.);
assert_eq!(Au(312).to_f32_px(), 5.2);
assert_eq!(Au(330).to_f32_px(), 5.5);
assert_eq!(Au(348).to_f32_px(), 5.8);
assert_eq!(Au(360).to_f32_px(), 6.);
assert_eq!(Au(300).to_f64_px(), 5.);
assert_eq!(Au(312).to_f64_px(), 5.2);
assert_eq!(Au(330).to_f64_px(), 5.5);
assert_eq!(Au(348).to_f64_px(), 5.8);
assert_eq!(Au(360).to_f64_px(), 6.);
assert_eq!(Au::from_f32_px(5.), Au(300));
assert_eq!(Au::from_f32_px(5.2), Au(312));
assert_eq!(Au::from_f32_px(5.5), Au(330));
assert_eq!(Au::from_f32_px(5.8), Au(348));
assert_eq!(Au::from_f32_px(6.), Au(360));
assert_eq!(Au::from_f64_px(5.), Au(300));
assert_eq!(Au::from_f64_px(5.2), Au(312));
assert_eq!(Au::from_f64_px(5.5), Au(330));
assert_eq!(Au::from_f64_px(5.8), Au(348));
assert_eq!(Au::from_f64_px(6.), Au(360));
}
#[test]
fn heapsize() {
use heapsize::HeapSizeOf;
fn f<T: HeapSizeOf>(_: T) {}
f(Au::new(0));
}

16
third_party/rust/app_units-0.3.0/src/lib.rs vendored

@ -1,16 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was
//! originally proposed in 2002 as a standard unit of measure in Gecko.
//! See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info.
extern crate heapsize;
extern crate num_traits;
extern crate rustc_serialize;
extern crate serde;
mod app_unit;
pub use app_unit::{Au, MIN_AU, MAX_AU, AU_PER_PX};


@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"e084df3ce631ce22082bd63f9e421e7f4d7a2408d6520de532f6a649e4d320dd",".travis.yml":"cb3f687453522852cb74371892a77d5e6eb61d771b8ef27f6cc6628e556de3d6","Cargo.toml":"d631ecb2eef5a18307a68e795080ed073851c9bea0800405cad98642ed3cc053","LICENSE.md":"90d7e062634054e6866d3c81e6a2b3058a840e6af733e98e80bdfe1a7dec6912","examples/basic.rs":"cdf97f2c4facbc202bf9e1496030d09bef3b7cd5538407325a38f0fe2e49415e","logo.png":"ebc5305aae938c1f834cf35302faa8be0f1b7b8c3c3beef5cf6b2f68b9628c35","readme.dev.md":"43bad3bcc13a5c057344d3ba7f64bd2b313f8c133d6afa068108df73e8e8facd","readme.md":"1fe1bda36327400cfedfcf103d58091c8465067b62706b0a368d287ca0312cd9","src/lib.rs":"1a85a12afad0b6150b8dbede093d19f4a32a3cd6976ee018a625fbc05051bf80","src/refbox.rs":"f0470baabbf0f9852df939c2535865793dc31c9d9d35eecf9c237a9df431a9fc","src/rustc_serialize/mod.rs":"188f5ff7fc9c5e0ac1404b919ceafac5ce4385950d22ae470ddc1775d2a0643b","src/rustc_serialize/reader.rs":"7983c37556fdef552bfeba386d557863fb5113c8fada55d4cf6a605f13214253","src/rustc_serialize/writer.rs":"684844799673fce3c54f1aca42430b6730da13473d732ee2954ebc56994ebd95","src/serde/mod.rs":"7818bbe5c320af2a15762c421d5471865a7364e1c9754c57960402fdcf09c595","src/serde/reader.rs":"1f88a55923dfc3ad82ec32571c9c7ca42818d996897966dea08a595f804d117f","src/serde/writer.rs":"d987134b3a00eb17a25e601757ad20607dd1de8989452266e9e4e7955fcd87f1","tests/test.rs":"b72a5902be11c3210dd56814276ff036155eba10d5f0aa566c86e7a1ce463adf"},"package":"55eb0b7fd108527b0c77860f75eca70214e11a8b4c6ef05148c54c05a25d48ad"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"e084df3ce631ce22082bd63f9e421e7f4d7a2408d6520de532f6a649e4d320dd",".travis.yml":"f705a11b487bf71c41ebd8223cc1f3cbde0dfdfeea96a100af55e06e93397a1b","Cargo.toml":"c1d0f68b42bff71b04c8e763f13b0141f30dc849bee5b0ab5b9008e3627aac99","LICENSE.md":"90d7e062634054e6866d3c81e6a2b3058a840e6af733e98e80bdfe1a7dec6912","changelist.org":"90bb4036f90c3792c8294de2e3d52a54cc6230c3e5dc78013a781a9aa468f5f3","examples/basic.rs":"57aeca11d5cc5c3d5bb613e78b2ea43a2e80d66c15a2fffae303b165aa4ab41d","logo.png":"ebc5305aae938c1f834cf35302faa8be0f1b7b8c3c3beef5cf6b2f68b9628c35","readme.dev.md":"43bad3bcc13a5c057344d3ba7f64bd2b313f8c133d6afa068108df73e8e8facd","readme.md":"1fe1bda36327400cfedfcf103d58091c8465067b62706b0a368d287ca0312cd9","src/lib.rs":"04d6e4533f4bbb2ce2126bca414f95610075642b223f4e0c0b8f7a573792d7fd","src/refbox.rs":"fe266cec4f9f36942a1a9a9ad094a4bb1003d0c0f3c070cfb6214790d0f21b69","src/serde/mod.rs":"ef0c0a55936d835ae756d84a6ac38de312687d7c0f2cfc6810ec994413464516","src/serde/reader.rs":"6bfde2e2df9b450f6c07576198e47fdc837bbc4ddc74f447c72875c188c72ddc","src/serde/writer.rs":"eb3b439e8822871d715464ef6aca4b93a73b2b57625f9c586b68007f7386ab12","tests/test.rs":"f009e979fda892ad531ddd0f2003f0a7df607b19bd453a53f87c9041dfd9c745"},"package":"62650bb5651ba8f0580cebf4ef255d791b8b0ef53800322661e1bb5791d42966"}

34
third_party/rust/bincode/.travis.yml vendored

@ -1,29 +1,5 @@
lang: c
after_success: |
[ $TRAVIS_BRANCH = master ] &&
[ $TRAVIS_PULL_REQUEST = false ] &&
cargo doc &&
echo "<meta http-equiv=refresh content=0;url=`echo $TRAVIS_REPO_SLUG | cut -d '/' -f 2`/index.html>" > target/doc/index.html &&
sudo pip install ghp-import &&
ghp-import -n target/doc &&
git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
env:
matrix:
- CHANNEL='stable'
- CHANNEL='beta'
- CHANNEL='nightly'
global:
- secure: SZSxNqg9wiGx8EnJhifJ2kb/aCRcLim9TzTQyfurPqd8qVGkDOeVjTtbs+VTxLVXYtMJAz+YYnrQDwsu8kc/uYpQajU+gRMqNGEP5gNj3Ha5iNGDasAS6piIHQSMROayZ+D9g22nlGnjk8t9eZtLHC/Z8IWMCnjcIHvqMFY6cgI=
install:
- curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh > ./rustup.sh
- chmod +x ./rustup.sh
- ./rustup.sh --yes
script:
- multirust default $CHANNEL
- cargo build
- cargo build --no-default-features --features "rustc-serialize"
- cargo build --no-default-features --features "serde"
- if [ $CHANNEL = 'nightly' ] ; then cargo test ; fi
language: rust
rust:
- stable
- beta
- nightly

20
third_party/rust/bincode/Cargo.toml vendored

@ -1,29 +1,21 @@
[package]
name = "bincode"
version = "0.6.1"
authors = ["Ty Overby <ty@pre-alpha.com>", "Francesco Mazzoli <f@mazzo.li>"]
version = "1.0.0-alpha2"
authors = ["Ty Overby <ty@pre-alpha.com>", "Francesco Mazzoli <f@mazzo.li>", "David Tolnay <dtolnay@gmail.com>", "Daniel Griffen"]
repository = "https://github.com/TyOverby/bincode"
documentation = "http://tyoverby.github.io/bincode/bincode/"
documentation = "https://docs.rs/bincode"
keywords = ["binary", "encode", "decode", "serialize", "deserialize"]
license = "MIT"
description = "A binary serialization / deserialization strategy and implementation with serde and rustc-serialize backends."
description = "A binary serialization / deserialization strategy that uses Serde for transforming structs into bytes and vice versa!"
[dependencies]
byteorder = "1.0.0"
num-traits = "0.1.32"
[dependencies.rustc-serialize]
version = "0.3.*"
optional = true
[dependencies.serde]
version = "0.8.*"
optional = true
version = "0.9.*"
[dev-dependencies]
serde_derive = "0.8.*"
[features]
default = ["rustc-serialize", "serde"]
serde_derive = "0.9.*"

18
third_party/rust/bincode/changelist.org vendored (new file)

@ -0,0 +1,18 @@
* 1.0.0
** Removed deprecated rustc-serialize support
Rustc-serialize was a stopgap until projects like Serde were able to catch up.
With macros stabilization on its way, we are able to switch to serde without any
big user-friendliness issues. Major congratulations to Serde for coming this far!
** Moved Refbox, Strbox and Slicebox into a "refbox" module
Refbox, Strbox and Slicebox are still an integral piece of bincode, but since
they are mainly used by power-users, this move will make the crate API more organized
and easier for new users to understand.
** Upgraded to Serde 0.9.*
Serde 0.9.* gives us a better API surface area and allows use of procedural macros for
deriving serialize and deserialize implementations.
** Moved serde functions into global module
Since serde is the only supported serialization mechanism, it makes sense to have these
functions available at the top level.
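
For reference, a minimal usage sketch of the serde-only, crate-root API described above. This is an illustration, not part of the commit; it simply mirrors the doc-comment example in the src/lib.rs diff further below (the Option<String> payload and the SizeLimit::Bounded(20) limit come from that example).

extern crate bincode;

use bincode::{serialize, deserialize, SizeLimit};

fn main() {
    // The value to round-trip through bincode.
    let target = Some("hello world".to_string());

    // serialize/deserialize now live at the crate root; serde is the only backend.
    let encoded: Vec<u8> = serialize(&target, SizeLimit::Bounded(20)).unwrap();
    let decoded: Option<String> = deserialize(&encoded[..]).unwrap();
    assert_eq!(target, decoded);
}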

6
third_party/rust/bincode/examples/basic.rs vendored

@ -1,5 +1,6 @@
/*
extern crate bincode;
extern crate rustc_serialize;
extern crate
use bincode::SizeLimit;
use bincode::rustc_serialize::{encode, decode};
@ -29,3 +30,6 @@ fn main() {
assert!(world == decoded);
}
*/
fn main() {}

21
third_party/rust/bincode/src/lib.rs vendored

@ -16,17 +16,16 @@
//! ### Using Basic Functions
//!
//! ```rust
//! #![allow(unstable)]
//! extern crate bincode;
//! use bincode::rustc_serialize::{encode, decode};
//! use bincode::{serialize, deserialize};
//! fn main() {
//! // The object that we will serialize.
//! let target = Some("hello world".to_string());
//! // The maximum size of the encoded message.
//! let limit = bincode::SizeLimit::Bounded(20);
//!
//! let encoded: Vec<u8> = encode(&target, limit).unwrap();
//! let decoded: Option<String> = decode(&encoded[..]).unwrap();
//! let encoded: Vec<u8> = serialize(&target, limit).unwrap();
//! let decoded: Option<String> = deserialize(&encoded[..]).unwrap();
//! assert_eq!(target, decoded);
//! }
//! ```
@@ -37,21 +36,14 @@
#![doc(html_logo_url = "./icon.png")]
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize as rustc_serialize_crate;
extern crate byteorder;
extern crate num_traits;
#[cfg(feature = "serde")]
extern crate serde as serde_crate;
pub mod refbox;
mod serde;
pub use refbox::{RefBox, StrBox, SliceBox};
mod refbox;
#[cfg(feature = "rustc-serialize")]
pub mod rustc_serialize;
#[cfg(feature = "serde")]
pub mod serde;
pub use serde::*;
/// A limit on the amount of bytes that can be read or written.
///
@@ -76,4 +68,3 @@ pub enum SizeLimit {
Infinite,
Bounded(u64)
}
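A minimal sketch of how the two variants behave with the top-level `serialize`; the `u32` value and the byte budgets are made up for illustration, and a `u32` occupies four bytes on the wire:

    extern crate bincode;

    use bincode::{serialize, SizeLimit};

    fn main() {
        let value: u32 = 42;
        // Four bytes fit inside an eight-byte budget but not a one-byte budget.
        assert!(serialize(&value, SizeLimit::Bounded(8)).is_ok());
        assert!(serialize(&value, SizeLimit::Bounded(1)).is_err());
        assert!(serialize(&value, SizeLimit::Infinite).is_ok());
    }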

87
third_party/rust/bincode/src/refbox.rs vendored

@@ -1,10 +1,6 @@
use std::boxed::Box;
use std::ops::Deref;
#[cfg(feature = "rustc-serialize")]
use rustc_serialize_crate::{Encodable, Encoder, Decodable, Decoder};
#[cfg(feature = "serde")]
use serde_crate as serde;
/// A struct for encoding nested reference types.
@@ -141,35 +137,18 @@ impl <T> RefBox<'static, T> {
}
}
#[cfg(feature = "rustc-serialize")]
impl <'a, T: Encodable> Encodable for RefBox<'a, T> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.inner.encode(s)
}
}
#[cfg(feature = "rustc-serialize")]
impl <T: Decodable> Decodable for RefBox<'static, T> {
fn decode<D: Decoder>(d: &mut D) -> Result<RefBox<'static, T>, D::Error> {
let inner = try!(Decodable::decode(d));
Ok(RefBox{inner: inner})
}
}
#[cfg(feature = "serde")]
impl<'a, T> serde::Serialize for RefBox<'a, T>
where T: serde::Serialize,
{
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
serde::Serialize::serialize(&self.inner, serializer)
}
}
#[cfg(feature = "serde")]
impl<'a, T: serde::Deserialize> serde::Deserialize for RefBox<'a, T> {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer
{
let inner = try!(serde::Deserialize::deserialize(deserializer));
@@ -239,33 +218,17 @@ impl StrBox<'static> {
}
}
#[cfg(feature = "rustc-serialize")]
impl <'a> Encodable for StrBox<'a> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.inner.encode(s)
}
}
#[cfg(feature = "rustc-serialize")]
impl Decodable for StrBox<'static> {
fn decode<D: Decoder>(d: &mut D) -> Result<StrBox<'static>, D::Error> {
let inner: RefBoxInner<'static, str, String> = try!(Decodable::decode(d));
Ok(StrBox{inner: inner})
}
}
#[cfg(feature = "serde")]
impl<'a> serde::Serialize for StrBox<'a> {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
serde::Serialize::serialize(&self.inner, serializer)
}
}
#[cfg(feature = "serde")]
impl serde::Deserialize for StrBox<'static> {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer
{
let inner = try!(serde::Deserialize::deserialize(deserializer));
@@ -330,35 +293,19 @@ impl <T> SliceBox<'static, T> {
}
}
#[cfg(feature = "rustc-serialize")]
impl <'a, T: Encodable> Encodable for SliceBox<'a, T> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
self.inner.encode(s)
}
}
#[cfg(feature = "rustc-serialize")]
impl <T: Decodable> Decodable for SliceBox<'static, T> {
fn decode<D: Decoder>(d: &mut D) -> Result<SliceBox<'static, T>, D::Error> {
let inner: RefBoxInner<'static, [T], Vec<T>> = try!(Decodable::decode(d));
Ok(SliceBox{inner: inner})
}
}
#[cfg(feature = "serde")]
impl<'a, T> serde::Serialize for SliceBox<'a, T>
where T: serde::Serialize,
{
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
serde::Serialize::serialize(&self.inner, serializer)
}
}
#[cfg(feature = "serde")]
impl<'a, T: serde::Deserialize> serde::Deserialize for SliceBox<'a, T> {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer
{
let inner = try!(serde::Deserialize::deserialize(deserializer));
@@ -366,22 +313,12 @@ impl<'a, T: serde::Deserialize> serde::Deserialize for SliceBox<'a, T> {
}
}
#[cfg(feature = "rustc-serialize")]
impl <'a, A: Encodable + ?Sized, B: Encodable> Encodable for RefBoxInner<'a, A, B> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
match self {
&RefBoxInner::Ref(ref r) => r.encode(s),
&RefBoxInner::Box(ref b) => b.encode(s)
}
}
}
#[cfg(feature = "serde")]
impl<'a, A: ?Sized, B> serde::Serialize for RefBoxInner<'a, A, B>
where A: serde::Serialize,
B: serde::Serialize,
{
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
match self {
@@ -391,19 +328,11 @@ impl<'a, A: ?Sized, B> serde::Serialize for RefBoxInner<'a, A, B>
}
}
#[cfg(feature = "rustc-serialize")]
impl <A: ?Sized, B: Decodable> Decodable for RefBoxInner<'static, A, B> {
fn decode<D: Decoder>(d: &mut D) -> Result<RefBoxInner<'static, A, B>, D::Error> {
let decoded = try!(Decodable::decode(d));
Ok(RefBoxInner::Box(decoded))
}
}
#[cfg(feature = "serde")]
impl<'a, A: ?Sized, B> serde::Deserialize for RefBoxInner<'a, A, B>
where B: serde::Deserialize,
{
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer
{
let deserialized = try!(serde::Deserialize::deserialize(deserializer));
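For orientation, a small sketch of the intended RefBox round trip under the serde backend; the `Vec<u8>` payload is hypothetical, and the sketch assumes a `RefBox` encodes on the wire exactly like the value it borrows:

    extern crate bincode;

    use bincode::refbox::RefBox;
    use bincode::{serialize, deserialize, SizeLimit};

    fn main() {
        let value = vec![1u8, 2, 3];
        // Borrow the value for serialization instead of cloning it into the message.
        let encoded = serialize(&RefBox::new(&value), SizeLimit::Infinite).unwrap();
        // The receiver can decode the payload as a plain owned value.
        let decoded: Vec<u8> = deserialize(&encoded[..]).unwrap();
        assert_eq!(decoded, value);
    }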


@@ -1,102 +0,0 @@
//! A collection of serialization and deserialization functions
//! that use the `rustc_serialize` crate for the encodable and decodable
//! implementation.
use rustc_serialize_crate::{Encodable, Decodable};
use std::io::{Write, Read};
use ::SizeLimit;
pub use self::writer::{SizeChecker, EncoderWriter, EncodingResult, EncodingError};
pub use self::reader::{DecoderReader, DecodingResult, DecodingError, InvalidEncoding};
mod reader;
mod writer;
/// Encodes an encodable object into a `Vec` of bytes.
///
/// If the encoding would take more bytes than allowed by `size_limit`,
/// an error is returned.
pub fn encode<T: Encodable>(t: &T, size_limit: SizeLimit) -> EncodingResult<Vec<u8>> {
// Since we are putting values directly into a vector, we can do size
// computation out here and pre-allocate a buffer of *exactly*
// the right size.
let mut w = if let SizeLimit::Bounded(l) = size_limit {
let actual_size = encoded_size_bounded(t, l);
let actual_size = try!(actual_size.ok_or(EncodingError::SizeLimit));
Vec::with_capacity(actual_size as usize)
} else {
vec![]
};
match encode_into(t, &mut w, SizeLimit::Infinite) {
Ok(()) => Ok(w),
Err(e) => Err(e)
}
}
/// Decodes a slice of bytes into an object.
///
/// This method does not have a size-limit because if you already have the bytes
/// in memory, then you don't gain anything by having a limiter.
pub fn decode<T: Decodable>(b: &[u8]) -> DecodingResult<T> {
let mut b = b;
decode_from(&mut b, SizeLimit::Infinite)
}
/// Encodes an object directly into a `Writer`.
///
/// If the encoding would take more bytes than allowed by `size_limit`, an error
/// is returned and *no bytes* will be written into the `Writer`.
///
/// If this returns an `EncodingError` (other than SizeLimit), assume that the
/// writer is in an invalid state, as writing could bail out in the middle of
/// encoding.
pub fn encode_into<T: Encodable, W: Write>(t: &T,
w: &mut W,
size_limit: SizeLimit)
-> EncodingResult<()> {
try!(match size_limit {
SizeLimit::Infinite => Ok(()),
SizeLimit::Bounded(x) => {
let mut size_checker = SizeChecker::new(x);
t.encode(&mut size_checker)
}
});
t.encode(&mut writer::EncoderWriter::new(w))
}
/// Decodes an object directly from a `Buffer`ed Reader.
///
/// If the provided `SizeLimit` is reached, the decode will bail immediately.
/// A SizeLimit can help prevent an attacker from flooding your server with
/// a neverending stream of values that runs your server out of memory.
///
/// If this returns an `DecodingError`, assume that the buffer that you passed
/// in is in an invalid state, as the error could be returned during any point
/// in the reading.
pub fn decode_from<R: Read, T: Decodable>(r: &mut R, size_limit: SizeLimit) -> DecodingResult<T> {
Decodable::decode(&mut reader::DecoderReader::new(r, size_limit))
}
/// Returns the size that an object would be if encoded using bincode.
///
/// This is used internally as part of the check for encode_into, but it can
/// be useful for preallocating buffers if that's your style.
pub fn encoded_size<T: Encodable>(t: &T) -> u64 {
use std::u64::MAX;
let mut size_checker = SizeChecker::new(MAX);
t.encode(&mut size_checker).ok();
size_checker.written
}
/// Given a maximum size limit, check how large an object would be if it
/// were to be encoded.
///
/// If it can be encoded in `max` or fewer bytes, that number will be returned
/// inside `Some`. If it goes over bounds, then None is returned.
pub fn encoded_size_bounded<T: Encodable>(t: &T, max: u64) -> Option<u64> {
let mut size_checker = SizeChecker::new(max);
t.encode(&mut size_checker).ok().map(|_| size_checker.written)
}


@@ -1,392 +0,0 @@
use std::io::Read;
use std::io::Error as IoError;
use std::error::Error;
use std::fmt;
use std::convert::From;
use rustc_serialize_crate::Decoder;
use byteorder::{BigEndian, ReadBytesExt};
use ::SizeLimit;
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct InvalidEncoding {
pub desc: &'static str,
pub detail: Option<String>,
}
impl fmt::Display for InvalidEncoding {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
InvalidEncoding { detail: None, desc } =>
write!(fmt, "{}", desc),
InvalidEncoding { detail: Some(ref detail), desc } =>
write!(fmt, "{} ({})", desc, detail)
}
}
}
/// An error that can be produced during decoding.
///
/// If decoding from a Buffer, assume that the buffer has been left
/// in an invalid state.
#[derive(Debug)]
pub enum DecodingError {
/// If the error stems from the reader that is being used
/// during decoding, that error will be stored and returned here.
IoError(IoError),
/// If the bytes in the reader are not decodable because of an invalid
/// encoding, this error will be returned. This error is only possible
/// if a stream is corrupted. A stream produced from `encode` or `encode_into`
/// should **never** produce an InvalidEncoding error.
InvalidEncoding(InvalidEncoding),
/// If decoding a message takes more than the provided size limit, this
/// error is returned.
SizeLimit
}
impl fmt::Display for DecodingError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
DecodingError::IoError(ref ioerr) =>
write!(fmt, "IoError: {}", ioerr),
DecodingError::InvalidEncoding(ref ib) =>
write!(fmt, "InvalidEncoding: {}", ib),
DecodingError::SizeLimit =>
write!(fmt, "SizeLimit")
}
}
}
pub type DecodingResult<T> = Result<T, DecodingError>;
fn wrap_io(err: IoError) -> DecodingError {
DecodingError::IoError(err)
}
impl Error for DecodingError {
fn description(&self) -> &str {
match *self {
DecodingError::IoError(ref err) => Error::description(err),
DecodingError::InvalidEncoding(ref ib) => ib.desc,
DecodingError::SizeLimit => "the size limit for decoding has been reached"
}
}
fn cause(&self) -> Option<&Error> {
match *self {
DecodingError::IoError(ref err) => err.cause(),
DecodingError::InvalidEncoding(_) => None,
DecodingError::SizeLimit => None
}
}
}
impl From<IoError> for DecodingError {
fn from(err: IoError) -> DecodingError {
DecodingError::IoError(err)
}
}
/// A Decoder that reads bytes from a buffer.
///
/// This struct should rarely be used.
/// In most cases, prefer the `decode_from` function.
///
/// ```rust,ignore
/// let dr = bincode::rustc_serialize::DecoderReader::new(&mut some_reader, SizeLimit::Infinite);
/// let result: T = Decodable::decode(&mut dr);
/// let bytes_read = dr.bytes_read();
/// ```
pub struct DecoderReader<'a, R: 'a> {
reader: &'a mut R,
size_limit: SizeLimit,
read: u64
}
impl<'a, R: Read> DecoderReader<'a, R> {
pub fn new(r: &'a mut R, size_limit: SizeLimit) -> DecoderReader<'a, R> {
DecoderReader {
reader: r,
size_limit: size_limit,
read: 0
}
}
/// Returns the number of bytes read from the contained Reader.
pub fn bytes_read(&self) -> u64 {
self.read
}
}
impl <'a, A> DecoderReader<'a, A> {
fn read_bytes(&mut self, count: u64) -> Result<(), DecodingError> {
self.read = match self.read.checked_add(count) {
Some(read) => read,
None => return Err(DecodingError::SizeLimit),
};
match self.size_limit {
SizeLimit::Infinite => Ok(()),
SizeLimit::Bounded(x) if self.read <= x => Ok(()),
SizeLimit::Bounded(_) => Err(DecodingError::SizeLimit)
}
}
fn read_type<T>(&mut self) -> Result<(), DecodingError> {
use std::mem::size_of;
self.read_bytes(size_of::<T>() as u64)
}
}
impl<'a, R: Read> Decoder for DecoderReader<'a, R> {
type Error = DecodingError;
fn read_nil(&mut self) -> DecodingResult<()> {
Ok(())
}
fn read_usize(&mut self) -> DecodingResult<usize> {
Ok(try!(self.read_u64().map(|x| x as usize)))
}
fn read_u64(&mut self) -> DecodingResult<u64> {
try!(self.read_type::<u64>());
self.reader.read_u64::<BigEndian>().map_err(wrap_io)
}
fn read_u32(&mut self) -> DecodingResult<u32> {
try!(self.read_type::<u32>());
self.reader.read_u32::<BigEndian>().map_err(wrap_io)
}
fn read_u16(&mut self) -> DecodingResult<u16> {
try!(self.read_type::<u16>());
self.reader.read_u16::<BigEndian>().map_err(wrap_io)
}
fn read_u8(&mut self) -> DecodingResult<u8> {
try!(self.read_type::<u8>());
self.reader.read_u8().map_err(wrap_io)
}
fn read_isize(&mut self) -> DecodingResult<isize> {
self.read_i64().map(|x| x as isize)
}
fn read_i64(&mut self) -> DecodingResult<i64> {
try!(self.read_type::<i64>());
self.reader.read_i64::<BigEndian>().map_err(wrap_io)
}
fn read_i32(&mut self) -> DecodingResult<i32> {
try!(self.read_type::<i32>());
self.reader.read_i32::<BigEndian>().map_err(wrap_io)
}
fn read_i16(&mut self) -> DecodingResult<i16> {
try!(self.read_type::<i16>());
self.reader.read_i16::<BigEndian>().map_err(wrap_io)
}
fn read_i8(&mut self) -> DecodingResult<i8> {
try!(self.read_type::<i8>());
self.reader.read_i8().map_err(wrap_io)
}
fn read_bool(&mut self) -> DecodingResult<bool> {
let x = try!(self.read_i8());
match x {
1 => Ok(true),
0 => Ok(false),
_ => Err(DecodingError::InvalidEncoding(InvalidEncoding{
desc: "invalid u8 when decoding bool",
detail: Some(format!("Expected 0 or 1, got {}", x))
})),
}
}
fn read_f64(&mut self) -> DecodingResult<f64> {
try!(self.read_type::<f64>());
self.reader.read_f64::<BigEndian>().map_err(wrap_io)
}
fn read_f32(&mut self) -> DecodingResult<f32> {
try!(self.read_type::<f32>());
self.reader.read_f32::<BigEndian>().map_err(wrap_io)
}
fn read_char(&mut self) -> DecodingResult<char> {
use std::str;
let error = DecodingError::InvalidEncoding(InvalidEncoding {
desc: "Invalid char encoding",
detail: None
});
let mut buf = [0];
let _ = try!(self.reader.read(&mut buf[..]));
let first_byte = buf[0];
let width = utf8_char_width(first_byte);
if width == 1 { return Ok(first_byte as char) }
if width == 0 { return Err(error)}
let mut buf = [first_byte, 0, 0, 0];
{
let mut start = 1;
while start < width {
match try!(self.reader.read(&mut buf[start .. width])) {
n if n == width - start => break,
n if n < width - start => { start += n; }
_ => return Err(error)
}
}
}
let res = try!(match str::from_utf8(&buf[..width]).ok() {
Some(s) => Ok(s.chars().next().unwrap()),
None => Err(error)
});
try!(self.read_bytes(res.len_utf8() as u64));
Ok(res)
}
fn read_str(&mut self) -> DecodingResult<String> {
let len = try!(self.read_usize());
try!(self.read_bytes(len as u64));
let mut buff = Vec::new();
try!(self.reader.by_ref().take(len as u64).read_to_end(&mut buff));
match String::from_utf8(buff) {
Ok(s) => Ok(s),
Err(err) => Err(DecodingError::InvalidEncoding(InvalidEncoding {
desc: "error while decoding utf8 string",
detail: Some(format!("Decoding error: {}", err))
})),
}
}
fn read_enum<T, F>(&mut self, _: &str, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_enum_variant<T, F>(&mut self, names: &[&str], mut f: F) -> DecodingResult<T>
where F: FnMut(&mut DecoderReader<'a, R>, usize) -> DecodingResult<T>
{
let id = try!(self.read_u32());
let id = id as usize;
if id >= names.len() {
Err(DecodingError::InvalidEncoding(InvalidEncoding {
desc: "out of bounds tag when reading enum variant",
detail: Some(format!("Expected tag < {}, got {}", names.len(), id))
}))
} else {
f(self, id)
}
}
fn read_enum_variant_arg<T, F>(&mut self, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_enum_struct_variant<T, F>(&mut self, names: &[&str], f: F) -> DecodingResult<T>
where F: FnMut(&mut DecoderReader<'a, R>, usize) -> DecodingResult<T>
{
self.read_enum_variant(names, f)
}
fn read_enum_struct_variant_field<T, F>(&mut self,
_: &str,
f_idx: usize,
f: F)
-> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
self.read_enum_variant_arg(f_idx, f)
}
fn read_struct<T, F>(&mut self, _: &str, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_struct_field<T, F>(&mut self, _: &str, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_tuple<T, F>(&mut self, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_tuple_arg<T, F>(&mut self, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_tuple_struct<T, F>(&mut self, _: &str, len: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
self.read_tuple(len, f)
}
fn read_tuple_struct_arg<T, F>(&mut self, a_idx: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
self.read_tuple_arg(a_idx, f)
}
fn read_option<T, F>(&mut self, mut f: F) -> DecodingResult<T>
where F: FnMut(&mut DecoderReader<'a, R>, bool) -> DecodingResult<T>
{
let x = try!(self.read_u8());
match x {
1 => f(self, true),
0 => f(self, false),
_ => Err(DecodingError::InvalidEncoding(InvalidEncoding {
desc: "invalid tag when decoding Option",
detail: Some(format!("Expected 0 or 1, got {}", x))
})),
}
}
fn read_seq<T, F>(&mut self, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>, usize) -> DecodingResult<T>
{
let len = try!(self.read_usize());
f(self, len)
}
fn read_seq_elt<T, F>(&mut self, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_map<T, F>(&mut self, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>, usize) -> DecodingResult<T>
{
let len = try!(self.read_usize());
f(self, len)
}
fn read_map_elt_key<T, F>(&mut self, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn read_map_elt_val<T, F>(&mut self, _: usize, f: F) -> DecodingResult<T>
where F: FnOnce(&mut DecoderReader<'a, R>) -> DecodingResult<T>
{
f(self)
}
fn error(&mut self, err: &str) -> DecodingError {
DecodingError::InvalidEncoding(InvalidEncoding {
desc: "user-induced error",
detail: Some(err.to_string()),
})
}
}
static UTF8_CHAR_WIDTH: [u8; 256] = [
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
];
fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}


@@ -1,422 +0,0 @@
use std::io::Write;
use std::io::Error as IoError;
use std::error::Error;
use std::fmt;
use rustc_serialize_crate::Encoder;
use byteorder::{BigEndian, WriteBytesExt};
pub type EncodingResult<T> = Result<T, EncodingError>;
/// An error that can be produced during encoding.
#[derive(Debug)]
pub enum EncodingError {
/// An error originating from the underlying `Writer`.
IoError(IoError),
/// An object could not be encoded with the given size limit.
///
/// This error is returned before any bytes are written to the
/// output `Writer`.
SizeLimit,
}
/// An Encoder that encodes values directly into a Writer.
///
/// This struct should not be used often.
/// For most cases, prefer the `encode_into` function.
pub struct EncoderWriter<'a, W: 'a> {
writer: &'a mut W,
}
pub struct SizeChecker {
pub size_limit: u64,
pub written: u64
}
fn wrap_io(err: IoError) -> EncodingError {
EncodingError::IoError(err)
}
impl fmt::Display for EncodingError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
EncodingError::IoError(ref err) => write!(f, "IoError: {}", err),
EncodingError::SizeLimit => write!(f, "SizeLimit")
}
}
}
impl Error for EncodingError {
fn description(&self) -> &str {
match *self {
EncodingError::IoError(ref err) => Error::description(err),
EncodingError::SizeLimit => "the size limit for decoding has been reached"
}
}
fn cause(&self) -> Option<&Error> {
match *self {
EncodingError::IoError(ref err) => err.cause(),
EncodingError::SizeLimit => None
}
}
}
impl <'a, W: Write> EncoderWriter<'a, W> {
pub fn new(w: &'a mut W) -> EncoderWriter<'a, W> {
EncoderWriter {
writer: w,
}
}
}
impl SizeChecker {
pub fn new(limit: u64) -> SizeChecker {
SizeChecker {
size_limit: limit,
written: 0
}
}
fn add_raw(&mut self, size: usize) -> EncodingResult<()> {
self.written += size as u64;
if self.written <= self.size_limit {
Ok(())
} else {
Err(EncodingError::SizeLimit)
}
}
fn add_value<T>(&mut self, t: T) -> EncodingResult<()> {
use std::mem::size_of_val;
self.add_raw(size_of_val(&t))
}
}
impl<'a, W: Write> Encoder for EncoderWriter<'a, W> {
type Error = EncodingError;
fn emit_nil(&mut self) -> EncodingResult<()> {
Ok(())
}
fn emit_usize(&mut self, v: usize) -> EncodingResult<()> {
self.emit_u64(v as u64)
}
fn emit_u64(&mut self, v: u64) -> EncodingResult<()> {
self.writer.write_u64::<BigEndian>(v).map_err(wrap_io)
}
fn emit_u32(&mut self, v: u32) -> EncodingResult<()> {
self.writer.write_u32::<BigEndian>(v).map_err(wrap_io)
}
fn emit_u16(&mut self, v: u16) -> EncodingResult<()> {
self.writer.write_u16::<BigEndian>(v).map_err(wrap_io)
}
fn emit_u8(&mut self, v: u8) -> EncodingResult<()> {
self.writer.write_u8(v).map_err(wrap_io)
}
fn emit_isize(&mut self, v: isize) -> EncodingResult<()> {
self.emit_i64(v as i64)
}
fn emit_i64(&mut self, v: i64) -> EncodingResult<()> {
self.writer.write_i64::<BigEndian>(v).map_err(wrap_io)
}
fn emit_i32(&mut self, v: i32) -> EncodingResult<()> {
self.writer.write_i32::<BigEndian>(v).map_err(wrap_io)
}
fn emit_i16(&mut self, v: i16) -> EncodingResult<()> {
self.writer.write_i16::<BigEndian>(v).map_err(wrap_io)
}
fn emit_i8(&mut self, v: i8) -> EncodingResult<()> {
self.writer.write_i8(v).map_err(wrap_io)
}
fn emit_bool(&mut self, v: bool) -> EncodingResult<()> {
self.writer.write_u8(if v {1} else {0}).map_err(wrap_io)
}
fn emit_f64(&mut self, v: f64) -> EncodingResult<()> {
self.writer.write_f64::<BigEndian>(v).map_err(wrap_io)
}
fn emit_f32(&mut self, v: f32) -> EncodingResult<()> {
self.writer.write_f32::<BigEndian>(v).map_err(wrap_io)
}
fn emit_char(&mut self, v: char) -> EncodingResult<()> {
// TODO: change this back once unicode works
//let mut cbuf = [0; 4];
//let sz = v.encode_utf8(&mut cbuf[..]).unwrap_or(0);
//let ptr = &cbuf[..sz];
//self.writer.write_all(ptr).map_err(EncodingError::IoError)
let mut inter = String::with_capacity(1);
inter.push(v);
self.writer.write_all(inter.as_bytes()).map_err(EncodingError::IoError)
}
fn emit_str(&mut self, v: &str) -> EncodingResult<()> {
try!(self.emit_usize(v.len()));
self.writer.write_all(v.as_bytes()).map_err(EncodingError::IoError)
}
fn emit_enum<F>(&mut self, __: &str, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_enum_variant<F>(&mut self, _: &str, v_id: usize, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
let max_u32: u32 = ::std::u32::MAX;
if v_id > (max_u32 as usize) {
panic!("Variant tag doesn't fit in a u32")
}
try!(self.emit_u32(v_id as u32));
f(self)
}
fn emit_enum_variant_arg<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_enum_struct_variant<F>(&mut self,
v_name: &str,
v_id: usize,
len: usize,
f: F)
-> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
self.emit_enum_variant(v_name, v_id, len, f)
}
fn emit_enum_struct_variant_field<F>(&mut self, _: &str, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_struct<F>(&mut self, _: &str, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_struct_field<F>(&mut self, _: &str, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_tuple<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_tuple_arg<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_tuple_struct<F>(&mut self, _: &str, len: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
self.emit_tuple(len, f)
}
fn emit_tuple_struct_arg<F>(&mut self, f_idx: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
self.emit_tuple_arg(f_idx, f)
}
fn emit_option<F>(&mut self, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_option_none(&mut self) -> EncodingResult<()> {
self.writer.write_u8(0).map_err(wrap_io)
}
fn emit_option_some<F>(&mut self, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
try!(self.writer.write_u8(1).map_err(wrap_io));
f(self)
}
fn emit_seq<F>(&mut self, len: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
try!(self.emit_usize(len));
f(self)
}
fn emit_seq_elt<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_map<F>(&mut self, len: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
try!(self.emit_usize(len));
f(self)
}
fn emit_map_elt_key<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
fn emit_map_elt_val<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut EncoderWriter<'a, W>) -> EncodingResult<()>
{
f(self)
}
}
impl Encoder for SizeChecker {
type Error = EncodingError;
fn emit_nil(&mut self) -> EncodingResult<()> {
Ok(())
}
fn emit_usize(&mut self, v: usize) -> EncodingResult<()> {
self.add_value(v as u64)
}
fn emit_u64(&mut self, v: u64) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_u32(&mut self, v: u32) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_u16(&mut self, v: u16) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_u8(&mut self, v: u8) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_isize(&mut self, v: isize) -> EncodingResult<()> {
self.add_value(v as i64)
}
fn emit_i64(&mut self, v: i64) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_i32(&mut self, v: i32) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_i16(&mut self, v: i16) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_i8(&mut self, v: i8) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_bool(&mut self, _: bool) -> EncodingResult<()> {
self.add_value(0 as u8)
}
fn emit_f64(&mut self, v: f64) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_f32(&mut self, v: f32) -> EncodingResult<()> {
self.add_value(v)
}
fn emit_char(&mut self, v: char) -> EncodingResult<()> {
self.add_raw(v.len_utf8())
}
fn emit_str(&mut self, v: &str) -> EncodingResult<()> {
try!(self.add_value(0 as u64));
self.add_raw(v.len())
}
fn emit_enum<F>(&mut self, __: &str, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_enum_variant<F>(&mut self, _: &str, v_id: usize, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
try!(self.add_value(v_id as u32));
f(self)
}
fn emit_enum_variant_arg<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_enum_struct_variant<F>(&mut self,
_: &str,
_: usize,
_: usize,
f: F)
-> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_enum_struct_variant_field<F>(&mut self, _: &str, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_struct<F>(&mut self, _: &str, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_struct_field<F>(&mut self, _: &str, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_tuple<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_tuple_arg<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_tuple_struct<F>(&mut self, _: &str, len: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
self.emit_tuple(len, f)
}
fn emit_tuple_struct_arg<F>(&mut self, f_idx: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
self.emit_tuple_arg(f_idx, f)
}
fn emit_option<F>(&mut self, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_option_none(&mut self) -> EncodingResult<()> {
self.add_value(0 as u8)
}
fn emit_option_some<F>(&mut self, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
try!(self.add_value(1 as u8));
f(self)
}
fn emit_seq<F>(&mut self, len: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
try!(self.emit_usize(len));
f(self)
}
fn emit_seq_elt<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_map<F>(&mut self, len: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
try!(self.emit_usize(len));
f(self)
}
fn emit_map_elt_key<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
fn emit_map_elt_val<F>(&mut self, _: usize, f: F) -> EncodingResult<()>
where F: FnOnce(&mut SizeChecker) -> EncodingResult<()>
{
f(self)
}
}

125
third_party/rust/bincode/src/serde/mod.rs vendored

@@ -3,19 +3,16 @@
//! implementation.
use std::io::{Write, Read};
use std::io::Error as IoError;
use std::{error, fmt, result};
use ::SizeLimit;
pub use self::reader::{
Deserializer,
DeserializeResult,
DeserializeError,
InvalidEncoding
};
pub use self::writer::{
Serializer,
SerializeResult,
SerializeError,
};
use self::writer::SizeChecker;
@@ -25,16 +22,105 @@ use serde_crate as serde;
mod reader;
mod writer;
pub type Result<T> = result::Result<T, Error>;
/// An error that can be produced during (de)serializing.
///
/// If decoding from a Buffer, assume that the buffer has been left
/// in an invalid state.
pub type Error = Box<ErrorKind>;
#[derive(Debug)]
pub enum ErrorKind {
/// If the error stems from the reader/writer that is being used
/// during (de)serialization, that error will be stored and returned here.
IoError(IoError),
/// If the bytes in the reader are not decodable because of an invalid
/// encoding, this error will be returned. This error is only possible
/// if a stream is corrupted. A stream produced from `encode` or `encode_into`
/// should **never** produce an InvalidEncoding error.
InvalidEncoding{
desc: &'static str,
detail: Option<String>
},
/// If (de)serializing a message takes more than the provided size limit, this
/// error is returned.
SizeLimit,
SequenceMustHaveLength,
Custom(String)
}
impl error::Error for ErrorKind {
fn description(&self) -> &str {
match *self {
ErrorKind::IoError(ref err) => error::Error::description(err),
ErrorKind::InvalidEncoding{desc, ..} => desc,
ErrorKind::SequenceMustHaveLength => "bincode can't encode infinite sequences",
ErrorKind::SizeLimit => "the size limit for decoding has been reached",
ErrorKind::Custom(ref msg) => msg,
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
ErrorKind::IoError(ref err) => err.cause(),
ErrorKind::InvalidEncoding{..} => None,
ErrorKind::SequenceMustHaveLength => None,
ErrorKind::SizeLimit => None,
ErrorKind::Custom(_) => None,
}
}
}
impl From<IoError> for Error {
fn from(err: IoError) -> Error {
ErrorKind::IoError(err).into()
}
}
impl fmt::Display for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ErrorKind::IoError(ref ioerr) =>
write!(fmt, "IoError: {}", ioerr),
ErrorKind::InvalidEncoding{desc, detail: None}=>
write!(fmt, "InvalidEncoding: {}", desc),
ErrorKind::InvalidEncoding{desc, detail: Some(ref detail)}=>
write!(fmt, "InvalidEncoding: {} ({})", desc, detail),
ErrorKind::SequenceMustHaveLength =>
write!(fmt, "Bincode can only encode sequences and maps that have a knowable size ahead of time."),
ErrorKind::SizeLimit =>
write!(fmt, "SizeLimit"),
ErrorKind::Custom(ref s) =>
s.fmt(fmt),
}
}
}
impl serde::de::Error for Error {
fn custom<T: fmt::Display>(desc: T) -> Error {
ErrorKind::Custom(desc.to_string()).into()
}
}
impl serde::ser::Error for Error {
fn custom<T: fmt::Display>(msg: T) -> Self {
ErrorKind::Custom(msg.to_string()).into()
}
}
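A minimal sketch of how a caller might branch on the boxed `ErrorKind`; the oversized `Vec` and the four-byte limit are invented for the example:

    extern crate bincode;

    use bincode::{serialize, ErrorKind, SizeLimit};

    fn main() {
        // `Error` is a boxed `ErrorKind`, so one deref reaches the variant.
        match serialize(&vec![0u8; 64], SizeLimit::Bounded(4)) {
            Ok(bytes) => println!("serialized {} bytes", bytes.len()),
            Err(err) => match *err {
                ErrorKind::SizeLimit => println!("value exceeded the size limit"),
                ref other => println!("unexpected error: {}", other),
            },
        }
    }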
/// Serializes an object directly into a `Writer`.
///
/// If the serialization would take more bytes than allowed by `size_limit`, an error
/// is returned and *no bytes* will be written into the `Writer`.
///
/// If this returns an `SerializeError` (other than SizeLimit), assume that the
/// If this returns an `Error` (other than SizeLimit), assume that the
/// writer is in an invalid state, as writing could bail out in the middle of
/// serializing.
pub fn serialize_into<W, T>(writer: &mut W, value: &T, size_limit: SizeLimit) -> SerializeResult<()>
where W: Write, T: serde::Serialize,
pub fn serialize_into<W: ?Sized, T: ?Sized>(writer: &mut W, value: &T, size_limit: SizeLimit) -> Result<()>
where W: Write,
T: serde::Serialize,
{
match size_limit {
SizeLimit::Infinite => { }
@@ -52,18 +138,15 @@ pub fn serialize_into<W, T>(writer: &mut W, value: &T, size_limit: SizeLimit) ->
///
/// If the serialization would take more bytes than allowed by `size_limit`,
/// an error is returned.
pub fn serialize<T>(value: &T, size_limit: SizeLimit) -> SerializeResult<Vec<u8>>
where T: serde::Serialize,
pub fn serialize<T: ?Sized>(value: &T, size_limit: SizeLimit) -> Result<Vec<u8>>
where T: serde::Serialize
{
// Since we are putting values directly into a vector, we can do size
// computation out here and pre-allocate a buffer of *exactly*
// the right size.
let mut writer = match size_limit {
SizeLimit::Bounded(size_limit) => {
let actual_size = match serialized_size_bounded(value, size_limit) {
Some(actual_size) => actual_size,
None => { return Err(SerializeError::SizeLimit); }
};
let actual_size = try!(serialized_size_bounded(value, size_limit).ok_or(ErrorKind::SizeLimit));
Vec::with_capacity(actual_size as usize)
}
SizeLimit::Infinite => Vec::new()
@@ -77,7 +160,9 @@ pub fn serialize<T>(value: &T, size_limit: SizeLimit) -> SerializeResult<Vec<u8>
///
/// This is used internally as part of the check for encode_into, but it can
/// be useful for preallocating buffers if that's your style.
pub fn serialized_size<T: serde::Serialize>(value: &T) -> u64 {
pub fn serialized_size<T: ?Sized>(value: &T) -> u64
where T: serde::Serialize
{
use std::u64::MAX;
let mut size_checker = SizeChecker::new(MAX);
value.serialize(&mut size_checker).ok();
@@ -89,7 +174,9 @@ pub fn serialized_size<T: serde::Serialize>(value: &T) -> u64 {
///
/// If it can be serialized in `max` or fewer bytes, that number will be returned
/// inside `Some`. If it goes over bounds, then None is returned.
pub fn serialized_size_bounded<T: serde::Serialize>(value: &T, max: u64) -> Option<u64> {
pub fn serialized_size_bounded<T: ?Sized>(value: &T, max: u64) -> Option<u64>
where T: serde::Serialize
{
let mut size_checker = SizeChecker::new(max);
value.serialize(&mut size_checker).ok().map(|_| size_checker.written)
}
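A sketch of the preallocation pattern hinted at above; the tuple value is arbitrary, and it assumes `serialize_into` and `serialized_size` are re-exported at the crate root alongside `serialize`:

    extern crate bincode;

    use bincode::{serialize_into, serialized_size, SizeLimit};

    fn main() {
        let record = ("id".to_string(), 7u64);
        // Reserve exactly the number of bytes the value will need, then write into it.
        let size = serialized_size(&record);
        let mut buf: Vec<u8> = Vec::with_capacity(size as usize);
        serialize_into(&mut buf, &record, SizeLimit::Infinite).unwrap();
        assert_eq!(buf.len(), size as usize);
    }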
@@ -100,10 +187,10 @@ pub fn serialized_size_bounded<T: serde::Serialize>(value: &T, max: u64) -> Opti
/// A SizeLimit can help prevent an attacker from flooding your server with
/// a neverending stream of values that runs your server out of memory.
///
/// If this returns an `DeserializeError`, assume that the buffer that you passed
/// If this returns an `Error`, assume that the buffer that you passed
/// in is in an invalid state, as the error could be returned during any point
/// in the reading.
pub fn deserialize_from<R, T>(reader: &mut R, size_limit: SizeLimit) -> DeserializeResult<T>
pub fn deserialize_from<R: ?Sized, T>(reader: &mut R, size_limit: SizeLimit) -> Result<T>
where R: Read,
T: serde::Deserialize,
{
@@ -115,7 +202,7 @@ pub fn deserialize_from<R, T>(reader: &mut R, size_limit: SizeLimit) -> Deserial
///
/// This method does not have a size-limit because if you already have the bytes
/// in memory, then you don't gain anything by having a limiter.
pub fn deserialize<T>(bytes: &[u8]) -> DeserializeResult<T>
pub fn deserialize<T>(bytes: &[u8]) -> Result<T>
where T: serde::Deserialize,
{
let mut reader = bytes;
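And a rough sketch of the reader-based path described above, using a byte slice as the `Read` source and a defensive bound; the 1024-byte cap is an arbitrary choice:

    extern crate bincode;

    use bincode::{serialize, deserialize_from, SizeLimit};

    fn main() {
        let bytes = serialize(&vec![1u32, 2, 3], SizeLimit::Infinite).unwrap();
        // Treat the buffer as an untrusted stream and cap how much may be read.
        let mut reader = &bytes[..];
        let nums: Vec<u32> = deserialize_from(&mut reader, SizeLimit::Bounded(1024)).unwrap();
        assert_eq!(nums, vec![1, 2, 3]);
    }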

404
third_party/rust/bincode/src/serde/reader.rs vendored

@@ -1,113 +1,11 @@
use std::io::Read;
use std::io::Error as IoError;
use std::error::Error;
use std::fmt;
use std::convert::From;
use byteorder::{BigEndian, ReadBytesExt};
use num_traits;
use serde_crate as serde;
use serde_crate::de::value::ValueDeserializer;
use serde_crate::de::Error as DeError;
use ::SizeLimit;
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct InvalidEncoding {
pub desc: &'static str,
pub detail: Option<String>,
}
impl fmt::Display for InvalidEncoding {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
InvalidEncoding { detail: None, desc } =>
write!(fmt, "{}", desc),
InvalidEncoding { detail: Some(ref detail), desc } =>
write!(fmt, "{} ({})", desc, detail)
}
}
}
/// An error that can be produced during decoding.
///
/// If decoding from a Buffer, assume that the buffer has been left
/// in an invalid state.
#[derive(Debug)]
pub enum DeserializeError {
/// If the error stems from the reader that is being used
/// during decoding, that error will be stored and returned here.
IoError(IoError),
/// If the bytes in the reader are not decodable because of an invalid
/// encoding, this error will be returned. This error is only possible
/// if a stream is corrupted. A stream produced from `encode` or `encode_into`
/// should **never** produce an InvalidEncoding error.
InvalidEncoding(InvalidEncoding),
/// If decoding a message takes more than the provided size limit, this
/// error is returned.
SizeLimit,
Serde(serde::de::value::Error)
}
impl Error for DeserializeError {
fn description(&self) -> &str {
match *self {
DeserializeError::IoError(ref err) => Error::description(err),
DeserializeError::InvalidEncoding(ref ib) => ib.desc,
DeserializeError::SizeLimit => "the size limit for decoding has been reached",
DeserializeError::Serde(ref s) => s.description(),
}
}
fn cause(&self) -> Option<&Error> {
match *self {
DeserializeError::IoError(ref err) => err.cause(),
DeserializeError::InvalidEncoding(_) => None,
DeserializeError::SizeLimit => None,
DeserializeError::Serde(ref s) => s.cause(),
}
}
}
impl From<IoError> for DeserializeError {
fn from(err: IoError) -> DeserializeError {
DeserializeError::IoError(err)
}
}
impl From<serde::de::value::Error> for DeserializeError {
fn from(err: serde::de::value::Error) -> DeserializeError {
DeserializeError::Serde(err)
}
}
impl fmt::Display for DeserializeError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
DeserializeError::IoError(ref ioerr) =>
write!(fmt, "IoError: {}", ioerr),
DeserializeError::InvalidEncoding(ref ib) =>
write!(fmt, "InvalidEncoding: {}", ib),
DeserializeError::SizeLimit =>
write!(fmt, "SizeLimit"),
DeserializeError::Serde(ref s) =>
s.fmt(fmt),
}
}
}
impl serde::de::Error for DeserializeError {
fn custom<T: Into<String>>(desc: T) -> DeserializeError {
DeserializeError::Serde(serde::de::value::Error::Custom(desc.into()))
}
fn end_of_stream() -> DeserializeError {
DeserializeError::Serde(serde::de::value::Error::EndOfStream)
}
}
pub type DeserializeResult<T> = Result<T, DeserializeError>;
use super::{Result, Error, ErrorKind};
/// A Deserializer that reads bytes from a buffer.
///
@@ -119,14 +17,14 @@ pub type DeserializeResult<T> = Result<T, DeserializeError>;
/// serde::Deserialize::deserialize(&mut deserializer);
/// let bytes_read = d.bytes_read();
/// ```
pub struct Deserializer<'a, R: 'a> {
reader: &'a mut R,
pub struct Deserializer<R> {
reader: R,
size_limit: SizeLimit,
read: u64
}
impl<'a, R: Read> Deserializer<'a, R> {
pub fn new(r: &'a mut R, size_limit: SizeLimit) -> Deserializer<'a, R> {
impl<R: Read> Deserializer<R> {
pub fn new(r: R, size_limit: SizeLimit) -> Deserializer<R> {
Deserializer {
reader: r,
size_limit: size_limit,
@@ -139,39 +37,39 @@ impl<'a, R: Read> Deserializer<'a, R> {
self.read
}
fn read_bytes(&mut self, count: u64) -> Result<(), DeserializeError> {
fn read_bytes(&mut self, count: u64) -> Result<()> {
self.read += count;
match self.size_limit {
SizeLimit::Infinite => Ok(()),
SizeLimit::Bounded(x) if self.read <= x => Ok(()),
SizeLimit::Bounded(_) => Err(DeserializeError::SizeLimit)
SizeLimit::Bounded(_) => Err(ErrorKind::SizeLimit.into())
}
}
fn read_type<T>(&mut self) -> Result<(), DeserializeError> {
fn read_type<T>(&mut self) -> Result<()> {
use std::mem::size_of;
self.read_bytes(size_of::<T>() as u64)
}
fn read_string(&mut self) -> DeserializeResult<String> {
let len = try!(serde::Deserialize::deserialize(self));
fn read_string(&mut self) -> Result<String> {
let len = try!(serde::Deserialize::deserialize(&mut *self));
try!(self.read_bytes(len));
let mut buffer = Vec::new();
try!(self.reader.by_ref().take(len as u64).read_to_end(&mut buffer));
String::from_utf8(buffer).map_err(|err|
DeserializeError::InvalidEncoding(InvalidEncoding {
ErrorKind::InvalidEncoding{
desc: "error while decoding utf8 string",
detail: Some(format!("Deserialize error: {}", err))
}))
}.into())
}
}
macro_rules! impl_nums {
($ty:ty, $dser_method:ident, $visitor_method:ident, $reader_method:ident) => {
#[inline]
fn $dser_method<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn $dser_method<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
try!(self.read_type::<$ty>());
@@ -182,18 +80,18 @@ macro_rules! impl_nums {
}
impl<'a, R: Read> serde::Deserializer for Deserializer<'a, R> {
type Error = DeserializeError;
impl<'a, R: Read> serde::Deserializer for &'a mut Deserializer<R> {
type Error = Error;
#[inline]
fn deserialize<V>(&mut self, _visitor: V) -> DeserializeResult<V::Value>
fn deserialize<V>(self, _visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
let message = "bincode does not support Deserializer::deserialize";
Err(DeserializeError::Serde(serde::de::value::Error::Custom(message.into())))
Err(Error::custom(message))
}
fn deserialize_bool<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
let value: u8 = try!(serde::Deserialize::deserialize(self));
@@ -201,10 +99,10 @@ impl<'a, R: Read> serde::Deserializer for Deserializer<'a, R> {
1 => visitor.visit_bool(true),
0 => visitor.visit_bool(false),
value => {
Err(DeserializeError::InvalidEncoding(InvalidEncoding {
Err(ErrorKind::InvalidEncoding{
desc: "invalid u8 when decoding bool",
detail: Some(format!("Expected 0 or 1, got {}", value))
}))
}.into())
}
}
}
@@ -220,7 +118,7 @@ impl<'a, R: Read> serde::Deserializer for Deserializer<'a, R> {
#[inline]
fn deserialize_u8<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
try!(self.read_type::<u8>());
@@ -228,52 +126,28 @@ impl<'a, R: Read> serde::Deserializer for Deserializer<'a, R> {
}
#[inline]
fn deserialize_usize<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
where V: serde::de::Visitor,
{
try!(self.read_type::<u64>());
let value = try!(self.reader.read_u64::<BigEndian>());
match num_traits::cast(value) {
Some(value) => visitor.visit_usize(value),
None => Err(DeserializeError::Serde(serde::de::value::Error::Custom("expected usize".into())))
}
}
#[inline]
fn deserialize_i8<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
try!(self.read_type::<i8>());
visitor.visit_i8(try!(self.reader.read_i8()))
}
#[inline]
fn deserialize_isize<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
where V: serde::de::Visitor,
{
try!(self.read_type::<i64>());
let value = try!(self.reader.read_i64::<BigEndian>());
match num_traits::cast(value) {
Some(value) => visitor.visit_isize(value),
None => Err(DeserializeError::Serde(serde::de::value::Error::Custom("expected isize".into()))),
}
}
fn deserialize_unit<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
visitor.visit_unit()
}
fn deserialize_char<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_char<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
use std::str;
let error = DeserializeError::InvalidEncoding(InvalidEncoding {
let error = ErrorKind::InvalidEncoding{
desc: "Invalid char encoding",
detail: None
});
}.into();
let mut buf = [0];
@@ -303,245 +177,235 @@ impl<'a, R: Read> serde::Deserializer for Deserializer<'a, R> {
visitor.visit_char(res)
}
fn deserialize_str<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
visitor.visit_str(&try!(self.read_string()))
}
fn deserialize_string<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_string<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
visitor.visit_string(try!(self.read_string()))
}
fn deserialize_bytes<V>(&mut self, visitor: V) -> DeserializeResult<V::Value>
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
self.deserialize_seq(visitor)
}
fn deserialize_enum<V>(&mut self,
_enum: &'static str,
_variants: &'static [&'static str],
mut visitor: V) -> Result<V::Value, Self::Error>
where V: serde::de::EnumVisitor,
{
visitor.visit(self)
}
fn deserialize_tuple<V>(&mut self,
_len: usize,
mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
struct TupleVisitor<'a, 'b: 'a, R: Read + 'b>(&'a mut Deserializer<'b, R>);
self.deserialize_seq(visitor)
}
impl<'a, 'b: 'a, R: Read + 'b> serde::de::SeqVisitor for TupleVisitor<'a, 'b, R> {
type Error = DeserializeError;
fn deserialize_enum<V>(self,
_enum: &'static str,
_variants: &'static [&'static str],
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
impl<'a, R: Read + 'a> serde::de::EnumVisitor for &'a mut Deserializer<R> {
type Error = Error;
type Variant = Self;
fn visit<T>(&mut self) -> Result<Option<T>, Self::Error>
where T: serde::de::Deserialize,
fn visit_variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant)>
where V: serde::de::DeserializeSeed,
{
let value = try!(serde::Deserialize::deserialize(self.0));
Ok(Some(value))
let idx: u32 = try!(serde::de::Deserialize::deserialize(&mut *self));
let val: Result<_> = seed.deserialize(idx.into_deserializer());
Ok((try!(val), self))
}
}
fn end(&mut self) -> Result<(), Self::Error> {
Ok(())
visitor.visit_enum(self)
}
fn deserialize_tuple<V>(self,
_len: usize,
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
struct TupleVisitor<'a, R: Read + 'a>(&'a mut Deserializer<R>);
impl<'a, 'b: 'a, R: Read + 'b> serde::de::SeqVisitor for TupleVisitor<'a, R> {
type Error = Error;
fn visit_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where T: serde::de::DeserializeSeed,
{
let value = try!(serde::de::DeserializeSeed::deserialize(seed, &mut *self.0));
Ok(Some(value))
}
}
visitor.visit_seq(TupleVisitor(self))
}
fn deserialize_seq_fixed_size<V>(&mut self,
_: usize,
visitor: V) -> DeserializeResult<V::Value>
fn deserialize_seq_fixed_size<V>(self,
len: usize,
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
self.deserialize_seq(visitor)
}
fn deserialize_option<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
where V: serde::de::Visitor,
{
let value: u8 = try!(serde::de::Deserialize::deserialize(self));
match value {
0 => visitor.visit_none(),
1 => visitor.visit_some(self),
_ => Err(DeserializeError::InvalidEncoding(InvalidEncoding {
desc: "invalid tag when decoding Option",
detail: Some(format!("Expected 0 or 1, got {}", value))
})),
}
}
fn deserialize_seq<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
where V: serde::de::Visitor,
{
struct SeqVisitor<'a, 'b: 'a, R: Read + 'b> {
deserializer: &'a mut Deserializer<'b, R>,
struct SeqVisitor<'a, R: Read + 'a> {
deserializer: &'a mut Deserializer<R>,
len: usize,
}
impl<'a, 'b: 'a, R: Read + 'b> serde::de::SeqVisitor for SeqVisitor<'a, 'b, R> {
type Error = DeserializeError;
impl<'a, 'b: 'a, R: Read + 'b> serde::de::SeqVisitor for SeqVisitor<'a, R> {
type Error = Error;
fn visit<T>(&mut self) -> Result<Option<T>, Self::Error>
where T: serde::de::Deserialize,
fn visit_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where T: serde::de::DeserializeSeed,
{
if self.len > 0 {
self.len -= 1;
let value = try!(serde::Deserialize::deserialize(self.deserializer));
let value = try!(serde::de::DeserializeSeed::deserialize(seed, &mut *self.deserializer));
Ok(Some(value))
} else {
Ok(None)
}
}
fn end(&mut self) -> Result<(), Self::Error> {
if self.len == 0 {
Ok(())
} else {
Err(DeserializeError::Serde(serde::de::value::Error::Custom("expected end".into())))
}
}
}
let len = try!(serde::Deserialize::deserialize(self));
visitor.visit_seq(SeqVisitor { deserializer: self, len: len })
}
fn deserialize_map<V>(&mut self, mut visitor: V) -> DeserializeResult<V::Value>
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
struct MapVisitor<'a, 'b: 'a, R: Read + 'b> {
deserializer: &'a mut Deserializer<'b, R>,
let value: u8 = try!(serde::de::Deserialize::deserialize(&mut *self));
match value {
0 => visitor.visit_none(),
1 => visitor.visit_some(&mut *self),
_ => Err(ErrorKind::InvalidEncoding{
desc: "invalid tag when decoding Option",
detail: Some(format!("Expected 0 or 1, got {}", value))
}.into()),
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
let len = try!(serde::Deserialize::deserialize(&mut *self));
self.deserialize_seq_fixed_size(len, visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
struct MapVisitor<'a, R: Read + 'a> {
deserializer: &'a mut Deserializer<R>,
len: usize,
}
impl<'a, 'b: 'a, R: Read + 'b> serde::de::MapVisitor for MapVisitor<'a, 'b, R> {
type Error = DeserializeError;
impl<'a, 'b: 'a, R: Read + 'b> serde::de::MapVisitor for MapVisitor<'a, R> {
type Error = Error;
fn visit_key<K>(&mut self) -> Result<Option<K>, Self::Error>
where K: serde::de::Deserialize,
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where K: serde::de::DeserializeSeed,
{
if self.len > 0 {
self.len -= 1;
let key = try!(serde::Deserialize::deserialize(self.deserializer));
let key = try!(serde::de::DeserializeSeed::deserialize(seed, &mut *self.deserializer));
Ok(Some(key))
} else {
Ok(None)
}
}
fn visit_value<V>(&mut self) -> Result<V, Self::Error>
where V: serde::de::Deserialize,
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where V: serde::de::DeserializeSeed,
{
let value = try!(serde::Deserialize::deserialize(self.deserializer));
let value = try!(serde::de::DeserializeSeed::deserialize(seed, &mut *self.deserializer));
Ok(value)
}
fn end(&mut self) -> Result<(), Self::Error> {
if self.len == 0 {
Ok(())
} else {
Err(DeserializeError::Serde(serde::de::value::Error::Custom("expected end".into())))
}
}
}
let len = try!(serde::Deserialize::deserialize(self));
let len = try!(serde::Deserialize::deserialize(&mut *self));
visitor.visit_map(MapVisitor { deserializer: self, len: len })
}
fn deserialize_struct<V>(&mut self,
fn deserialize_struct<V>(self,
_name: &str,
fields: &'static [&'static str],
visitor: V) -> DeserializeResult<V::Value>
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
self.deserialize_tuple(fields.len(), visitor)
}
fn deserialize_struct_field<V>(&mut self,
_visitor: V) -> DeserializeResult<V::Value>
fn deserialize_struct_field<V>(self,
_visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
let message = "bincode does not support Deserializer::deserialize_struct_field";
Err(DeserializeError::Serde(serde::de::value::Error::Custom(message.into())))
Err(Error::custom(message))
}
fn deserialize_newtype_struct<V>(&mut self,
fn deserialize_newtype_struct<V>(self,
_name: &str,
mut visitor: V) -> DeserializeResult<V::Value>
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_unit_struct<V>(&mut self,
fn deserialize_unit_struct<V>(self,
_name: &'static str,
mut visitor: V) -> DeserializeResult<V::Value>
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
visitor.visit_unit()
}
fn deserialize_tuple_struct<V>(&mut self,
fn deserialize_tuple_struct<V>(self,
_name: &'static str,
len: usize,
visitor: V) -> DeserializeResult<V::Value>
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
self.deserialize_tuple(len, visitor)
}
fn deserialize_ignored_any<V>(&mut self,
_visitor: V) -> DeserializeResult<V::Value>
fn deserialize_ignored_any<V>(self,
_visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
let message = "bincode does not support Deserializer::deserialize_ignored_any";
Err(DeserializeError::Serde(serde::de::value::Error::Custom(message.into())))
Err(Error::custom(message))
}
}
impl<'a, R: Read> serde::de::VariantVisitor for Deserializer<'a, R> {
type Error = DeserializeError;
impl<'a, R: Read> serde::de::VariantVisitor for &'a mut Deserializer<R> {
type Error = Error;
fn visit_variant<V>(&mut self) -> Result<V, Self::Error>
where V: serde::Deserialize,
{
let index: u32 = try!(serde::Deserialize::deserialize(self));
let mut deserializer = (index as usize).into_deserializer();
let attempt: Result<V, serde::de::value::Error> = serde::Deserialize::deserialize(&mut deserializer);
Ok(try!(attempt))
}
fn visit_unit(&mut self) -> Result<(), Self::Error> {
fn visit_unit(self) -> Result<()> {
Ok(())
}
fn visit_newtype<T>(&mut self) -> Result<T, Self::Error>
where T: serde::de::Deserialize,
fn visit_newtype_seed<T>(self, seed: T) -> Result<T::Value>
where T: serde::de::DeserializeSeed,
{
serde::de::Deserialize::deserialize(self)
serde::de::DeserializeSeed::deserialize(seed, self)
}
fn visit_tuple<V>(&mut self,
fn visit_tuple<V>(self,
len: usize,
visitor: V) -> Result<V::Value, Self::Error>
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
serde::de::Deserializer::deserialize_tuple(self, len, visitor)
}
fn visit_struct<V>(&mut self,
fn visit_struct<V>(self,
fields: &'static [&'static str],
visitor: V) -> Result<V::Value, Self::Error>
visitor: V) -> Result<V::Value>
where V: serde::de::Visitor,
{
serde::de::Deserializer::deserialize_tuple(self, fields.len(), visitor)
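
The reader.rs hunks above track bincode's move from serde 0.8 to 0.9: Deserializer and VariantVisitor methods now take self by value, errors funnel through a single Error type, and the old visit_key/visit_value/visit_newtype entry points become seed-driven (visit_key_seed, visit_value_seed, visit_newtype_seed). The following dependency-free sketch illustrates only the seed idea; the Seed trait and the types around it are hypothetical stand-ins, not serde's real definitions.

// A seed is a value the *caller* constructs and hands to the deserializer;
// unlike a bare `Deserialize` bound, it can carry state into the decoding step.
trait Seed {
    type Value;
    fn produce(self, raw: u64) -> Self::Value;
}

// Stateless seed: behaves like the old Deserialize-only path.
struct PlainU64;
impl Seed for PlainU64 {
    type Value = u64;
    fn produce(self, raw: u64) -> u64 { raw }
}

// Stateful seed: carries data the 0.8-era API had no way to pass in.
struct Offset(u64);
impl Seed for Offset {
    type Value = u64;
    fn produce(self, raw: u64) -> u64 { raw + self.0 }
}

fn main() {
    assert_eq!(PlainU64.produce(7), 7);
    assert_eq!(Offset(100).produce(7), 107);
}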

785
third_party/rust/bincode/src/serde/writer.rs vendored

File diff suppressed because it is too large. Load diff

262
third_party/rust/bincode/tests/test.rs vendored
View file

@@ -1,64 +1,49 @@
#![feature(proc_macro)]
#[macro_use]
extern crate serde_derive;
extern crate bincode;
extern crate rustc_serialize;
extern crate serde;
use std::fmt::Debug;
use std::collections::HashMap;
use std::ops::Deref;
use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
use bincode::{RefBox, StrBox, SliceBox};
use bincode::refbox::{RefBox, StrBox, SliceBox};
use bincode::SizeLimit::{self, Infinite, Bounded};
use bincode::rustc_serialize::{encode, decode, decode_from, DecodingError};
use bincode::serde::{serialize, deserialize, deserialize_from, DeserializeError, DeserializeResult};
use bincode::{serialize, serialized_size, deserialize, deserialize_from, ErrorKind, Result};
fn proxy_encode<V>(element: &V, size_limit: SizeLimit) -> Vec<u8>
where V: Encodable + Decodable + serde::Serialize + serde::Deserialize + PartialEq + Debug + 'static
where V: serde::Serialize + serde::Deserialize + PartialEq + Debug + 'static
{
let v1 = bincode::rustc_serialize::encode(element, size_limit).unwrap();
let v2 = bincode::serde::serialize(element, size_limit).unwrap();
assert_eq!(v1, v2);
v1
let v2 = serialize(element, size_limit).unwrap();
v2
}
fn proxy_decode<V>(slice: &[u8]) -> V
where V: Encodable + Decodable + serde::Serialize + serde::Deserialize + PartialEq + Debug + 'static
where V: serde::Serialize + serde::Deserialize + PartialEq + Debug + 'static
{
let e1 = bincode::rustc_serialize::decode(slice).unwrap();
let e2 = bincode::serde::deserialize(slice).unwrap();
assert_eq!(e1, e2);
e1
let e2 = deserialize(slice).unwrap();
e2
}
fn proxy_encoded_size<V>(element: &V) -> u64
where V: Encodable + serde::Serialize + PartialEq + Debug + 'static
where V: serde::Serialize + PartialEq + Debug + 'static
{
let ser_size = bincode::rustc_serialize::encoded_size(element);
let serde_size = bincode::serde::serialized_size(element);
assert_eq!(ser_size, serde_size);
ser_size
let serde_size = serialized_size(element);
serde_size
}
fn the_same<V>(element: V)
where V: Encodable+Decodable+serde::Serialize+serde::Deserialize+PartialEq+Debug+'static
where V: serde::Serialize+serde::Deserialize+PartialEq+Debug+'static
{
// Make sure that the behavior is correct when wrapping with a RefBox.
fn ref_box_correct<V>(v: &V) -> bool
where V: Encodable + Decodable + PartialEq + Debug + 'static
where V: serde::Serialize + serde::Deserialize + PartialEq + Debug + 'static
{
let rf = RefBox::new(v);
let encoded = bincode::rustc_serialize::encode(&rf, Infinite).unwrap();
let decoded: RefBox<'static, V> = bincode::rustc_serialize::decode(&encoded[..]).unwrap();
let encoded = serialize(&rf, Infinite).unwrap();
let decoded: RefBox<'static, V> = deserialize(&encoded[..]).unwrap();
decoded.take().deref() == v
}
@@ -116,7 +101,7 @@ fn test_tuple() {
#[test]
fn test_basic_struct() {
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, PartialEq, Debug)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Easy {
x: isize,
s: String,
@@ -127,13 +112,13 @@ fn test_basic_struct() {
#[test]
fn test_nested_struct() {
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, PartialEq, Debug)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Easy {
x: isize,
s: String,
y: usize
}
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, PartialEq, Debug)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Nest {
f: Easy,
b: usize,
@@ -149,7 +134,7 @@ fn test_nested_struct() {
#[test]
fn test_struct_newtype() {
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, PartialEq, Debug)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct NewtypeStr(usize);
the_same(NewtypeStr(5));
@@ -157,7 +142,7 @@ fn test_struct_newtype() {
#[test]
fn test_struct_tuple() {
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, PartialEq, Debug)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct TubStr(usize, String, f32);
the_same(TubStr(5, "hello".to_string(), 3.2));
@@ -172,7 +157,7 @@ fn test_option() {
#[test]
fn test_enum() {
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, PartialEq, Debug)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
enum TestEnum {
NoArg,
OneArg(usize),
@@ -182,7 +167,7 @@ fn test_enum() {
}
the_same(TestEnum::NoArg);
the_same(TestEnum::OneArg(4));
the_same(TestEnum::Args(4, 5));
//the_same(TestEnum::Args(4, 5));
the_same(TestEnum::AnotherNoArg);
the_same(TestEnum::StructLike{x: 4, y: 3.14159});
the_same(vec![TestEnum::NoArg, TestEnum::OneArg(5), TestEnum::AnotherNoArg,
@@ -224,44 +209,21 @@ fn test_fixed_size_array() {
the_same([0u8; 19]);
}
#[test]
fn decoding_errors() {
fn isize_invalid_encoding<T>(res: bincode::rustc_serialize::DecodingResult<T>) {
match res {
Ok(_) => panic!("Expecting error"),
Err(DecodingError::IoError(_)) => panic!("Expecting InvalidEncoding"),
Err(DecodingError::SizeLimit) => panic!("Expecting InvalidEncoding"),
Err(DecodingError::InvalidEncoding(_)) => {},
}
}
isize_invalid_encoding(decode::<bool>(&vec![0xA][..]));
isize_invalid_encoding(decode::<String>(&vec![0, 0, 0, 0, 0, 0, 0, 1, 0xFF][..]));
// Out-of-bounds variant
#[derive(RustcEncodable, RustcDecodable, Serialize)]
enum Test {
One,
Two,
};
isize_invalid_encoding(decode::<Test>(&vec![0, 0, 0, 5][..]));
isize_invalid_encoding(decode::<Option<u8>>(&vec![5, 0][..]));
}
#[test]
fn deserializing_errors() {
fn isize_invalid_deserialize<T: Debug>(res: DeserializeResult<T>) {
match res {
Err(DeserializeError::InvalidEncoding(_)) => {},
Err(DeserializeError::Serde(serde::de::value::Error::UnknownVariant(_))) => {},
Err(DeserializeError::Serde(serde::de::value::Error::InvalidValue(_))) => {},
_ => panic!("Expecting InvalidEncoding, got {:?}", res),
fn isize_invalid_deserialize<T: Debug>(res: Result<T>) {
match res.map_err(|e| *e) {
Err(ErrorKind::InvalidEncoding{..}) => {},
Err(ErrorKind::Custom(ref s)) if s.contains("invalid encoding") => {},
Err(ErrorKind::Custom(ref s)) if s.contains("invalid value") => {},
other => panic!("Expecting InvalidEncoding, got {:?}", other),
}
}
isize_invalid_deserialize(deserialize::<bool>(&vec![0xA][..]));
isize_invalid_deserialize(deserialize::<String>(&vec![0, 0, 0, 0, 0, 0, 0, 1, 0xFF][..]));
// Out-of-bounds variant
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug)]
enum Test {
One,
Two,
@@ -270,25 +232,14 @@ fn deserializing_errors() {
isize_invalid_deserialize(deserialize::<Option<u8>>(&vec![5, 0][..]));
}
#[test]
fn too_big_decode() {
let encoded = vec![0,0,0,3];
let decoded: Result<u32, _> = decode_from(&mut &encoded[..], Bounded(3));
assert!(decoded.is_err());
let encoded = vec![0,0,0,3];
let decoded: Result<u32, _> = decode_from(&mut &encoded[..], Bounded(4));
assert!(decoded.is_ok());
}
#[test]
fn too_big_deserialize() {
let serialized = vec![0,0,0,3];
let deserialized: Result<u32, _> = deserialize_from(&mut &serialized[..], Bounded(3));
let deserialized: Result<u32> = deserialize_from(&mut &serialized[..], Bounded(3));
assert!(deserialized.is_err());
let serialized = vec![0,0,0,3];
let deserialized: Result<u32, _> = deserialize_from(&mut &serialized[..], Bounded(4));
let deserialized: Result<u32> = deserialize_from(&mut &serialized[..], Bounded(4));
assert!(deserialized.is_ok());
}
@@ -302,31 +253,14 @@ fn char_serialization() {
}
}
#[test]
fn too_big_char_decode() {
let encoded = vec![0x41];
let decoded: Result<char, _> = decode_from(&mut &encoded[..], Bounded(1));
assert!(decoded.is_ok());
assert_eq!(decoded.unwrap(), 'A');
}
#[test]
fn too_big_char_deserialize() {
let serialized = vec![0x41];
let deserialized: Result<char, _> = deserialize_from(&mut &serialized[..], Bounded(1));
let deserialized: Result<char> = deserialize_from(&mut &serialized[..], Bounded(1));
assert!(deserialized.is_ok());
assert_eq!(deserialized.unwrap(), 'A');
}
#[test]
fn too_big_encode() {
assert!(encode(&0u32, Bounded(3)).is_err());
assert!(encode(&0u32, Bounded(4)).is_ok());
assert!(encode(&"abcde", Bounded(8 + 4)).is_err());
assert!(encode(&"abcde", Bounded(8 + 5)).is_ok());
}
#[test]
fn too_big_serialize() {
assert!(serialize(&0u32, Bounded(3)).is_err());
@@ -370,42 +304,6 @@ fn encode_box() {
the_same(Box::new(5));
}
#[test]
fn test_refbox_encode() {
let large_object = vec![1u32,2,3,4,5,6];
let mut large_map = HashMap::new();
large_map.insert(1, 2);
#[derive(RustcEncodable, RustcDecodable, Debug)]
enum Message<'a> {
M1(RefBox<'a, Vec<u32>>),
M2(RefBox<'a, HashMap<u32, u32>>)
}
// Test 1
{
let encoded = encode(&Message::M1(RefBox::new(&large_object)), Infinite).unwrap();
let decoded: Message<'static> = decode(&encoded[..]).unwrap();
match decoded {
Message::M1(b) => assert!(b.take().deref() == &large_object),
_ => assert!(false)
}
}
// Test 2
{
let encoded = encode(&Message::M2(RefBox::new(&large_map)), Infinite).unwrap();
let decoded: Message<'static> = decode(&encoded[..]).unwrap();
match decoded {
Message::M2(b) => assert!(b.take().deref() == &large_map),
_ => assert!(false)
}
}
}
#[test]
fn test_refbox_serialize() {
let large_object = vec![1u32,2,3,4,5,6];
@@ -413,7 +311,7 @@ fn test_refbox_serialize() {
large_map.insert(1, 2);
#[derive(RustcEncodable, RustcDecodable, Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug)]
enum Message<'a> {
M1(RefBox<'a, Vec<u32>>),
M2(RefBox<'a, HashMap<u32, u32>>)
@@ -442,15 +340,6 @@ fn test_refbox_serialize() {
}
}
#[test]
fn test_strbox_encode() {
let strx: &'static str = "hello world";
let encoded = encode(&StrBox::new(strx), Infinite).unwrap();
let decoded: StrBox<'static> = decode(&encoded[..]).unwrap();
let stringx: String = decoded.take();
assert!(strx == &stringx[..]);
}
#[test]
fn test_strbox_serialize() {
let strx: &'static str = "hello world";
@@ -460,19 +349,6 @@ fn test_strbox_serialize() {
assert!(strx == &stringx[..]);
}
#[test]
fn test_slicebox_encode() {
let slice = [1u32, 2, 3 ,4, 5];
let encoded = encode(&SliceBox::new(&slice), Infinite).unwrap();
let decoded: SliceBox<'static, u32> = decode(&encoded[..]).unwrap();
{
let sb: &[u32] = &decoded;
assert!(slice == sb);
}
let vecx: Vec<u32> = decoded.take();
assert!(slice == &vecx[..]);
}
#[test]
fn test_slicebox_serialize() {
let slice = [1u32, 2, 3 ,4, 5];
@@ -486,20 +362,15 @@ fn test_slicebox_serialize() {
assert!(slice == &vecx[..]);
}
#[test]
fn test_multi_strings_encode() {
assert!(encode(&("foo", "bar", "baz"), Infinite).is_ok());
}
#[test]
fn test_multi_strings_serialize() {
assert!(serialize(&("foo", "bar", "baz"), Infinite).is_ok());
}
/*
#[test]
fn test_oom_protection() {
use std::io::Cursor;
#[derive(RustcEncodable, RustcDecodable)]
struct FakeVec {
len: u64,
byte: u8
@@ -507,73 +378,24 @@ fn test_oom_protection() {
let x = bincode::rustc_serialize::encode(&FakeVec { len: 0xffffffffffffffffu64, byte: 1 }, bincode::SizeLimit::Bounded(10)).unwrap();
let y : Result<Vec<u8>, _> = bincode::rustc_serialize::decode_from(&mut Cursor::new(&x[..]), bincode::SizeLimit::Bounded(10));
assert!(y.is_err());
}
}*/
#[test]
fn path_buf() {
use std::path::{Path, PathBuf};
let path = Path::new("foo").to_path_buf();
let serde_encoded = bincode::serde::serialize(&path, Infinite).unwrap();
let decoded: PathBuf = bincode::serde::deserialize(&serde_encoded).unwrap();
let serde_encoded = serialize(&path, Infinite).unwrap();
let decoded: PathBuf = deserialize(&serde_encoded).unwrap();
assert!(path.to_str() == decoded.to_str());
}
#[test]
fn bytes() {
let data = b"abc\0123";
let b = bincode::rustc_serialize::encode(&data, Infinite).unwrap();
let s = bincode::serde::serialize(&data, Infinite).unwrap();
assert_eq!(b, s);
use serde::bytes::Bytes;
let s2 = bincode::serde::serialize(&Bytes::new(data), Infinite).unwrap();
assert_eq!(s, s2);
let data = b"abc\0123";
let s = serialize(&data, Infinite).unwrap();
let s2 = serialize(&Bytes::new(data), Infinite).unwrap();
assert_eq!(s[..], s2[8..]);
}
#[test]
fn test_manual_enum_encoding() {
#[derive(PartialEq)]
enum Enumeration {
Variant1,
Variant2 { val: u64 }
}
impl Encodable for Enumeration {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_enum("Enumeration", |s| {
match *self {
Enumeration::Variant1 => {
s.emit_enum_variant("Variant1", 0, 0, |_| Ok(()))
},
Enumeration::Variant2 { val } => {
s.emit_enum_struct_variant("Variant2", 1, 1, |s| {
s.emit_enum_struct_variant_field("val", 0, |s| s.emit_u64(val))
})
}
}
})
}
}
impl Decodable for Enumeration {
fn decode<D: Decoder>(s: &mut D) -> Result<Self, D::Error> {
s.read_enum("Enumeration", |s| {
s.read_enum_struct_variant(&["Variant1", "Variant2"], |s, num| {
match num {
0 => Ok(Enumeration::Variant1),
1 => Ok(Enumeration::Variant2 { val: try!(s.read_u64()) }),
_ => Err(s.error("Unknown enum variant"))
}
})
})
}
}
let encoded = bincode::rustc_serialize::encode(&Enumeration::Variant1, Infinite).unwrap();
let decoded: Enumeration = decode(&encoded[..]).unwrap();
assert!(decoded == Enumeration::Variant1);
let encoded = bincode::rustc_serialize::encode(&Enumeration::Variant2 { val: 42 }, Infinite).unwrap();
let decoded: Enumeration = decode(&encoded[..]).unwrap();
assert!(decoded == Enumeration::Variant2 { val: 42 });
}
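
A condensed usage sketch of the size-limit behaviour the surviving tests cover, assembled only from calls that appear in the updated test file above (serialize, deserialize_from, SizeLimit::{Bounded, Infinite}); it assumes the post-update bincode crate is on the build path and is illustrative, not part of the patch.

extern crate bincode;

#[test]
fn size_limit_sketch() {
    use bincode::{serialize, deserialize_from, Result};
    use bincode::SizeLimit::{Bounded, Infinite};

    // Encoding with no limit always has room for a u32...
    let bytes = serialize(&0u32, Infinite).unwrap();

    // ...but a bounded reader refuses to pull more bytes than its budget.
    let too_small: Result<u32> = deserialize_from(&mut &bytes[..], Bounded(3));
    assert!(too_small.is_err());

    let big_enough: Result<u32> = deserialize_from(&mut &bytes[..], Bounded(4));
    assert_eq!(big_enough.unwrap(), 0);
}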

View file

@@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"d0114f648b7f61e473b61c6d682fefaa4e3fadf2101aff056e2ffc52e9229d87",".travis.yml":"b71b9a6f84b9263b2b89be6ec90dff5920ee68cf9e5768d73ed71957de2d0670","COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"101ff1c674aad746f5a9cc0aec36b7bb7da61df7b621ade9f3c4052ab0901ec6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"4a45abeb1e684e30bb361dfa7db59189423348e18d310cbae694b7c8c57cd86a","src/base.rs":"3f3d5d69bd79b146cc3c0402de6260f7531c04e6a44b080f4ec7c8cedebd1337","src/color_space.rs":"7d447e774e85cc33de574637a93c9a8550b681c8d4b94e99f95261ea9740e288","src/context.rs":"7c764ffde2e0ebaecd30ced31ece29f82ddea2f3c8145f4ea59882df38fec0d2","src/data_provider.rs":"899a5762ea472b828e1726e1cefc8d2dbd237772ce171cf6b31a79f144ce8df1","src/display.rs":"906cbcb13f8214308a6afcfb3abdd04e409f48ce62673574d40087486f38b36d","src/event.rs":"7f25a98207f200f10717c2765179ece8ba02600767b7c194c49854e7bfaa470c","src/event_source.rs":"6d1c1378dab8988c46dd3bf20639913716418980b9b490a37a0d5120c60ad580","src/font.rs":"635ee3d1039c807e00fe93b974c9e375c532f09c99322dd93b9496783a662c0a","src/geometry.rs":"9f59dcf55f393a3fa001afe8aea68a85a3c9a06239aeafe6da5d2823ed37b271","src/lib.rs":"efed3638b05e6a806a6fa0c544893afeec931f6c6889bd4a69d8fd2f9838967f","src/private.rs":"87c96ed2002bd567bf02535b4c6e8e3f22827afb2dd92ee17d91cfb45bc6072c"},"package":"7b205856aba54bfd36e69a1058f45fbe0d3c37be7375309dcff4a22a2a631fea"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"d0114f648b7f61e473b61c6d682fefaa4e3fadf2101aff056e2ffc52e9229d87",".travis.yml":"b71b9a6f84b9263b2b89be6ec90dff5920ee68cf9e5768d73ed71957de2d0670","COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"21861781fe43e924d0ae78c0f74dbd8bae7e73818a3ef9692f107ca52cdb04cf","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"4a45abeb1e684e30bb361dfa7db59189423348e18d310cbae694b7c8c57cd86a","src/base.rs":"3f3d5d69bd79b146cc3c0402de6260f7531c04e6a44b080f4ec7c8cedebd1337","src/color_space.rs":"7d447e774e85cc33de574637a93c9a8550b681c8d4b94e99f95261ea9740e288","src/context.rs":"7c764ffde2e0ebaecd30ced31ece29f82ddea2f3c8145f4ea59882df38fec0d2","src/data_provider.rs":"899a5762ea472b828e1726e1cefc8d2dbd237772ce171cf6b31a79f144ce8df1","src/display.rs":"906cbcb13f8214308a6afcfb3abdd04e409f48ce62673574d40087486f38b36d","src/event.rs":"7f25a98207f200f10717c2765179ece8ba02600767b7c194c49854e7bfaa470c","src/event_source.rs":"6d1c1378dab8988c46dd3bf20639913716418980b9b490a37a0d5120c60ad580","src/font.rs":"f14340aee0979f6362da671cccf81c49f6e345cd645f07fc75e7074d06e99c70","src/geometry.rs":"9f59dcf55f393a3fa001afe8aea68a85a3c9a06239aeafe6da5d2823ed37b271","src/lib.rs":"efed3638b05e6a806a6fa0c544893afeec931f6c6889bd4a69d8fd2f9838967f","src/private.rs":"87c96ed2002bd567bf02535b4c6e8e3f22827afb2dd92ee17d91cfb45bc6072c"},"package":"ead017dcf77f503dc991f6b52de6084eeea60a94b0a652baa9bf88654a28e83f"}

4
third_party/rust/core-graphics/Cargo.toml vendored
View file

@@ -3,7 +3,7 @@ name = "core-graphics"
description = "Bindings to Core Graphics for OS X"
homepage = "https://github.com/servo/core-graphics-rs"
repository = "https://github.com/servo/core-graphics-rs"
version = "0.6.0"
version = "0.7.0"
authors = ["The Servo Project Developers"]
license = "MIT / Apache-2.0"
@@ -14,4 +14,4 @@ elcapitan = []
[dependencies]
libc = "0.2"
core-foundation = "0.3"
serde = "0.8"
serde = "0.9"

6
third_party/rust/core-graphics/src/font.rs vendored
View file

@@ -32,17 +32,17 @@ unsafe impl Send for CGFont {}
unsafe impl Sync for CGFont {}
impl Serialize for CGFont {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
let postscript_name = self.postscript_name().to_string();
postscript_name.serialize(serializer)
}
}
impl Deserialize for CGFont {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error> where D: Deserializer {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
let postscript_name: String = try!(Deserialize::deserialize(deserializer));
CGFont::from_name(&CFString::new(&*postscript_name)).map_err(|_| {
de::Error::invalid_value("Couldn't find a font with that PostScript name!")
de::Error::custom("Couldn't find a font with that PostScript name!")
})
}
}
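
For reference, the serde 0.9 signature pattern in the hunk above, restated on a hypothetical NamedThing(String) newtype that round-trips through a plain string: serializers are now taken by value and report success through the associated S::Ok type, and deserializers are likewise by value. This sketch assumes serde = "0.9" as a dependency; it is not part of the vendored code.

extern crate serde;

use serde::{Deserialize, Deserializer, Serialize, Serializer};

struct NamedThing(String);

impl Serialize for NamedThing {
    // By-value serializer; the success type comes from the Serializer itself.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
        self.0.serialize(serializer)
    }
}

impl Deserialize for NamedThing {
    // By-value deserializer; delegate to String's impl and wrap the result.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer {
        let name: String = try!(Deserialize::deserialize(deserializer));
        Ok(NamedThing(name))
    }
}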

View file

@@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"d0114f648b7f61e473b61c6d682fefaa4e3fadf2101aff056e2ffc52e9229d87",".travis.yml":"6aad961651169d31d79c0595624d1777b5c4cbb4cf2bed9a126c7e72d29411fd","COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"fd225e94253c22c5a1ad569e5e2db7e0219ed9b0bc30d8a2d4f3a5e55fa2d533","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"0c82015d302c9937e6376debd961350afeaeb6dde228aac95e3a3115c5813613","src/font.rs":"d9df5c37cb98436dbf8162af9c3449fea1eab41511d326840759d46d514bcada","src/font_collection.rs":"d4ca7f741fd54b4b22b823833dfa1f1ccd78a26cf112119ae992572835e48df6","src/font_descriptor.rs":"cedc4bd303abd4519c7c95201672ce5652f7396cd34383c059f945eefb64623b","src/font_manager.rs":"de5e22620528322d6811d01f03975c53b676ec743297590de5e17a45393df0f1","src/lib.rs":"b1fc720a9ab7ae4f054f0767e05ba5640b2d9fc8c34d05ae04f25b9dd44f6b81"},"package":"9703f459a41e622b15ca612dbc5fa4b30b6545a32864a83e0fdc538cfa08969c"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"d0114f648b7f61e473b61c6d682fefaa4e3fadf2101aff056e2ffc52e9229d87",".travis.yml":"6aad961651169d31d79c0595624d1777b5c4cbb4cf2bed9a126c7e72d29411fd","COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"958d9b6c617dff0b709bd26ddcd5ef2989ad3a64e14494c2f94d12b6986f6dae","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"0c82015d302c9937e6376debd961350afeaeb6dde228aac95e3a3115c5813613","src/font.rs":"d9df5c37cb98436dbf8162af9c3449fea1eab41511d326840759d46d514bcada","src/font_collection.rs":"d4ca7f741fd54b4b22b823833dfa1f1ccd78a26cf112119ae992572835e48df6","src/font_descriptor.rs":"cedc4bd303abd4519c7c95201672ce5652f7396cd34383c059f945eefb64623b","src/font_manager.rs":"de5e22620528322d6811d01f03975c53b676ec743297590de5e17a45393df0f1","src/lib.rs":"b1fc720a9ab7ae4f054f0767e05ba5640b2d9fc8c34d05ae04f25b9dd44f6b81"},"package":"0e9719616a10f717628e074744f8c55df7b450f7a34d29c196d14f4498aad05d"}

4
third_party/rust/core-text/Cargo.toml vendored
View file

@@ -1,6 +1,6 @@
[package]
name = "core-text"
version = "3.0.0"
version = "4.0.0"
authors = ["The Servo Project Developers"]
description = "Bindings to the Core Text framework."
license = "MIT/Apache-2.0"
@@ -11,4 +11,4 @@ libc = "0.2"
[target.x86_64-apple-darwin.dependencies]
core-foundation = "0.3"
core-graphics = "0.6"
core-graphics = "0.7"

0
third_party/rust/dwrote/.cargo-ok vendored
View file

View file

@@ -1 +0,0 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"118514fd9c4958df0d25584cda4917186c46011569f55ef350530c1ad3fbdb48",".travis.yml":"13d3e5a7bf83b04c8e8cfa14f0297bd8366d68391d977dd547f64707dffc275a","COPYRIGHT":"ec82b96487e9e778ee610c7ab245162464782cfa1f555c2299333f8dbe5c036a","Cargo.toml":"34d79e1ddea3e2169c03b1f8ad4c3d863d2029b59423030112b2853fced70498","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"62065228e42caebca7e7d7db1204cbb867033de5982ca4009928915e4095f3a3","README.md":"7a5648f52b09d3213348177860171d4f19b0fdda55e8fed7c04dafcb0ed9c215","src/approxeq.rs":"2987e046c90d948b6c7d7ddba52d10c8b7520d71dc0a50dbe7665de128d7410e","src/length.rs":"6e30abd125304985a7ae95dfb7dd92631053152a589b89df6e5c1879e91ecafa","src/lib.rs":"3ad04d3c1a18f697c8d28a1fbe871a4c75e0e957008745c6a4990e8fb07663d1","src/macros.rs":"1e999b322c2093c9c945386d15c3e95bd657c06b02e78235556db0bdbd162318","src/matrix2d.rs":"78d3b44e46be2b9c0ed1d98473cbbed78941cbf8cc76266be9f420966f1c1607","src/matrix4d.rs":"c3325f30a7a35575104a6b02fd0740d5be22e032881808550f28d22ea8eef625","src/num.rs":"62286aa642ce3afa7ebd950f50bf2197d8722907f2e23a2e2ea6690484d8b250","src/point.rs":"a585ad405a69505792efb624f0c0e6345b92b27a2c77e9a4366d6192ac914ef0","src/rect.rs":"d9bc96b8a3bc52ab2d49b53c4687e13230ab5d2920ea60e4070dea153489a633","src/scale_factor.rs":"3cffe0e88f035b8b5c9b27b105fb2825db5f317d7e067c88ee5d51cac4e6e583","src/side_offsets.rs":"f85526a421ffda63ff01a3478d4162c8717eef68e942acfa2fd9a1adee02ebb2","src/size.rs":"ef95a114f389a357ef940f42789e2cdbdbbdf4ae6993a80a74cc2c9d10c891c9","src/trig.rs":"6b207980052d13c625272f2a70a22f7741b59513c2a4882385926f497c763a63"},"package":"f93a556290e09f379cbfaa4f75ac52a72a3d2deb7d04076f312cdb2e6acba28e"}

0
third_party/rust/euclid-0.10.5/.cargo-ok vendored
View file

2
third_party/rust/euclid-0.10.5/.gitignore vendored
View file

@@ -1,2 +0,0 @@
Cargo.lock
/target/

19
third_party/rust/euclid-0.10.5/.travis.yml vendored
View file

@@ -1,19 +0,0 @@
language: rust
notifications:
webhooks: http://build.servo.org:54856/travis
matrix:
include:
- rust: stable
env: FEATURES=""
- rust: beta
env: FEATURES=""
- rust: nightly
env: FEATURES=""
- rust: nightly
env: FEATURES="unstable"
script:
- cargo build --verbose --features "$FEATURES"
- cargo test --verbose --features "$FEATURES"

5
third_party/rust/euclid-0.10.5/COPYRIGHT vendored
View file

@@ -1,5 +0,0 @@
Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
option. All files in the project carrying such notice may not be
copied, modified, or distributed except according to those terms.

22
third_party/rust/euclid-0.10.5/Cargo.toml vendored
View file

@@ -1,22 +0,0 @@
[package]
name = "euclid"
version = "0.10.5"
authors = ["The Servo Project Developers"]
description = "Geometry primitives"
documentation = "http://doc.servo.org/euclid/"
repository = "https://github.com/servo/euclid"
license = "MIT / Apache-2.0"
[features]
unstable = []
[dependencies]
heapsize = "0.3"
rustc-serialize = "0.3.2"
num-traits = {version = "0.1.32", default-features = false}
log = "0.3.1"
serde = "0.8"
[dev-dependencies]
rand = "0.3.7"
serde_test = "0.8"

201
third_party/rust/euclid-0.10.5/LICENSE-APACHE vendored
View file

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/euclid-0.10.5/LICENSE-MIT vendored
View file

@@ -1,25 +0,0 @@
Copyright (c) 2012-2013 Mozilla Foundation
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

5
third_party/rust/euclid-0.10.5/README.md vendored
View file

@@ -1,5 +0,0 @@
# euclid
This is a small library for geometric types.
[Documentation](http://doc.servo.org/euclid/)

View file

@@ -1,47 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Trait for testing approximate equality
pub trait ApproxEq<Eps> {
fn approx_epsilon() -> Eps;
fn approx_eq(&self, other: &Self) -> bool;
fn approx_eq_eps(&self, other: &Self, approx_epsilon: &Eps) -> bool;
}
impl ApproxEq<f32> for f32 {
#[inline]
fn approx_epsilon() -> f32 { 1.0e-6 }
#[inline]
fn approx_eq(&self, other: &f32) -> bool {
self.approx_eq_eps(other, &1.0e-6)
}
#[inline]
fn approx_eq_eps(&self, other: &f32, approx_epsilon: &f32) -> bool {
(*self - *other).abs() < *approx_epsilon
}
}
impl ApproxEq<f64> for f64 {
#[inline]
fn approx_epsilon() -> f64 { 1.0e-6 }
#[inline]
fn approx_eq(&self, other: &f64) -> bool {
self.approx_eq_eps(other, &1.0e-6)
}
#[inline]
fn approx_eq_eps(&self, other: &f64, approx_epsilon: &f64) -> bool {
(*self - *other).abs() < *approx_epsilon
}
}

448
third_party/rust/euclid-0.10.5/src/length.rs vendored
View file

@@ -1,448 +0,0 @@
// Copyright 2014 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A one-dimensional length, tagged with its units.
use scale_factor::ScaleFactor;
use num::Zero;
use heapsize::HeapSizeOf;
use num_traits::{NumCast, Saturating};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::cmp::Ordering;
use std::ops::{Add, Sub, Mul, Div, Neg};
use std::ops::{AddAssign, SubAssign};
use std::marker::PhantomData;
use std::fmt;
/// A one-dimensional distance, with value represented by `T` and unit of measurement `Unit`.
///
/// `T` can be any numeric type, for example a primitive type like u64 or f32.
///
/// `Unit` is not used in the representation of a Length value. It is used only at compile time
/// to ensure that a Length stored with one unit is converted explicitly before being used in an
/// expression that requires a different unit. It may be a type without values, such as an empty
/// enum.
///
/// You can multiply a Length by a `scale_factor::ScaleFactor` to convert it from one unit to
/// another. See the `ScaleFactor` docs for an example.
// Uncomment the derive, and remove the macro call, once heapsize gets
// PhantomData<T> support.
#[derive(RustcDecodable, RustcEncodable)]
pub struct Length<T, Unit>(pub T, PhantomData<Unit>);
impl<T: Clone, Unit> Clone for Length<T, Unit> {
fn clone(&self) -> Self {
Length(self.0.clone(), PhantomData)
}
}
impl<T: Copy, Unit> Copy for Length<T, Unit> {}
impl<Unit, T: HeapSizeOf> HeapSizeOf for Length<T, Unit> {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children()
}
}
impl<Unit, T> Deserialize for Length<T, Unit> where T: Deserialize {
fn deserialize<D>(deserializer: &mut D) -> Result<Length<T, Unit>,D::Error>
where D: Deserializer {
Ok(Length(try!(Deserialize::deserialize(deserializer)), PhantomData))
}
}
impl<T, Unit> Serialize for Length<T, Unit> where T: Serialize {
fn serialize<S>(&self, serializer: &mut S) -> Result<(),S::Error> where S: Serializer {
self.0.serialize(serializer)
}
}
impl<T, Unit> Length<T, Unit> {
pub fn new(x: T) -> Length<T, Unit> {
Length(x, PhantomData)
}
}
impl<Unit, T: Clone> Length<T, Unit> {
pub fn get(&self) -> T {
self.0.clone()
}
}
impl<T: fmt::Debug + Clone, U> fmt::Debug for Length<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.get().fmt(f)
}
}
impl<T: fmt::Display + Clone, U> fmt::Display for Length<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.get().fmt(f)
}
}
// length + length
impl<U, T: Clone + Add<T, Output=T>> Add for Length<T, U> {
type Output = Length<T, U>;
fn add(self, other: Length<T, U>) -> Length<T, U> {
Length::new(self.get() + other.get())
}
}
// length += length
impl<U, T: Clone + AddAssign<T>> AddAssign for Length<T, U> {
fn add_assign(&mut self, other: Length<T, U>) {
self.0 += other.get();
}
}
// length - length
impl<U, T: Clone + Sub<T, Output=T>> Sub<Length<T, U>> for Length<T, U> {
type Output = Length<T, U>;
fn sub(self, other: Length<T, U>) -> <Self as Sub>::Output {
Length::new(self.get() - other.get())
}
}
// length -= length
impl<U, T: Clone + SubAssign<T>> SubAssign for Length<T, U> {
fn sub_assign(&mut self, other: Length<T, U>) {
self.0 -= other.get();
}
}
// Saturating length + length and length - length.
impl<U, T: Clone + Saturating> Saturating for Length<T, U> {
fn saturating_add(self, other: Length<T, U>) -> Length<T, U> {
Length::new(self.get().saturating_add(other.get()))
}
fn saturating_sub(self, other: Length<T, U>) -> Length<T, U> {
Length::new(self.get().saturating_sub(other.get()))
}
}
// length / length
impl<Src, Dst, T: Clone + Div<T, Output=T>> Div<Length<T, Src>> for Length<T, Dst> {
type Output = ScaleFactor<T, Src, Dst>;
#[inline]
fn div(self, other: Length<T, Src>) -> ScaleFactor<T, Src, Dst> {
ScaleFactor::new(self.get() / other.get())
}
}
// length * scaleFactor
impl<Src, Dst, T: Clone + Mul<T, Output=T>> Mul<ScaleFactor<T, Src, Dst>> for Length<T, Src> {
type Output = Length<T, Dst>;
#[inline]
fn mul(self, scale: ScaleFactor<T, Src, Dst>) -> Length<T, Dst> {
Length::new(self.get() * scale.get())
}
}
// length / scaleFactor
impl<Src, Dst, T: Clone + Div<T, Output=T>> Div<ScaleFactor<T, Src, Dst>> for Length<T, Dst> {
type Output = Length<T, Src>;
#[inline]
fn div(self, scale: ScaleFactor<T, Src, Dst>) -> Length<T, Src> {
Length::new(self.get() / scale.get())
}
}
// -length
impl <U, T:Clone + Neg<Output=T>> Neg for Length<T, U> {
type Output = Length<T, U>;
#[inline]
fn neg(self) -> Length<T, U> {
Length::new(-self.get())
}
}
impl<Unit, T0: NumCast + Clone> Length<T0, Unit> {
/// Cast from one numeric representation to another, preserving the units.
pub fn cast<T1: NumCast + Clone>(&self) -> Option<Length<T1, Unit>> {
NumCast::from(self.get()).map(Length::new)
}
}
impl<Unit, T: Clone + PartialEq> PartialEq for Length<T, Unit> {
fn eq(&self, other: &Length<T, Unit>) -> bool { self.get().eq(&other.get()) }
}
impl<Unit, T: Clone + PartialOrd> PartialOrd for Length<T, Unit> {
fn partial_cmp(&self, other: &Length<T, Unit>) -> Option<Ordering> {
self.get().partial_cmp(&other.get())
}
}
impl<Unit, T: Clone + Eq> Eq for Length<T, Unit> {}
impl<Unit, T: Clone + Ord> Ord for Length<T, Unit> {
fn cmp(&self, other: &Length<T, Unit>) -> Ordering { self.get().cmp(&other.get()) }
}
impl<Unit, T: Zero> Zero for Length<T, Unit> {
fn zero() -> Length<T, Unit> {
Length::new(Zero::zero())
}
}
#[cfg(test)]
mod tests {
use super::Length;
use num::Zero;
use heapsize::HeapSizeOf;
use num_traits::Saturating;
use scale_factor::ScaleFactor;
use std::f32::INFINITY;
extern crate serde_test;
use self::serde_test::Token;
use self::serde_test::assert_tokens;
enum Inch {}
enum Mm {}
enum Cm {}
enum Second {}
#[test]
fn test_clone() {
// A cloned Length is a separate length with the state matching the
// original Length at the point it was cloned.
let mut variable_length: Length<f32, Inch> = Length::new(12.0);
let one_foot = variable_length.clone();
variable_length.0 = 24.0;
assert_eq!(one_foot.get(), 12.0);
assert_eq!(variable_length.get(), 24.0);
}
#[test]
fn test_heapsizeof_builtins() {
// Heap size of built-ins is zero by default.
let one_foot: Length<f32, Inch> = Length::new(12.0);
let heap_size_length_f32 = one_foot.heap_size_of_children();
assert_eq!(heap_size_length_f32, 0);
}
#[test]
fn test_heapsizeof_length_vector() {
// Heap size of any Length is just the heap size of the length value.
for n in 0..5 {
let length: Length<Vec<f32>, Inch> = Length::new(Vec::with_capacity(n));
assert_eq!(length.heap_size_of_children(), length.0.heap_size_of_children());
}
}
#[test]
fn test_length_serde() {
let one_cm: Length<f32, Mm> = Length::new(10.0);
assert_tokens(&one_cm, &[Token::F32(10.0)]);
}
#[test]
fn test_get_clones_length_value() {
// Calling get returns a clone of the Length's value.
// To test this, we need something clone-able - hence a vector.
let mut length: Length<Vec<i32>, Inch> = Length::new(vec![1, 2, 3]);
let value = length.get();
length.0.push(4);
assert_eq!(value, vec![1, 2, 3]);
assert_eq!(length.get(), vec![1, 2, 3, 4]);
}
#[test]
fn test_fmt_debug() {
// Debug and display format the value only.
let one_cm: Length<f32, Mm> = Length::new(10.0);
let result = format!("{:?}", one_cm);
assert_eq!(result, "10");
}
#[test]
fn test_fmt_display() {
// Debug and display format the value only.
let one_cm: Length<f32, Mm> = Length::new(10.0);
let result = format!("{}", one_cm);
assert_eq!(result, "10");
}
#[test]
fn test_add() {
let length1: Length<u8, Mm> = Length::new(250);
let length2: Length<u8, Mm> = Length::new(5);
let result = length1 + length2;
assert_eq!(result.get(), 255);
}
#[test]
fn test_addassign() {
let one_cm: Length<f32, Mm> = Length::new(10.0);
let mut measurement: Length<f32, Mm> = Length::new(5.0);
measurement += one_cm;
assert_eq!(measurement.get(), 15.0);
}
#[test]
fn test_sub() {
let length1: Length<u8, Mm> = Length::new(250);
let length2: Length<u8, Mm> = Length::new(5);
let result = length1 - length2;
assert_eq!(result.get(), 245);
}
#[test]
fn test_subassign() {
let one_cm: Length<f32, Mm> = Length::new(10.0);
let mut measurement: Length<f32, Mm> = Length::new(5.0);
measurement -= one_cm;
assert_eq!(measurement.get(), -5.0);
}
#[test]
fn test_saturating_add() {
let length1: Length<u8, Mm> = Length::new(250);
let length2: Length<u8, Mm> = Length::new(6);
let result = length1.saturating_add(length2);
assert_eq!(result.get(), 255);
}
#[test]
fn test_saturating_sub() {
let length1: Length<u8, Mm> = Length::new(5);
let length2: Length<u8, Mm> = Length::new(10);
let result = length1.saturating_sub(length2);
assert_eq!(result.get(), 0);
}
#[test]
fn test_division_by_length() {
// Division results in a ScaleFactor from denominator units
// to numerator units.
let length: Length<f32, Cm> = Length::new(5.0);
let duration: Length<f32, Second> = Length::new(10.0);
let result = length / duration;
let expected: ScaleFactor<f32, Second, Cm> = ScaleFactor::new(0.5);
assert_eq!(result, expected);
}
#[test]
fn test_multiplication() {
let length_mm: Length<f32, Mm> = Length::new(10.0);
let cm_per_mm: ScaleFactor<f32, Mm, Cm> = ScaleFactor::new(0.1);
let result = length_mm * cm_per_mm;
let expected: Length<f32, Cm> = Length::new(1.0);
assert_eq!(result, expected);
}
#[test]
fn test_division_by_scalefactor() {
let length: Length<f32, Cm> = Length::new(5.0);
let cm_per_second: ScaleFactor<f32, Second, Cm> = ScaleFactor::new(10.0);
let result = length / cm_per_second;
let expected: Length<f32, Second> = Length::new(0.5);
assert_eq!(result, expected);
}
#[test]
fn test_negation() {
let length: Length<f32, Cm> = Length::new(5.0);
let result = -length;
let expected: Length<f32, Cm> = Length::new(-5.0);
assert_eq!(result, expected);
}
#[test]
fn test_cast() {
let length_as_i32: Length<i32, Cm> = Length::new(5);
let result: Length<f32, Cm> = length_as_i32.cast().unwrap();
let length_as_f32: Length<f32, Cm> = Length::new(5.0);
assert_eq!(result, length_as_f32);
}
#[test]
fn test_equality() {
let length_5_point_0: Length<f32, Cm> = Length::new(5.0);
let length_5_point_1: Length<f32, Cm> = Length::new(5.1);
let length_0_point_1: Length<f32, Cm> = Length::new(0.1);
assert!(length_5_point_0 == length_5_point_1 - length_0_point_1);
assert!(length_5_point_0 != length_5_point_1);
}
#[test]
fn test_order() {
let length_5_point_0: Length<f32, Cm> = Length::new(5.0);
let length_5_point_1: Length<f32, Cm> = Length::new(5.1);
let length_0_point_1: Length<f32, Cm> = Length::new(0.1);
assert!(length_5_point_0 < length_5_point_1);
assert!(length_5_point_0 <= length_5_point_1);
assert!(length_5_point_0 <= length_5_point_1 - length_0_point_1);
assert!(length_5_point_1 > length_5_point_0);
assert!(length_5_point_1 >= length_5_point_0);
assert!(length_5_point_0 >= length_5_point_1 - length_0_point_1);
}
#[test]
fn test_zero_add() {
type LengthCm = Length<f32, Cm>;
let length: LengthCm = Length::new(5.0);
let result = length - LengthCm::zero();
assert_eq!(result, length);
}
#[test]
fn test_zero_division() {
type LengthCm = Length<f32, Cm>;
let length: LengthCm = Length::new(5.0);
let length_zero: LengthCm = Length::zero();
let result = length / length_zero;
let expected: ScaleFactor<f32, Cm, Cm> = ScaleFactor::new(INFINITY);
assert_eq!(result, expected);
}
}
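
The deleted length.rs above is built around a phantom unit parameter. The dependency-free sketch below (all names made up) shows the core of that trick: the unit exists only at compile time, so mixing units is a type error while the runtime representation stays a bare number.

use std::marker::PhantomData;
use std::ops::Add;

// A value tagged with a zero-sized, compile-time-only unit.
struct Tagged<T, Unit>(T, PhantomData<Unit>);

impl<T, Unit> Tagged<T, Unit> {
    fn new(v: T) -> Self { Tagged(v, PhantomData) }
}

// Addition is only defined between values carrying the same unit.
impl<T: Add<Output = T>, Unit> Add for Tagged<T, Unit> {
    type Output = Tagged<T, Unit>;
    fn add(self, other: Self) -> Tagged<T, Unit> { Tagged::new(self.0 + other.0) }
}

enum Mm {}
enum Inch {}

fn main() {
    let a: Tagged<f32, Mm> = Tagged::new(10.0);
    let b: Tagged<f32, Mm> = Tagged::new(5.0);
    let _sum = a + b;                         // fine: both are Mm
    let _other: Tagged<f32, Inch> = Tagged::new(1.0);
    // let _bad = _sum + _other;              // rejected at compile time
}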

110
third_party/rust/euclid-0.10.5/src/lib.rs vendored
View file

@@ -1,110 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg_attr(feature = "unstable", feature(asm, repr_simd, test))]
//! A collection of strongly typed math tools for computer graphics with an inclination
//! towards 2d graphics and layout.
//!
//! All types are generic over the scalar type of their component (f32, i32, etc.),
//! and tagged with a generic Unit parameter which is useful to prevent mixing
//! values from different spaces. For example it should not be legal to translate
//! a screen-space position by a world-space vector and this can be expressed using
//! the generic Unit parameter.
//!
//! This unit system is not mandatory and all Typed* structures have an alias
//! with the default unit: `UnknownUnit`.
//! for example ```Point2D<T>``` is equivalent to ```TypedPoint2D<T, UnknownUnit>```.
//! Client code typically creates a set of aliases for each type and doesn't need
//! to deal with the specifics of typed units further. For example:
//!
//! ```rust
//! use euclid::*;
//! pub struct ScreenSpace;
//! pub type ScreenPoint = TypedPoint2D<f32, ScreenSpace>;
//! pub type ScreenSize = TypedSize2D<f32, ScreenSpace>;
//! pub struct WorldSpace;
//! pub type WorldPoint = TypedPoint3D<f32, WorldSpace>;
//! pub type ProjectionMatrix = TypedMatrix4D<f32, WorldSpace, ScreenSpace>;
//! // etc...
//! ```
//!
//! Components are accessed in their scalar form by default for convenience, and most
//! types additionally implement strongly typed accessors which return typed ```Length``` wrappers.
//! For example:
//!
//! ```rust
//! # use euclid::*;
//! # pub struct WorldSpace;
//! # pub type WorldPoint = TypedPoint3D<f32, WorldSpace>;
//! let p = WorldPoint::new(0.0, 1.0, 1.0);
//! // p.x is an f32.
//! println!("p.x = {:?} ", p.x);
//! // p.x is a Length<f32, WorldSpace>.
//! println!("p.x_typed() = {:?} ", p.x_typed());
//! // Length::get returns the scalar value (f32).
//! assert_eq!(p.x, p.x_typed().get());
//! ```
extern crate heapsize;
#[macro_use]
extern crate log;
extern crate rustc_serialize;
extern crate serde;
#[cfg(test)]
extern crate rand;
#[cfg(feature = "unstable")]
extern crate test;
extern crate num_traits;
pub use length::Length;
pub use scale_factor::ScaleFactor;
pub use matrix2d::{Matrix2D, TypedMatrix2D};
pub use matrix4d::{Matrix4D, TypedMatrix4D};
pub use point::{
Point2D, TypedPoint2D,
Point3D, TypedPoint3D,
Point4D, TypedPoint4D,
};
pub use rect::{Rect, TypedRect};
pub use side_offsets::{SideOffsets2D, TypedSideOffsets2D};
#[cfg(feature = "unstable")] pub use side_offsets::SideOffsets2DSimdI32;
pub use size::{Size2D, TypedSize2D};
pub mod approxeq;
pub mod length;
#[macro_use]
mod macros;
pub mod matrix2d;
pub mod matrix4d;
pub mod num;
pub mod point;
pub mod rect;
pub mod scale_factor;
pub mod side_offsets;
pub mod size;
mod trig;
/// The default unit.
#[derive(Clone, Copy, RustcDecodable, RustcEncodable)]
pub struct UnknownUnit;
/// Unit for angles in radians.
pub struct Rad;
/// Unit for angles in degrees.
pub struct Deg;
/// A value in radians.
pub type Radians<T> = Length<T, Rad>;
/// A value in Degrees.
pub type Degrees<T> = Length<T, Deg>;

86
third_party/rust/euclid-0.10.5/src/macros.rs vendored
View file

@@ -1,86 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
macro_rules! define_matrix {
(
$(#[$attr:meta])*
pub struct $name:ident<T, $($phantom:ident),+> {
$(pub $field:ident: T,)+
}
) => (
$(#[$attr])*
pub struct $name<T, $($phantom),+> {
$(pub $field: T,)+
_unit: PhantomData<($($phantom),+)>
}
impl<T: Clone, $($phantom),+> Clone for $name<T, $($phantom),+> {
fn clone(&self) -> Self {
$name {
$($field: self.$field.clone(),)+
_unit: PhantomData,
}
}
}
impl<T: Copy, $($phantom),+> Copy for $name<T, $($phantom),+> {}
impl<T, $($phantom),+> ::heapsize::HeapSizeOf for $name<T, $($phantom),+>
where T: ::heapsize::HeapSizeOf
{
fn heap_size_of_children(&self) -> usize {
$(self.$field.heap_size_of_children() +)+ 0
}
}
impl<T, $($phantom),+> ::serde::Deserialize for $name<T, $($phantom),+>
where T: ::serde::Deserialize
{
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: ::serde::Deserializer
{
let ($($field,)+) =
try!(::serde::Deserialize::deserialize(deserializer));
Ok($name {
$($field: $field,)+
_unit: PhantomData,
})
}
}
impl<T, $($phantom),+> ::serde::Serialize for $name<T, $($phantom),+>
where T: ::serde::Serialize
{
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: ::serde::Serializer
{
($(&self.$field,)+).serialize(serializer)
}
}
impl<T, $($phantom),+> ::std::cmp::Eq for $name<T, $($phantom),+>
where T: ::std::cmp::Eq {}
impl<T, $($phantom),+> ::std::cmp::PartialEq for $name<T, $($phantom),+>
where T: ::std::cmp::PartialEq
{
fn eq(&self, other: &Self) -> bool {
true $(&& self.$field == other.$field)+
}
}
impl<T, $($phantom),+> ::std::hash::Hash for $name<T, $($phantom),+>
where T: ::std::hash::Hash
{
fn hash<H: ::std::hash::Hasher>(&self, h: &mut H) {
$(self.$field.hash(h);)+
}
}
)
}
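
The macro above stamps out a matrix struct plus a bundle of trait impls while keeping the PhantomData field hidden from callers. A pared-down, hypothetical variant of the same idea, included only to show the shape of such a macro (one arm, one generated impl):

macro_rules! define_tagged {
    (pub struct $name:ident<T, $unit:ident> { $(pub $field:ident: T,)+ }) => {
        pub struct $name<T, $unit> {
            $(pub $field: T,)+
            _unit: ::std::marker::PhantomData<$unit>,
        }

        impl<T: Clone, $unit> Clone for $name<T, $unit> {
            fn clone(&self) -> Self {
                $name {
                    $($field: self.$field.clone(),)+
                    _unit: ::std::marker::PhantomData,
                }
            }
        }
    };
}

define_tagged! {
    pub struct Pair<T, U> {
        pub x: T,
        pub y: T,
    }
}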

404
third_party/rust/euclid-0.10.5/src/matrix2d.rs vendored
View file

@@ -1,404 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{UnknownUnit, Radians};
use num::{One, Zero};
use point::TypedPoint2D;
use rect::TypedRect;
use std::ops::{Add, Mul, Div, Sub};
use std::marker::PhantomData;
use approxeq::ApproxEq;
use trig::Trig;
use std::fmt;
define_matrix! {
/// A 2d transform stored as a 2 by 3 matrix in row-major order in memory,
/// useful to represent 2d transformations.
///
/// Matrices can be parametrized over the source and destination units, to describe a
/// transformation from a space to another.
/// For example, TypedMatrix2D<f32, WorldSpace, ScreenSpace>::transform_point4d
/// takes a TypedPoint2D<f32, WorldSpace> and returns a TypedPoint2D<f32, ScreenSpace>.
///
/// Matrices expose a set of convenience methods for pre- and post-transformations.
/// A pre-transformation corresponds to adding an operation that is applied before
/// the rest of the transformation, while a post-transformation adds an operation
/// that is applied after.
pub struct TypedMatrix2D<T, Src, Dst> {
pub m11: T, pub m12: T,
pub m21: T, pub m22: T,
pub m31: T, pub m32: T,
}
}
/// The default 2d matrix type with no units.
pub type Matrix2D<T> = TypedMatrix2D<T, UnknownUnit, UnknownUnit>;
impl<T: Copy, Src, Dst> TypedMatrix2D<T, Src, Dst> {
/// Create a matrix specifying its components in row-major order.
pub fn row_major(m11: T, m12: T, m21: T, m22: T, m31: T, m32: T) -> TypedMatrix2D<T, Src, Dst> {
TypedMatrix2D {
m11: m11, m12: m12,
m21: m21, m22: m22,
m31: m31, m32: m32,
_unit: PhantomData,
}
}
/// Create a matrix specifying its components in column-major order.
pub fn column_major(m11: T, m21: T, m31: T, m12: T, m22: T, m32: T) -> TypedMatrix2D<T, Src, Dst> {
TypedMatrix2D {
m11: m11, m12: m12,
m21: m21, m22: m22,
m31: m31, m32: m32,
_unit: PhantomData,
}
}
/// Returns an array containing this matrix's terms in row-major order (the order
/// in which the matrix is actually laid out in memory).
pub fn to_row_major_array(&self) -> [T; 6] {
[
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32
]
}
/// Returns an array containing this matrix's terms in column-major order.
pub fn to_column_major_array(&self) -> [T; 6] {
[
self.m11, self.m21, self.m31,
self.m12, self.m22, self.m32
]
}
/// Drop the units, preserving only the numeric value.
pub fn to_untyped(&self) -> Matrix2D<T> {
Matrix2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32
)
}
/// Tag a unitless value with units.
pub fn from_untyped(p: &Matrix2D<T>) -> TypedMatrix2D<T, Src, Dst> {
TypedMatrix2D::row_major(
p.m11, p.m12,
p.m21, p.m22,
p.m31, p.m32
)
}
}
impl<T, Src, Dst> TypedMatrix2D<T, Src, Dst>
where T: Copy + Clone +
Add<T, Output=T> +
Mul<T, Output=T> +
Div<T, Output=T> +
Sub<T, Output=T> +
Trig +
PartialOrd +
One + Zero {
pub fn identity() -> TypedMatrix2D<T, Src, Dst> {
let (_0, _1) = (Zero::zero(), One::one());
TypedMatrix2D::row_major(
_1, _0,
_0, _1,
_0, _0
)
}
/// Returns the multiplication of the two matrices such that mat's transformation
/// applies after self's transformation.
pub fn post_mul<NewDst>(&self, mat: &TypedMatrix2D<T, Dst, NewDst>) -> TypedMatrix2D<T, Src, NewDst> {
TypedMatrix2D::row_major(
self.m11 * mat.m11 + self.m12 * mat.m21,
self.m11 * mat.m12 + self.m12 * mat.m22,
self.m21 * mat.m11 + self.m22 * mat.m21,
self.m21 * mat.m12 + self.m22 * mat.m22,
self.m31 * mat.m11 + self.m32 * mat.m21 + mat.m31,
self.m31 * mat.m12 + self.m32 * mat.m22 + mat.m32,
)
}
/// Returns the multiplication of the two matrices such that mat's transformation
/// applies before self's transformation.
pub fn pre_mul<NewSrc>(&self, mat: &TypedMatrix2D<T, NewSrc, Src>) -> TypedMatrix2D<T, NewSrc, Dst> {
mat.post_mul(self)
}
/// Returns a translation matrix.
pub fn create_translation(x: T, y: T) -> TypedMatrix2D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
TypedMatrix2D::row_major(
_1, _0,
_0, _1,
x, y
)
}
/// Applies a translation after self's transformation and returns the resulting matrix.
pub fn post_translated(&self, x: T, y: T) -> TypedMatrix2D<T, Src, Dst> {
self.post_mul(&TypedMatrix2D::create_translation(x, y))
}
/// Applies a translation before self's transformation and returns the resulting matrix.
pub fn pre_translated(&self, x: T, y: T) -> TypedMatrix2D<T, Src, Dst> {
self.pre_mul(&TypedMatrix2D::create_translation(x, y))
}
/// Returns a scale matrix.
pub fn create_scale(x: T, y: T) -> TypedMatrix2D<T, Src, Dst> {
let _0 = Zero::zero();
TypedMatrix2D::row_major(
x, _0,
_0, y,
_0, _0
)
}
/// Applies a scale after self's transformation and returns the resulting matrix.
pub fn post_scaled(&self, x: T, y: T) -> TypedMatrix2D<T, Src, Dst> {
self.post_mul(&TypedMatrix2D::create_scale(x, y))
}
/// Applies a scale before self's transformation and returns the resulting matrix.
pub fn pre_scaled(&self, x: T, y: T) -> TypedMatrix2D<T, Src, Dst> {
TypedMatrix2D::row_major(
self.m11 * x, self.m12,
self.m21, self.m22 * y,
self.m31, self.m32
)
}
/// Returns a rotation matrix.
pub fn create_rotation(theta: Radians<T>) -> TypedMatrix2D<T, Src, Dst> {
let _0 = Zero::zero();
let cos = theta.get().cos();
let sin = theta.get().sin();
TypedMatrix2D::row_major(
cos, _0 - sin,
sin, cos,
_0, _0
)
}
/// Applies a rotation after self's transformation and returns the resulting matrix.
pub fn post_rotated(&self, theta: Radians<T>) -> TypedMatrix2D<T, Src, Dst> {
self.post_mul(&TypedMatrix2D::create_rotation(theta))
}
/// Applies a rotation before self's transformation and returns the resulting matrix.
pub fn pre_rotated(&self, theta: Radians<T>) -> TypedMatrix2D<T, Src, Dst> {
self.pre_mul(&TypedMatrix2D::create_rotation(theta))
}
/// Returns the given point transformed by this matrix.
#[inline]
pub fn transform_point(&self, point: &TypedPoint2D<T, Src>) -> TypedPoint2D<T, Dst> {
TypedPoint2D::new(point.x * self.m11 + point.y * self.m21 + self.m31,
point.x * self.m12 + point.y * self.m22 + self.m32)
}
/// Returns a rectangle that encompasses the result of transforming the given rectangle by this
/// matrix.
#[inline]
pub fn transform_rect(&self, rect: &TypedRect<T, Src>) -> TypedRect<T, Dst> {
TypedRect::from_points(&[
self.transform_point(&rect.origin),
self.transform_point(&rect.top_right()),
self.transform_point(&rect.bottom_left()),
self.transform_point(&rect.bottom_right()),
])
}
/// Computes and returns the determinant of this matrix.
pub fn determinant(&self) -> T {
self.m11 * self.m22 - self.m12 * self.m21
}
/// Returns the inverse matrix if possible.
pub fn inverse(&self) -> Option<TypedMatrix2D<T, Dst, Src>> {
let det = self.determinant();
let _0: T = Zero::zero();
let _1: T = One::one();
if det == _0 {
return None;
}
let inv_det = _1 / det;
Some(TypedMatrix2D::row_major(
inv_det * self.m22,
inv_det * (_0 - self.m12),
inv_det * (_0 - self.m21),
inv_det * self.m11,
inv_det * (self.m21 * self.m32 - self.m22 * self.m31),
inv_det * (self.m31 * self.m12 - self.m11 * self.m32),
))
}
/// Returns the same matrix with a different destination unit.
#[inline]
pub fn with_destination<NewDst>(&self) -> TypedMatrix2D<T, Src, NewDst> {
TypedMatrix2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32,
)
}
/// Returns the same matrix with a different source unit.
#[inline]
pub fn with_source<NewSrc>(&self) -> TypedMatrix2D<T, NewSrc, Dst> {
TypedMatrix2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32,
)
}
}
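// Illustrative sketch (not part of the original file): how pre-/post-multiplication
// order affects the result. `post_translated` appends the translation (applied after
// the scale), while `pre_translated` prepends it. The values are chosen only for
// this example.
#[cfg(test)]
mod ordering_example {
    use super::*;
    use point::Point2D;

    #[test]
    fn pre_vs_post() {
        let scale_then_translate: Matrix2D<f32> =
            Matrix2D::create_scale(2.0, 2.0).post_translated(10.0, 0.0);
        let translate_then_scale: Matrix2D<f32> =
            Matrix2D::create_scale(2.0, 2.0).pre_translated(10.0, 0.0);

        // (1, 1) is scaled to (2, 2), then moved to (12, 2).
        assert_eq!(scale_then_translate.transform_point(&Point2D::new(1.0, 1.0)),
                   Point2D::new(12.0, 2.0));
        // (1, 1) is first moved to (11, 1), then scaled to (22, 2).
        assert_eq!(translate_then_scale.transform_point(&Point2D::new(1.0, 1.0)),
                   Point2D::new(22.0, 2.0));
    }
}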
impl<T: ApproxEq<T>, Src, Dst> TypedMatrix2D<T, Src, Dst> {
pub fn approx_eq(&self, other: &Self) -> bool {
self.m11.approx_eq(&other.m11) && self.m12.approx_eq(&other.m12) &&
self.m21.approx_eq(&other.m21) && self.m22.approx_eq(&other.m22) &&
self.m31.approx_eq(&other.m31) && self.m32.approx_eq(&other.m32)
}
}
impl<T: Copy + fmt::Debug, Src, Dst> fmt::Debug for TypedMatrix2D<T, Src, Dst> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_row_major_array().fmt(f)
}
}
#[cfg(test)]
mod test {
use super::*;
use approxeq::ApproxEq;
use point::Point2D;
use Radians;
use std::f32::consts::FRAC_PI_2;
type Mat = Matrix2D<f32>;
fn rad(v: f32) -> Radians<f32> { Radians::new(v) }
#[test]
pub fn test_translation() {
let t1 = Mat::create_translation(1.0, 2.0);
let t2 = Mat::identity().pre_translated(1.0, 2.0);
let t3 = Mat::identity().post_translated(1.0, 2.0);
assert_eq!(t1, t2);
assert_eq!(t1, t3);
assert_eq!(t1.transform_point(&Point2D::new(1.0, 1.0)), Point2D::new(2.0, 3.0));
assert_eq!(t1.post_mul(&t1), Mat::create_translation(2.0, 4.0));
}
#[test]
pub fn test_rotation() {
let r1 = Mat::create_rotation(rad(FRAC_PI_2));
let r2 = Mat::identity().pre_rotated(rad(FRAC_PI_2));
let r3 = Mat::identity().post_rotated(rad(FRAC_PI_2));
assert_eq!(r1, r2);
assert_eq!(r1, r3);
assert!(r1.transform_point(&Point2D::new(1.0, 2.0)).approx_eq(&Point2D::new(2.0, -1.0)));
assert!(r1.post_mul(&r1).approx_eq(&Mat::create_rotation(rad(FRAC_PI_2*2.0))));
}
#[test]
pub fn test_scale() {
let s1 = Mat::create_scale(2.0, 3.0);
let s2 = Mat::identity().pre_scaled(2.0, 3.0);
let s3 = Mat::identity().post_scaled(2.0, 3.0);
assert_eq!(s1, s2);
assert_eq!(s1, s3);
assert!(s1.transform_point(&Point2D::new(2.0, 2.0)).approx_eq(&Point2D::new(4.0, 6.0)));
}
#[test]
fn test_column_major() {
assert_eq!(
Mat::row_major(
1.0, 2.0,
3.0, 4.0,
5.0, 6.0
),
Mat::column_major(
1.0, 3.0, 5.0,
2.0, 4.0, 6.0,
)
);
}
#[test]
pub fn test_inverse_simple() {
let m1 = Mat::identity();
let m2 = m1.inverse().unwrap();
assert!(m1.approx_eq(&m2));
}
#[test]
pub fn test_inverse_scale() {
let m1 = Mat::create_scale(1.5, 0.3);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_mul(&m2).approx_eq(&Mat::identity()));
}
#[test]
pub fn test_inverse_translate() {
let m1 = Mat::create_translation(-132.0, 0.3);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_mul(&m2).approx_eq(&Mat::identity()));
}
#[test]
fn test_inverse_none() {
assert!(Mat::create_scale(2.0, 0.0).inverse().is_none());
assert!(Mat::create_scale(2.0, 2.0).inverse().is_some());
}
#[test]
pub fn test_pre_post() {
let m1 = Matrix2D::identity().post_scaled(1.0, 2.0).post_translated(1.0, 2.0);
let m2 = Matrix2D::identity().pre_translated(1.0, 2.0).pre_scaled(1.0, 2.0);
assert!(m1.approx_eq(&m2));
let r = Mat::create_rotation(rad(FRAC_PI_2));
let t = Mat::create_translation(2.0, 3.0);
let a = Point2D::new(1.0, 1.0);
assert!(r.post_mul(&t).transform_point(&a).approx_eq(&Point2D::new(3.0, 2.0)));
assert!(t.post_mul(&r).transform_point(&a).approx_eq(&Point2D::new(4.0, -3.0)));
assert!(t.post_mul(&r).transform_point(&a).approx_eq(&r.transform_point(&t.transform_point(&a))));
assert!(r.pre_mul(&t).transform_point(&a).approx_eq(&Point2D::new(4.0, -3.0)));
assert!(t.pre_mul(&r).transform_point(&a).approx_eq(&Point2D::new(3.0, 2.0)));
assert!(t.pre_mul(&r).transform_point(&a).approx_eq(&t.transform_point(&r.transform_point(&a))));
}
#[test]
fn test_size_of() {
use std::mem::size_of;
assert_eq!(size_of::<Matrix2D<f32>>(), 6*size_of::<f32>());
assert_eq!(size_of::<Matrix2D<f64>>(), 6*size_of::<f64>());
}
}

797  third_party/rust/euclid-0.10.5/src/matrix4d.rs (vendored)

@@ -1,797 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::{UnknownUnit, Radians};
use approxeq::ApproxEq;
use trig::Trig;
use point::{TypedPoint2D, TypedPoint3D, TypedPoint4D};
use rect::TypedRect;
use matrix2d::TypedMatrix2D;
use scale_factor::ScaleFactor;
use num::{One, Zero};
use std::ops::{Add, Mul, Sub, Div, Neg};
use std::marker::PhantomData;
use std::fmt;
define_matrix! {
/// A 4 by 4 matrix stored in row-major order in memory, useful to represent
/// 3d transformations.
///
/// Matrices can be parametrized over the source and destination units, to describe a
/// transformation from a space to another.
/// For example, TypedMatrix4D<f32, WorldSpace, ScreenSpace>::transform_point4d
/// takes a TypedPoint4D<f32, WorldSpace> and returns a TypedPoint4D<f32, ScreenSpace>.
///
/// Matrices expose a set of convenience methods for pre- and post-transformations.
/// A pre-transformation corresponds to adding an operation that is applied before
/// the rest of the transformation, while a post-transformation adds an operation
/// that is applied after.
pub struct TypedMatrix4D<T, Src, Dst> {
pub m11: T, pub m12: T, pub m13: T, pub m14: T,
pub m21: T, pub m22: T, pub m23: T, pub m24: T,
pub m31: T, pub m32: T, pub m33: T, pub m34: T,
pub m41: T, pub m42: T, pub m43: T, pub m44: T,
}
}
/// The default 4d matrix type with no units.
pub type Matrix4D<T> = TypedMatrix4D<T, UnknownUnit, UnknownUnit>;
impl<T, Src, Dst> TypedMatrix4D<T, Src, Dst> {
/// Create a matrix specifying its components in row-major order.
///
/// For example, the translation terms m41, m42, m43 on the last row (with the
/// row-major convention) are the 13th, 14th and 15th parameters.
#[inline]
pub fn row_major(
m11: T, m12: T, m13: T, m14: T,
m21: T, m22: T, m23: T, m24: T,
m31: T, m32: T, m33: T, m34: T,
m41: T, m42: T, m43: T, m44: T)
-> TypedMatrix4D<T, Src, Dst> {
TypedMatrix4D {
m11: m11, m12: m12, m13: m13, m14: m14,
m21: m21, m22: m22, m23: m23, m24: m24,
m31: m31, m32: m32, m33: m33, m34: m34,
m41: m41, m42: m42, m43: m43, m44: m44,
_unit: PhantomData,
}
}
/// Create a matrix specifying its components in column-major order.
///
/// For example, the translation terms m41, m42, m43 on the last column (with the
/// column-major convention) are the 4th, 8th and 12th parameters.
#[inline]
pub fn column_major(
m11: T, m21: T, m31: T, m41: T,
m12: T, m22: T, m32: T, m42: T,
m13: T, m23: T, m33: T, m43: T,
m14: T, m24: T, m34: T, m44: T)
-> TypedMatrix4D<T, Src, Dst> {
TypedMatrix4D {
m11: m11, m12: m12, m13: m13, m14: m14,
m21: m21, m22: m22, m23: m23, m24: m24,
m31: m31, m32: m32, m33: m33, m34: m34,
m41: m41, m42: m42, m43: m43, m44: m44,
_unit: PhantomData,
}
}
}
impl <T, Src, Dst> TypedMatrix4D<T, Src, Dst>
where T: Copy + Clone +
Add<T, Output=T> +
Sub<T, Output=T> +
Mul<T, Output=T> +
Div<T, Output=T> +
Neg<Output=T> +
ApproxEq<T> +
PartialOrd +
Trig +
One + Zero {
/// Create a 4 by 4 matrix representing a 2d transformation, specifying its components
/// in row-major order.
#[inline]
pub fn row_major_2d(m11: T, m12: T, m21: T, m22: T, m41: T, m42: T) -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
TypedMatrix4D::row_major(
m11, m12, _0, _0,
m21, m22, _0, _0,
_0, _0, _1, _0,
m41, m42, _0, _1
)
}
/// Create an orthogonal projection matrix.
pub fn ortho(left: T, right: T,
bottom: T, top: T,
near: T, far: T) -> TypedMatrix4D<T, Src, Dst> {
let tx = -((right + left) / (right - left));
let ty = -((top + bottom) / (top - bottom));
let tz = -((far + near) / (far - near));
let (_0, _1): (T, T) = (Zero::zero(), One::one());
let _2 = _1 + _1;
TypedMatrix4D::row_major(
_2 / (right - left), _0 , _0 , _0,
_0 , _2 / (top - bottom), _0 , _0,
_0 , _0 , -_2 / (far - near), _0,
tx , ty , tz , _1
)
}
#[inline]
pub fn identity() -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
TypedMatrix4D::row_major(
_1, _0, _0, _0,
_0, _1, _0, _0,
_0, _0, _1, _0,
_0, _0, _0, _1
)
}
/// Returns true if this matrix can be represented with a TypedMatrix2D.
///
/// See https://drafts.csswg.org/css-transforms/#2d-matrix
#[inline]
pub fn is_2d(&self) -> bool {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
self.m31 == _0 && self.m32 == _0 &&
self.m13 == _0 && self.m23 == _0 &&
self.m43 == _0 && self.m14 == _0 &&
self.m24 == _0 && self.m34 == _0 &&
self.m33 == _1 && self.m44 == _1
}
/// Create a 2D matrix picking the relevant terms from this matrix.
///
/// This method assumes that self represents a 2d transformation, callers
/// should check that self.is_2d() returns true beforehand.
pub fn to_2d(&self) -> TypedMatrix2D<T, Src, Dst> {
TypedMatrix2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m41, self.m42
)
}
pub fn approx_eq(&self, other: &TypedMatrix4D<T, Src, Dst>) -> bool {
self.m11.approx_eq(&other.m11) && self.m12.approx_eq(&other.m12) &&
self.m13.approx_eq(&other.m13) && self.m14.approx_eq(&other.m14) &&
self.m21.approx_eq(&other.m21) && self.m22.approx_eq(&other.m22) &&
self.m23.approx_eq(&other.m23) && self.m24.approx_eq(&other.m24) &&
self.m31.approx_eq(&other.m31) && self.m32.approx_eq(&other.m32) &&
self.m33.approx_eq(&other.m33) && self.m34.approx_eq(&other.m34) &&
self.m41.approx_eq(&other.m41) && self.m42.approx_eq(&other.m42) &&
self.m43.approx_eq(&other.m43) && self.m44.approx_eq(&other.m44)
}
/// Returns the same matrix with a different destination unit.
#[inline]
pub fn with_destination<NewDst>(&self) -> TypedMatrix4D<T, Src, NewDst> {
TypedMatrix4D::row_major(
self.m11, self.m12, self.m13, self.m14,
self.m21, self.m22, self.m23, self.m24,
self.m31, self.m32, self.m33, self.m34,
self.m41, self.m42, self.m43, self.m44,
)
}
/// Returns the same matrix with a different source unit.
#[inline]
pub fn with_source<NewSrc>(&self) -> TypedMatrix4D<T, NewSrc, Dst> {
TypedMatrix4D::row_major(
self.m11, self.m12, self.m13, self.m14,
self.m21, self.m22, self.m23, self.m24,
self.m31, self.m32, self.m33, self.m34,
self.m41, self.m42, self.m43, self.m44,
)
}
/// Drop the units, preserving only the numeric value.
#[inline]
pub fn to_untyped(&self) -> Matrix4D<T> {
Matrix4D::row_major(
self.m11, self.m12, self.m13, self.m14,
self.m21, self.m22, self.m23, self.m24,
self.m31, self.m32, self.m33, self.m34,
self.m41, self.m42, self.m43, self.m44,
)
}
/// Tag a unitless value with units.
#[inline]
pub fn from_untyped(m: &Matrix4D<T>) -> Self {
TypedMatrix4D::row_major(
m.m11, m.m12, m.m13, m.m14,
m.m21, m.m22, m.m23, m.m24,
m.m31, m.m32, m.m33, m.m34,
m.m41, m.m42, m.m43, m.m44,
)
}
/// Returns the multiplication of the two matrices such that mat's transformation
/// applies after self's transformation.
pub fn post_mul<NewDst>(&self, mat: &TypedMatrix4D<T, Dst, NewDst>) -> TypedMatrix4D<T, Src, NewDst> {
TypedMatrix4D::row_major(
self.m11 * mat.m11 + self.m12 * mat.m21 + self.m13 * mat.m31 + self.m14 * mat.m41,
self.m11 * mat.m12 + self.m12 * mat.m22 + self.m13 * mat.m32 + self.m14 * mat.m42,
self.m11 * mat.m13 + self.m12 * mat.m23 + self.m13 * mat.m33 + self.m14 * mat.m43,
self.m11 * mat.m14 + self.m12 * mat.m24 + self.m13 * mat.m34 + self.m14 * mat.m44,
self.m21 * mat.m11 + self.m22 * mat.m21 + self.m23 * mat.m31 + self.m24 * mat.m41,
self.m21 * mat.m12 + self.m22 * mat.m22 + self.m23 * mat.m32 + self.m24 * mat.m42,
self.m21 * mat.m13 + self.m22 * mat.m23 + self.m23 * mat.m33 + self.m24 * mat.m43,
self.m21 * mat.m14 + self.m22 * mat.m24 + self.m23 * mat.m34 + self.m24 * mat.m44,
self.m31 * mat.m11 + self.m32 * mat.m21 + self.m33 * mat.m31 + self.m34 * mat.m41,
self.m31 * mat.m12 + self.m32 * mat.m22 + self.m33 * mat.m32 + self.m34 * mat.m42,
self.m31 * mat.m13 + self.m32 * mat.m23 + self.m33 * mat.m33 + self.m34 * mat.m43,
self.m31 * mat.m14 + self.m32 * mat.m24 + self.m33 * mat.m34 + self.m34 * mat.m44,
self.m41 * mat.m11 + self.m42 * mat.m21 + self.m43 * mat.m31 + self.m44 * mat.m41,
self.m41 * mat.m12 + self.m42 * mat.m22 + self.m43 * mat.m32 + self.m44 * mat.m42,
self.m41 * mat.m13 + self.m42 * mat.m23 + self.m43 * mat.m33 + self.m44 * mat.m43,
self.m41 * mat.m14 + self.m42 * mat.m24 + self.m43 * mat.m34 + self.m44 * mat.m44,
)
}
/// Returns the multiplication of the two matrices such that mat's transformation
/// applies before self's transformation.
pub fn pre_mul<NewSrc>(&self, mat: &TypedMatrix4D<T, NewSrc, Src>) -> TypedMatrix4D<T, NewSrc, Dst> {
mat.post_mul(self)
}
/// Returns the inverse matrix if possible.
pub fn inverse(&self) -> Option<TypedMatrix4D<T, Dst, Src>> {
let det = self.determinant();
if det == Zero::zero() {
return None;
}
// todo(gw): this could be made faster by special casing
// for simpler matrix types.
let m = TypedMatrix4D::row_major(
self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 +
self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 -
self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44,
self.m14*self.m33*self.m42 - self.m13*self.m34*self.m42 -
self.m14*self.m32*self.m43 + self.m12*self.m34*self.m43 +
self.m13*self.m32*self.m44 - self.m12*self.m33*self.m44,
self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 +
self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 -
self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44,
self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 -
self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 +
self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34,
self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 -
self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 +
self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44,
self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 +
self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 -
self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44,
self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 -
self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 +
self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44,
self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 +
self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 -
self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34,
self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 +
self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 -
self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44,
self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 -
self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 +
self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44,
self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 +
self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 -
self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44,
self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 -
self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 +
self.m12*self.m21*self.m34 - self.m11*self.m22*self.m34,
self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 -
self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 +
self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43,
self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 +
self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 -
self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43,
self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 -
self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 +
self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43,
self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 +
self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 -
self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33
);
let _1: T = One::one();
Some(m.mul_s(_1 / det))
}
/// Compute the determinant of the matrix.
pub fn determinant(&self) -> T {
self.m14 * self.m23 * self.m32 * self.m41 -
self.m13 * self.m24 * self.m32 * self.m41 -
self.m14 * self.m22 * self.m33 * self.m41 +
self.m12 * self.m24 * self.m33 * self.m41 +
self.m13 * self.m22 * self.m34 * self.m41 -
self.m12 * self.m23 * self.m34 * self.m41 -
self.m14 * self.m23 * self.m31 * self.m42 +
self.m13 * self.m24 * self.m31 * self.m42 +
self.m14 * self.m21 * self.m33 * self.m42 -
self.m11 * self.m24 * self.m33 * self.m42 -
self.m13 * self.m21 * self.m34 * self.m42 +
self.m11 * self.m23 * self.m34 * self.m42 +
self.m14 * self.m22 * self.m31 * self.m43 -
self.m12 * self.m24 * self.m31 * self.m43 -
self.m14 * self.m21 * self.m32 * self.m43 +
self.m11 * self.m24 * self.m32 * self.m43 +
self.m12 * self.m21 * self.m34 * self.m43 -
self.m11 * self.m22 * self.m34 * self.m43 -
self.m13 * self.m22 * self.m31 * self.m44 +
self.m12 * self.m23 * self.m31 * self.m44 +
self.m13 * self.m21 * self.m32 * self.m44 -
self.m11 * self.m23 * self.m32 * self.m44 -
self.m12 * self.m21 * self.m33 * self.m44 +
self.m11 * self.m22 * self.m33 * self.m44
}
/// Multiplies all of the matrix's components by a scalar and returns the result.
pub fn mul_s(&self, x: T) -> TypedMatrix4D<T, Src, Dst> {
TypedMatrix4D::row_major(
self.m11 * x, self.m12 * x, self.m13 * x, self.m14 * x,
self.m21 * x, self.m22 * x, self.m23 * x, self.m24 * x,
self.m31 * x, self.m32 * x, self.m33 * x, self.m34 * x,
self.m41 * x, self.m42 * x, self.m43 * x, self.m44 * x
)
}
/// Convenience function to create a scale matrix from a ScaleFactor.
pub fn from_scale_factor(scale: ScaleFactor<T, Src, Dst>) -> TypedMatrix4D<T, Src, Dst> {
TypedMatrix4D::create_scale(scale.get(), scale.get(), scale.get())
}
/// Returns the given 2d point transformed by this matrix.
///
/// The input point must use the unit Src, and the returned point has the unit Dst.
#[inline]
pub fn transform_point(&self, p: &TypedPoint2D<T, Src>) -> TypedPoint2D<T, Dst> {
self.transform_point4d(&TypedPoint4D::new(p.x, p.y, Zero::zero(), One::one())).to_2d()
}
/// Returns the given 3d point transformed by this matrix.
///
/// The input point must use the unit Src, and the returned point has the unit Dst.
#[inline]
pub fn transform_point3d(&self, p: &TypedPoint3D<T, Src>) -> TypedPoint3D<T, Dst> {
self.transform_point4d(&TypedPoint4D::new(p.x, p.y, p.z, One::one())).to_3d()
}
/// Returns the given 4d point transformed by this matrix.
///
/// The input point must use the unit Src, and the returned point has the unit Dst.
#[inline]
pub fn transform_point4d(&self, p: &TypedPoint4D<T, Src>) -> TypedPoint4D<T, Dst> {
let x = p.x * self.m11 + p.y * self.m21 + p.z * self.m31 + p.w * self.m41;
let y = p.x * self.m12 + p.y * self.m22 + p.z * self.m32 + p.w * self.m42;
let z = p.x * self.m13 + p.y * self.m23 + p.z * self.m33 + p.w * self.m43;
let w = p.x * self.m14 + p.y * self.m24 + p.z * self.m34 + p.w * self.m44;
TypedPoint4D::new(x, y, z, w)
}
/// Returns a rectangle that encompasses the result of transforming the given rectangle by this
/// matrix.
pub fn transform_rect(&self, rect: &TypedRect<T, Src>) -> TypedRect<T, Dst> {
TypedRect::from_points(&[
self.transform_point(&rect.origin),
self.transform_point(&rect.top_right()),
self.transform_point(&rect.bottom_left()),
self.transform_point(&rect.bottom_right()),
])
}
/// Create a 3d translation matrix
pub fn create_translation(x: T, y: T, z: T) -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
TypedMatrix4D::row_major(
_1, _0, _0, _0,
_0, _1, _0, _0,
_0, _0, _1, _0,
x, y, z, _1
)
}
/// Returns a matrix with a translation applied before self's transformation.
pub fn pre_translated(&self, x: T, y: T, z: T) -> TypedMatrix4D<T, Src, Dst> {
self.pre_mul(&TypedMatrix4D::create_translation(x, y, z))
}
/// Returns a matrix with a translation applied after self's transformation.
pub fn post_translated(&self, x: T, y: T, z: T) -> TypedMatrix4D<T, Src, Dst> {
self.post_mul(&TypedMatrix4D::create_translation(x, y, z))
}
/// Create a 3d scale matrix
pub fn create_scale(x: T, y: T, z: T) -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
TypedMatrix4D::row_major(
x, _0, _0, _0,
_0, y, _0, _0,
_0, _0, z, _0,
_0, _0, _0, _1
)
}
/// Returns a matrix with a scale applied before self's transformation.
pub fn pre_scaled(&self, x: T, y: T, z: T) -> TypedMatrix4D<T, Src, Dst> {
TypedMatrix4D::row_major(
self.m11 * x, self.m12, self.m13, self.m14,
self.m21 , self.m22 * y, self.m23, self.m24,
self.m31 , self.m32, self.m33 * z, self.m34,
self.m41 , self.m42, self.m43, self.m44
)
}
/// Returns a matrix with a scale applied after self's transformation.
pub fn post_scaled(&self, x: T, y: T, z: T) -> TypedMatrix4D<T, Src, Dst> {
self.post_mul(&TypedMatrix4D::create_scale(x, y, z))
}
/// Create a 3d rotation matrix from an angle / axis.
/// The supplied axis must be normalized.
pub fn create_rotation(x: T, y: T, z: T, theta: Radians<T>) -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
let _2 = _1 + _1;
let xx = x * x;
let yy = y * y;
let zz = z * z;
let half_theta = theta.get() / _2;
let sc = half_theta.sin() * half_theta.cos();
let sq = half_theta.sin() * half_theta.sin();
TypedMatrix4D::row_major(
_1 - _2 * (yy + zz) * sq,
_2 * (x * y * sq - z * sc),
_2 * (x * z * sq + y * sc),
_0,
_2 * (x * y * sq + z * sc),
_1 - _2 * (xx + zz) * sq,
_2 * (y * z * sq - x * sc),
_0,
_2 * (x * z * sq - y * sc),
_2 * (y * z * sq + x * sc),
_1 - _2 * (xx + yy) * sq,
_0,
_0,
_0,
_0,
_1
)
}
/// Returns a matrix with a rotation applied after self's transformation.
pub fn post_rotated(&self, x: T, y: T, z: T, theta: Radians<T>) -> TypedMatrix4D<T, Src, Dst> {
self.post_mul(&TypedMatrix4D::create_rotation(x, y, z, theta))
}
/// Returns a matrix with a rotation applied before self's transformation.
pub fn pre_rotated(&self, x: T, y: T, z: T, theta: Radians<T>) -> TypedMatrix4D<T, Src, Dst> {
self.pre_mul(&TypedMatrix4D::create_rotation(x, y, z, theta))
}
/// Create a 2d skew matrix.
///
/// See https://drafts.csswg.org/css-transforms/#funcdef-skew
pub fn create_skew(alpha: Radians<T>, beta: Radians<T>) -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
let (sx, sy) = (beta.get().tan(), alpha.get().tan());
TypedMatrix4D::row_major(
_1, sx, _0, _0,
sy, _1, _0, _0,
_0, _0, _1, _0,
_0, _0, _0, _1
)
}
/// Create a simple perspective projection matrix
pub fn create_perspective(d: T) -> TypedMatrix4D<T, Src, Dst> {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
TypedMatrix4D::row_major(
_1, _0, _0, _0,
_0, _1, _0, _0,
_0, _0, _1, -_1 / d,
_0, _0, _0, _1
)
}
}
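// Illustrative sketch (not part of the original file): composing typed transforms so
// that the unit parameters document the coordinate spaces involved. `WorldSpace` and
// `ScreenSpace` are hypothetical unit tags introduced only for this example.
#[cfg(test)]
mod typed_units_example {
    use super::*;
    use approxeq::ApproxEq;
    use point::TypedPoint3D;

    pub enum WorldSpace {}
    pub enum ScreenSpace {}

    #[test]
    fn composing_typed_transforms() {
        // World -> screen: scale by 2, then translate by (10, 0, 0).
        let world_to_screen: TypedMatrix4D<f32, WorldSpace, ScreenSpace> =
            TypedMatrix4D::create_scale(2.0, 2.0, 2.0).post_translated(10.0, 0.0, 0.0);

        let p: TypedPoint3D<f32, WorldSpace> = TypedPoint3D::new(1.0, 1.0, 1.0);
        let q: TypedPoint3D<f32, ScreenSpace> = world_to_screen.transform_point3d(&p);
        assert!(q.approx_eq(&TypedPoint3D::new(12.0, 2.0, 2.0)));
    }
}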
impl<T: Copy, Src, Dst> TypedMatrix4D<T, Src, Dst> {
/// Returns an array containing this matrix's terms in row-major order (the order
/// in which the matrix is actually laid out in memory).
pub fn to_row_major_array(&self) -> [T; 16] {
[
self.m11, self.m12, self.m13, self.m14,
self.m21, self.m22, self.m23, self.m24,
self.m31, self.m32, self.m33, self.m34,
self.m41, self.m42, self.m43, self.m44
]
}
/// Returns an array containing this matrix's terms in column-major order.
pub fn to_column_major_array(&self) -> [T; 16] {
[
self.m11, self.m21, self.m31, self.m41,
self.m12, self.m22, self.m32, self.m42,
self.m13, self.m23, self.m33, self.m43,
self.m14, self.m24, self.m34, self.m44
]
}
/// Returns an array containing this matrix's 4 rows (in row-major order)
/// as arrays.
///
/// This is a convenience method to interface with other libraries like glium.
pub fn to_row_arrays(&self) -> [[T; 4];4] {
[
[self.m11, self.m12, self.m13, self.m14],
[self.m21, self.m22, self.m23, self.m24],
[self.m31, self.m32, self.m33, self.m34],
[self.m41, self.m42, self.m43, self.m44]
]
}
/// Returns an array containing this matrix's 4 columns (in row-major order,
/// or 4 rows in column-major order) as arrays.
///
/// This is a convenience method to interface with other libraries like glium.
pub fn to_column_arrays(&self) -> [[T; 4]; 4] {
[
[self.m11, self.m21, self.m31, self.m41],
[self.m12, self.m22, self.m32, self.m42],
[self.m13, self.m23, self.m33, self.m43],
[self.m14, self.m24, self.m34, self.m44]
]
}
}
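// Illustrative sketch (not part of the original file): the array accessors above are
// meant for handing a matrix to APIs that take plain [[T; 4]; 4] arrays. This only
// demonstrates where the translation terms land in each layout.
#[cfg(test)]
mod array_layout_example {
    use super::*;

    #[test]
    fn translation_terms_layout() {
        let m: Matrix4D<f32> = Matrix4D::create_translation(10.0, 20.0, 30.0);

        // Row-major: the translation sits in the last row.
        assert_eq!(m.to_row_arrays()[3], [10.0, 20.0, 30.0, 1.0]);
        // Column-major: each translation term becomes the last entry of its column,
        // so the first column carries m41 = 10.0.
        assert_eq!(m.to_column_arrays()[0], [1.0, 0.0, 0.0, 10.0]);
    }
}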
impl<T: Copy + fmt::Debug, Src, Dst> fmt::Debug for TypedMatrix4D<T, Src, Dst> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_row_major_array().fmt(f)
}
}
#[cfg(test)]
mod tests {
use approxeq::ApproxEq;
use matrix2d::Matrix2D;
use point::{Point2D, Point3D, Point4D};
use Radians;
use super::*;
use std::f32::consts::FRAC_PI_2;
type Mf32 = Matrix4D<f32>;
// For convenience.
fn rad(v: f32) -> Radians<f32> { Radians::new(v) }
#[test]
pub fn test_translation() {
let t1 = Mf32::create_translation(1.0, 2.0, 3.0);
let t2 = Mf32::identity().pre_translated(1.0, 2.0, 3.0);
let t3 = Mf32::identity().post_translated(1.0, 2.0, 3.0);
assert_eq!(t1, t2);
assert_eq!(t1, t3);
assert_eq!(t1.transform_point3d(&Point3D::new(1.0, 1.0, 1.0)), Point3D::new(2.0, 3.0, 4.0));
assert_eq!(t1.transform_point(&Point2D::new(1.0, 1.0)), Point2D::new(2.0, 3.0));
assert_eq!(t1.post_mul(&t1), Mf32::create_translation(2.0, 4.0, 6.0));
assert!(!t1.is_2d());
assert_eq!(Mf32::create_translation(1.0, 2.0, 3.0).to_2d(), Matrix2D::create_translation(1.0, 2.0));
}
#[test]
pub fn test_rotation() {
let r1 = Mf32::create_rotation(0.0, 0.0, 1.0, rad(FRAC_PI_2));
let r2 = Mf32::identity().pre_rotated(0.0, 0.0, 1.0, rad(FRAC_PI_2));
let r3 = Mf32::identity().post_rotated(0.0, 0.0, 1.0, rad(FRAC_PI_2));
assert_eq!(r1, r2);
assert_eq!(r1, r3);
assert!(r1.transform_point3d(&Point3D::new(1.0, 2.0, 3.0)).approx_eq(&Point3D::new(2.0, -1.0, 3.0)));
assert!(r1.transform_point(&Point2D::new(1.0, 2.0)).approx_eq(&Point2D::new(2.0, -1.0)));
assert!(r1.post_mul(&r1).approx_eq(&Mf32::create_rotation(0.0, 0.0, 1.0, rad(FRAC_PI_2*2.0))));
assert!(r1.is_2d());
assert!(r1.to_2d().approx_eq(&Matrix2D::create_rotation(rad(FRAC_PI_2))));
}
#[test]
pub fn test_scale() {
let s1 = Mf32::create_scale(2.0, 3.0, 4.0);
let s2 = Mf32::identity().pre_scaled(2.0, 3.0, 4.0);
let s3 = Mf32::identity().post_scaled(2.0, 3.0, 4.0);
assert_eq!(s1, s2);
assert_eq!(s1, s3);
assert!(s1.transform_point3d(&Point3D::new(2.0, 2.0, 2.0)).approx_eq(&Point3D::new(4.0, 6.0, 8.0)));
assert!(s1.transform_point(&Point2D::new(2.0, 2.0)).approx_eq(&Point2D::new(4.0, 6.0)));
assert_eq!(s1.post_mul(&s1), Mf32::create_scale(4.0, 9.0, 16.0));
assert!(!s1.is_2d());
assert_eq!(Mf32::create_scale(2.0, 3.0, 0.0).to_2d(), Matrix2D::create_scale(2.0, 3.0));
}
#[test]
pub fn test_ortho() {
let (left, right, bottom, top) = (0.0f32, 1.0f32, 0.1f32, 1.0f32);
let (near, far) = (-1.0f32, 1.0f32);
let result = Mf32::ortho(left, right, bottom, top, near, far);
let expected = Mf32::row_major(
2.0, 0.0, 0.0, 0.0,
0.0, 2.22222222, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
-1.0, -1.22222222, -0.0, 1.0
);
debug!("result={:?} expected={:?}", result, expected);
assert!(result.approx_eq(&expected));
}
#[test]
pub fn test_is_2d() {
assert!(Mf32::identity().is_2d());
assert!(Mf32::create_rotation(0.0, 0.0, 1.0, rad(0.7854)).is_2d());
assert!(!Mf32::create_rotation(0.0, 1.0, 0.0, rad(0.7854)).is_2d());
}
#[test]
pub fn test_row_major_2d() {
let m1 = Mf32::row_major_2d(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
let m2 = Mf32::row_major(
1.0, 2.0, 0.0, 0.0,
3.0, 4.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
5.0, 6.0, 0.0, 1.0
);
assert_eq!(m1, m2);
}
#[test]
fn test_column_major() {
assert_eq!(
Mf32::row_major(
1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0,
),
Mf32::column_major(
1.0, 5.0, 9.0, 13.0,
2.0, 6.0, 10.0, 14.0,
3.0, 7.0, 11.0, 15.0,
4.0, 8.0, 12.0, 16.0,
)
);
}
#[test]
pub fn test_inverse_simple() {
let m1 = Mf32::identity();
let m2 = m1.inverse().unwrap();
assert!(m1.approx_eq(&m2));
}
#[test]
pub fn test_inverse_scale() {
let m1 = Mf32::create_scale(1.5, 0.3, 2.1);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_mul(&m2).approx_eq(&Mf32::identity()));
}
#[test]
pub fn test_inverse_translate() {
let m1 = Mf32::create_translation(-132.0, 0.3, 493.0);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_mul(&m2).approx_eq(&Mf32::identity()));
}
#[test]
pub fn test_inverse_rotate() {
let m1 = Mf32::create_rotation(0.0, 1.0, 0.0, rad(1.57));
let m2 = m1.inverse().unwrap();
assert!(m1.pre_mul(&m2).approx_eq(&Mf32::identity()));
}
#[test]
pub fn test_inverse_transform_point_2d() {
let m1 = Mf32::create_translation(100.0, 200.0, 0.0);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_mul(&m2).approx_eq(&Mf32::identity()));
let p1 = Point2D::new(1000.0, 2000.0);
let p2 = m1.transform_point(&p1);
assert!(p2.eq(&Point2D::new(1100.0, 2200.0)));
let p3 = m2.transform_point(&p2);
assert!(p3.eq(&p1));
}
#[test]
fn test_inverse_none() {
assert!(Mf32::create_scale(2.0, 0.0, 2.0).inverse().is_none());
assert!(Mf32::create_scale(2.0, 2.0, 2.0).inverse().is_some());
}
#[test]
pub fn test_pre_post() {
let m1 = Matrix4D::identity().post_scaled(1.0, 2.0, 3.0).post_translated(1.0, 2.0, 3.0);
let m2 = Matrix4D::identity().pre_translated(1.0, 2.0, 3.0).pre_scaled(1.0, 2.0, 3.0);
assert!(m1.approx_eq(&m2));
let r = Mf32::create_rotation(0.0, 0.0, 1.0, rad(FRAC_PI_2));
let t = Mf32::create_translation(2.0, 3.0, 0.0);
let a = Point3D::new(1.0, 1.0, 1.0);
assert!(r.post_mul(&t).transform_point3d(&a).approx_eq(&Point3D::new(3.0, 2.0, 1.0)));
assert!(t.post_mul(&r).transform_point3d(&a).approx_eq(&Point3D::new(4.0, -3.0, 1.0)));
assert!(t.post_mul(&r).transform_point3d(&a).approx_eq(&r.transform_point3d(&t.transform_point3d(&a))));
assert!(r.pre_mul(&t).transform_point3d(&a).approx_eq(&Point3D::new(4.0, -3.0, 1.0)));
assert!(t.pre_mul(&r).transform_point3d(&a).approx_eq(&Point3D::new(3.0, 2.0, 1.0)));
assert!(t.pre_mul(&r).transform_point3d(&a).approx_eq(&t.transform_point3d(&r.transform_point3d(&a))));
}
#[test]
fn test_size_of() {
use std::mem::size_of;
assert_eq!(size_of::<Matrix4D<f32>>(), 16*size_of::<f32>());
assert_eq!(size_of::<Matrix4D<f64>>(), 16*size_of::<f64>());
}
#[test]
pub fn test_transform_associativity() {
let m1 = Mf32::row_major(3.0, 2.0, 1.5, 1.0,
0.0, 4.5, -1.0, -4.0,
0.0, 3.5, 2.5, 40.0,
0.0, 3.0, 0.0, 1.0);
let m2 = Mf32::row_major(1.0, -1.0, 3.0, 0.0,
-1.0, 0.5, 0.0, 2.0,
1.5, -2.0, 6.0, 0.0,
-2.5, 6.0, 1.0, 1.0);
let p = Point4D::new(1.0, 3.0, 5.0, 1.0);
let p1 = m2.pre_mul(&m1).transform_point4d(&p);
let p2 = m2.transform_point4d(&m1.transform_point4d(&p));
assert!(p1.approx_eq(&p2));
}
}

66  third_party/rust/euclid-0.10.5/src/num.rs (vendored)

@@ -1,66 +0,0 @@
// Copyright 2014 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Minimal numeric traits (Zero, One, Round, Floor, Ceil) used by the geometric types.
use num_traits;
pub trait Zero {
fn zero() -> Self;
}
impl<T: num_traits::Zero> Zero for T {
fn zero() -> T { num_traits::Zero::zero() }
}
pub trait One {
fn one() -> Self;
}
impl<T: num_traits::One> One for T {
fn one() -> T { num_traits::One::one() }
}
pub trait Round : Copy { fn round(self) -> Self; }
pub trait Floor : Copy { fn floor(self) -> Self; }
pub trait Ceil : Copy { fn ceil(self) -> Self; }
impl Round for f32 { fn round(self) -> Self { self.round() } }
impl Round for f64 { fn round(self) -> Self { self.round() } }
impl Round for i16 { fn round(self) -> Self { self } }
impl Round for u16 { fn round(self) -> Self { self } }
impl Round for i32 { fn round(self) -> Self { self } }
impl Round for i64 { fn round(self) -> Self { self } }
impl Round for u32 { fn round(self) -> Self { self } }
impl Round for u64 { fn round(self) -> Self { self } }
impl Round for usize { fn round(self) -> Self { self } }
impl Round for isize { fn round(self) -> Self { self } }
impl Floor for f32 { fn floor(self) -> Self { self.floor() } }
impl Floor for f64 { fn floor(self) -> Self { self.floor() } }
impl Floor for i16 { fn floor(self) -> Self { self } }
impl Floor for u16 { fn floor(self) -> Self { self } }
impl Floor for i32 { fn floor(self) -> Self { self } }
impl Floor for i64 { fn floor(self) -> Self { self } }
impl Floor for u32 { fn floor(self) -> Self { self } }
impl Floor for u64 { fn floor(self) -> Self { self } }
impl Floor for usize { fn floor(self) -> Self { self } }
impl Floor for isize { fn floor(self) -> Self { self } }
impl Ceil for f32 { fn ceil(self) -> Self { self.ceil() } }
impl Ceil for f64 { fn ceil(self) -> Self { self.ceil() } }
impl Ceil for i16 { fn ceil(self) -> Self { self } }
impl Ceil for u16 { fn ceil(self) -> Self { self } }
impl Ceil for i32 { fn ceil(self) -> Self { self } }
impl Ceil for i64 { fn ceil(self) -> Self { self } }
impl Ceil for u32 { fn ceil(self) -> Self { self } }
impl Ceil for u64 { fn ceil(self) -> Self { self } }
impl Ceil for usize { fn ceil(self) -> Self { self } }
impl Ceil for isize { fn ceil(self) -> Self { self } }
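// Illustrative sketch (not part of the original file): the Round/Floor/Ceil traits
// above defer to the float methods for f32/f64 and are no-ops for integer types,
// which is what lets the point/size/rect rounding helpers be written generically.
#[cfg(test)]
mod rounding_example {
    use super::{Round, Floor, Ceil};

    #[test]
    fn floats_round_integers_pass_through() {
        assert_eq!(Round::round(-0.8f32), -1.0);
        assert_eq!(Floor::floor(1.9f64), 1.0);
        assert_eq!(Ceil::ceil(1.1f32), 2.0);
        // Integers are already integral, so the traits leave them untouched.
        assert_eq!(Round::round(7i32), 7);
        assert_eq!(Ceil::ceil(7u32), 7);
    }
}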

939  third_party/rust/euclid-0.10.5/src/point.rs (vendored)

@@ -1,939 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::UnknownUnit;
use approxeq::ApproxEq;
use length::Length;
use scale_factor::ScaleFactor;
use size::TypedSize2D;
use num::*;
use num_traits::{Float, NumCast};
use std::fmt;
use std::ops::{Add, Neg, Mul, Sub, Div};
use std::marker::PhantomData;
define_matrix! {
/// A 2d Point tagged with a unit.
#[derive(RustcDecodable, RustcEncodable)]
pub struct TypedPoint2D<T, U> {
pub x: T,
pub y: T,
}
}
/// Default 2d point type with no unit.
///
/// `Point2D` provides the same methods as `TypedPoint2D`.
pub type Point2D<T> = TypedPoint2D<T, UnknownUnit>;
impl<T: Copy + Zero, U> TypedPoint2D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
pub fn zero() -> TypedPoint2D<T, U> {
TypedPoint2D::new(Zero::zero(), Zero::zero())
}
/// Convert into a 3d point.
#[inline]
pub fn to_3d(&self) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.x, self.y, Zero::zero())
}
}
impl<T: fmt::Debug, U> fmt::Debug for TypedPoint2D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({:?},{:?})", self.x, self.y)
}
}
impl<T: fmt::Display, U> fmt::Display for TypedPoint2D<T, U> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "({},{})", self.x, self.y)
}
}
impl<T: Copy, U> TypedPoint2D<T, U> {
/// Constructor taking scalar values directly.
#[inline]
pub fn new(x: T, y: T) -> TypedPoint2D<T, U> {
TypedPoint2D { x: x, y: y, _unit: PhantomData }
}
/// Constructor taking properly typed Lengths instead of scalar values.
#[inline]
pub fn from_lengths(x: Length<T, U>, y: Length<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(x.0, y.0)
}
/// Returns self.x as a Length carrying the unit.
#[inline]
pub fn x_typed(&self) -> Length<T, U> { Length::new(self.x) }
/// Returns self.y as a Length carrying the unit.
#[inline]
pub fn y_typed(&self) -> Length<T, U> { Length::new(self.y) }
/// Drop the units, preserving only the numeric value.
#[inline]
pub fn to_untyped(&self) -> Point2D<T> {
TypedPoint2D::new(self.x, self.y)
}
/// Tag a unitless value with units.
#[inline]
pub fn from_untyped(p: &Point2D<T>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(p.x, p.y)
}
#[inline]
pub fn to_array(&self) -> [T; 2] {
[self.x, self.y]
}
}
impl<T, U> TypedPoint2D<T, U>
where T: Copy + Mul<T, Output=T> + Add<T, Output=T> + Sub<T, Output=T> {
/// Dot product.
#[inline]
pub fn dot(self, other: TypedPoint2D<T, U>) -> T {
self.x * other.x + self.y * other.y
}
/// Returns the signed z component of the cross product [self.x, self.y, 0] x [other.x, other.y, 0].
#[inline]
pub fn cross(self, other: TypedPoint2D<T, U>) -> T {
self.x * other.y - self.y * other.x
}
}
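// Illustrative sketch (not part of the original file): dot() measures how aligned two
// points-as-vectors are, while cross() gives the signed area of the parallelogram they
// span (positive when `other` is counter-clockwise from `self`).
#[cfg(test)]
mod dot_cross_example {
    use super::*;

    #[test]
    fn dot_and_cross() {
        let x_axis: Point2D<f32> = Point2D::new(1.0, 0.0);
        let y_axis: Point2D<f32> = Point2D::new(0.0, 1.0);

        assert_eq!(x_axis.dot(y_axis), 0.0);   // perpendicular
        assert_eq!(x_axis.cross(y_axis), 1.0); // counter-clockwise turn
        assert_eq!(y_axis.cross(x_axis), -1.0);
    }
}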
impl<T: Copy + Add<T, Output=T>, U> Add for TypedPoint2D<T, U> {
type Output = TypedPoint2D<T, U>;
fn add(self, other: TypedPoint2D<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x + other.x, self.y + other.y)
}
}
impl<T: Copy + Add<T, Output=T>, U> Add<TypedSize2D<T, U>> for TypedPoint2D<T, U> {
type Output = TypedPoint2D<T, U>;
fn add(self, other: TypedSize2D<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x + other.width, self.y + other.height)
}
}
impl<T: Copy + Add<T, Output=T>, U> TypedPoint2D<T, U> {
pub fn add_size(&self, other: &TypedSize2D<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x + other.width, self.y + other.height)
}
}
impl<T: Copy + Sub<T, Output=T>, U> Sub for TypedPoint2D<T, U> {
type Output = TypedPoint2D<T, U>;
fn sub(self, other: TypedPoint2D<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x - other.x, self.y - other.y)
}
}
impl <T: Copy + Neg<Output=T>, U> Neg for TypedPoint2D<T, U> {
type Output = TypedPoint2D<T, U>;
#[inline]
fn neg(self) -> TypedPoint2D<T, U> {
TypedPoint2D::new(-self.x, -self.y)
}
}
impl<T: Float, U> TypedPoint2D<T, U> {
pub fn min(self, other: TypedPoint2D<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x.min(other.x), self.y.min(other.y))
}
pub fn max(self, other: TypedPoint2D<T, U>) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x.max(other.x), self.y.max(other.y))
}
}
impl<T: Copy + Mul<T, Output=T>, U> Mul<T> for TypedPoint2D<T, U> {
type Output = TypedPoint2D<T, U>;
#[inline]
fn mul(self, scale: T) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x * scale, self.y * scale)
}
}
impl<T: Copy + Div<T, Output=T>, U> Div<T> for TypedPoint2D<T, U> {
type Output = TypedPoint2D<T, U>;
#[inline]
fn div(self, scale: T) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x / scale, self.y / scale)
}
}
impl<T: Copy + Mul<T, Output=T>, U1, U2> Mul<ScaleFactor<T, U1, U2>> for TypedPoint2D<T, U1> {
type Output = TypedPoint2D<T, U2>;
#[inline]
fn mul(self, scale: ScaleFactor<T, U1, U2>) -> TypedPoint2D<T, U2> {
TypedPoint2D::new(self.x * scale.get(), self.y * scale.get())
}
}
impl<T: Copy + Div<T, Output=T>, U1, U2> Div<ScaleFactor<T, U1, U2>> for TypedPoint2D<T, U2> {
type Output = TypedPoint2D<T, U1>;
#[inline]
fn div(self, scale: ScaleFactor<T, U1, U2>) -> TypedPoint2D<T, U1> {
TypedPoint2D::new(self.x / scale.get(), self.y / scale.get())
}
}
impl<T: Round, U> TypedPoint2D<T, U> {
/// Rounds each component to the nearest integer value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
/// For example { -0.1, -0.8 }.round() == { 0.0, -1.0 }
pub fn round(&self) -> Self {
TypedPoint2D::new(self.x.round(), self.y.round())
}
}
impl<T: Ceil, U> TypedPoint2D<T, U> {
/// Rounds each component to the smallest integer equal to or greater than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
/// For example { -0.1, -0.8 }.ceil() == { 0.0, 0.0 }.
pub fn ceil(&self) -> Self {
TypedPoint2D::new(self.x.ceil(), self.y.ceil())
}
}
impl<T: Floor, U> TypedPoint2D<T, U> {
/// Rounds each component to the largest integer equal to or lower than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
/// For example { -0.1, -0.8 }.floor() == { -1.0, -1.0 }.
pub fn floor(&self) -> Self {
TypedPoint2D::new(self.x.floor(), self.y.floor())
}
}
impl<T: NumCast + Copy, U> TypedPoint2D<T, U> {
/// Cast from one numeric representation to another, preserving the units.
///
/// When casting from floating point to integer coordinates, the decimals are truncated
/// as one would expect from a simple cast, but this behavior does not always make sense
/// geometrically. Consider using round(), ceil() or floor() before casting.
pub fn cast<NewT: NumCast + Copy>(&self) -> Option<TypedPoint2D<NewT, U>> {
match (NumCast::from(self.x), NumCast::from(self.y)) {
(Some(x), Some(y)) => Some(TypedPoint2D::new(x, y)),
_ => None
}
}
// Convenience functions for common casts
/// Cast into an f32 point.
pub fn to_f32(&self) -> TypedPoint2D<f32, U> {
self.cast().unwrap()
}
/// Cast into a usize point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_uint(&self) -> TypedPoint2D<usize, U> {
self.cast().unwrap()
}
/// Cast into an i32 point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i32(&self) -> TypedPoint2D<i32, U> {
self.cast().unwrap()
}
/// Cast into an i64 point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i64(&self) -> TypedPoint2D<i64, U> {
self.cast().unwrap()
}
}
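// Illustrative sketch (not part of the original file): casting truncates toward zero,
// so rounding first usually matches the intended geometry for negative coordinates.
#[cfg(test)]
mod cast_example {
    use super::*;

    #[test]
    fn round_before_casting() {
        let p: Point2D<f32> = Point2D::new(-0.6, 2.7);

        // A plain cast truncates: -0.6 -> 0, 2.7 -> 2.
        assert_eq!(p.to_i32(), Point2D::new(0, 2));
        // Rounding first keeps the nearest integer: -0.6 -> -1, 2.7 -> 3.
        assert_eq!(p.round().to_i32(), Point2D::new(-1, 3));
    }
}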
impl<T: Copy+ApproxEq<T>, U> ApproxEq<TypedPoint2D<T, U>> for TypedPoint2D<T, U> {
#[inline]
fn approx_epsilon() -> Self {
TypedPoint2D::new(T::approx_epsilon(), T::approx_epsilon())
}
#[inline]
fn approx_eq(&self, other: &Self) -> bool {
self.x.approx_eq(&other.x) && self.y.approx_eq(&other.y)
}
#[inline]
fn approx_eq_eps(&self, other: &Self, eps: &Self) -> bool {
self.x.approx_eq_eps(&other.x, &eps.x) && self.y.approx_eq_eps(&other.y, &eps.y)
}
}
define_matrix! {
/// A 3d Point tagged with a unit.
#[derive(RustcDecodable, RustcEncodable)]
pub struct TypedPoint3D<T, U> {
pub x: T,
pub y: T,
pub z: T,
}
}
/// Default 3d point type with no unit.
///
/// `Point3D` provides the same methods as `TypedPoint3D`.
pub type Point3D<T> = TypedPoint3D<T, UnknownUnit>;
impl<T: Copy + Zero, U> TypedPoint3D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
pub fn zero() -> TypedPoint3D<T, U> {
TypedPoint3D::new(Zero::zero(), Zero::zero(), Zero::zero())
}
}
impl<T: fmt::Debug, U> fmt::Debug for TypedPoint3D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({:?},{:?},{:?})", self.x, self.y, self.z)
}
}
impl<T: fmt::Display, U> fmt::Display for TypedPoint3D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({},{},{})", self.x, self.y, self.z)
}
}
impl<T: Copy, U> TypedPoint3D<T, U> {
/// Constructor taking scalar values directly.
#[inline]
pub fn new(x: T, y: T, z: T) -> TypedPoint3D<T, U> {
TypedPoint3D { x: x, y: y, z: z, _unit: PhantomData }
}
/// Constructor taking properly typed Lengths instead of scalar values.
#[inline]
pub fn from_lengths(x: Length<T, U>, y: Length<T, U>, z: Length<T, U>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(x.0, y.0, z.0)
}
/// Returns self.x as a Length carrying the unit.
#[inline]
pub fn x_typed(&self) -> Length<T, U> { Length::new(self.x) }
/// Returns self.y as a Length carrying the unit.
#[inline]
pub fn y_typed(&self) -> Length<T, U> { Length::new(self.y) }
/// Returns self.z as a Length carrying the unit.
#[inline]
pub fn z_typed(&self) -> Length<T, U> { Length::new(self.z) }
#[inline]
pub fn to_array(&self) -> [T; 3] { [self.x, self.y, self.z] }
/// Drop the units, preserving only the numeric value.
#[inline]
pub fn to_untyped(&self) -> Point3D<T> {
TypedPoint3D::new(self.x, self.y, self.z)
}
/// Tag a unitless value with units.
#[inline]
pub fn from_untyped(p: &Point3D<T>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(p.x, p.y, p.z)
}
/// Convert into a 2d point.
#[inline]
pub fn to_2d(&self) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x, self.y)
}
}
impl<T: Mul<T, Output=T> +
Add<T, Output=T> +
Sub<T, Output=T> +
Copy, U> TypedPoint3D<T, U> {
// Dot product.
#[inline]
pub fn dot(self, other: TypedPoint3D<T, U>) -> T {
self.x * other.x +
self.y * other.y +
self.z * other.z
}
// Cross product.
#[inline]
pub fn cross(self, other: TypedPoint3D<T, U>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
}
}
impl<T: Copy + Add<T, Output=T>, U> Add for TypedPoint3D<T, U> {
type Output = TypedPoint3D<T, U>;
fn add(self, other: TypedPoint3D<T, U>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.x + other.x,
self.y + other.y,
self.z + other.z)
}
}
impl<T: Copy + Sub<T, Output=T>, U> Sub for TypedPoint3D<T, U> {
type Output = TypedPoint3D<T, U>;
fn sub(self, other: TypedPoint3D<T, U>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.x - other.x,
self.y - other.y,
self.z - other.z)
}
}
impl <T: Copy + Neg<Output=T>, U> Neg for TypedPoint3D<T, U> {
type Output = TypedPoint3D<T, U>;
#[inline]
fn neg(self) -> TypedPoint3D<T, U> {
TypedPoint3D::new(-self.x, -self.y, -self.z)
}
}
impl<T: Float, U> TypedPoint3D<T, U> {
pub fn min(self, other: TypedPoint3D<T, U>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.x.min(other.x),
self.y.min(other.y),
self.z.min(other.z))
}
pub fn max(self, other: TypedPoint3D<T, U>) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.x.max(other.x), self.y.max(other.y),
self.z.max(other.z))
}
}
impl<T: Round, U> TypedPoint3D<T, U> {
/// Rounds each component to the nearest integer value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn round(&self) -> Self {
TypedPoint3D::new(self.x.round(), self.y.round(), self.z.round())
}
}
impl<T: Ceil, U> TypedPoint3D<T, U> {
/// Rounds each component to the smallest integer equal to or greater than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn ceil(&self) -> Self {
TypedPoint3D::new(self.x.ceil(), self.y.ceil(), self.z.ceil())
}
}
impl<T: Floor, U> TypedPoint3D<T, U> {
/// Rounds each component to the largest integer equal to or lower than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn floor(&self) -> Self {
TypedPoint3D::new(self.x.floor(), self.y.floor(), self.z.floor())
}
}
impl<T: NumCast + Copy, U> TypedPoint3D<T, U> {
/// Cast from one numeric representation to another, preserving the units.
///
/// When casting from floating point to integer coordinates, the decimals are truncated
/// as one would expect from a simple cast, but this behavior does not always make sense
/// geometrically. Consider using round(), ceil() or floor() before casting.
pub fn cast<NewT: NumCast + Copy>(&self) -> Option<TypedPoint3D<NewT, U>> {
match (NumCast::from(self.x),
NumCast::from(self.y),
NumCast::from(self.z)) {
(Some(x), Some(y), Some(z)) => Some(TypedPoint3D::new(x, y, z)),
_ => None
}
}
// Convenience functions for common casts
/// Cast into an f32 point.
pub fn to_f32(&self) -> TypedPoint3D<f32, U> {
self.cast().unwrap()
}
/// Cast into a usize point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_uint(&self) -> TypedPoint3D<usize, U> {
self.cast().unwrap()
}
/// Cast into an i32 point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i32(&self) -> TypedPoint3D<i32, U> {
self.cast().unwrap()
}
/// Cast into an i64 point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i64(&self) -> TypedPoint3D<i64, U> {
self.cast().unwrap()
}
}
impl<T: Copy+ApproxEq<T>, U> ApproxEq<TypedPoint3D<T, U>> for TypedPoint3D<T, U> {
#[inline]
fn approx_epsilon() -> Self {
TypedPoint3D::new(T::approx_epsilon(), T::approx_epsilon(), T::approx_epsilon())
}
#[inline]
fn approx_eq(&self, other: &Self) -> bool {
self.x.approx_eq(&other.x)
&& self.y.approx_eq(&other.y)
&& self.z.approx_eq(&other.z)
}
#[inline]
fn approx_eq_eps(&self, other: &Self, eps: &Self) -> bool {
self.x.approx_eq_eps(&other.x, &eps.x)
&& self.y.approx_eq_eps(&other.y, &eps.y)
&& self.z.approx_eq_eps(&other.z, &eps.z)
}
}
define_matrix! {
/// A 4d Point tagged with a unit.
#[derive(RustcDecodable, RustcEncodable)]
pub struct TypedPoint4D<T, U> {
pub x: T,
pub y: T,
pub z: T,
pub w: T,
}
}
/// Default 4d point with no unit.
///
/// `Point4D` provides the same methods as `TypedPoint4D`.
pub type Point4D<T> = TypedPoint4D<T, UnknownUnit>;
impl<T: Copy + Zero, U> TypedPoint4D<T, U> {
/// Constructor, setting all components to zero.
#[inline]
pub fn zero() -> TypedPoint4D<T, U> {
TypedPoint4D::new(Zero::zero(), Zero::zero(), Zero::zero(), Zero::zero())
}
}
impl<T: fmt::Debug, U> fmt::Debug for TypedPoint4D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({:?},{:?},{:?},{:?})", self.x, self.y, self.z, self.w)
}
}
impl<T: fmt::Display, U> fmt::Display for TypedPoint4D<T, U> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "({},{},{},{})", self.x, self.y, self.z, self.w)
}
}
impl<T: Copy, U> TypedPoint4D<T, U> {
/// Constructor taking scalar values directly.
#[inline]
pub fn new(x: T, y: T, z: T, w: T) -> TypedPoint4D<T, U> {
TypedPoint4D { x: x, y: y, z: z, w: w, _unit: PhantomData }
}
/// Constructor taking properly typed Lengths instead of scalar values.
#[inline]
pub fn from_lengths(x: Length<T, U>,
y: Length<T, U>,
z: Length<T, U>,
w: Length<T, U>) -> TypedPoint4D<T, U> {
TypedPoint4D::new(x.0, y.0, z.0, w.0)
}
/// Returns self.x as a Length carrying the unit.
#[inline]
pub fn x_typed(&self) -> Length<T, U> { Length::new(self.x) }
/// Returns self.y as a Length carrying the unit.
#[inline]
pub fn y_typed(&self) -> Length<T, U> { Length::new(self.y) }
/// Returns self.z as a Length carrying the unit.
#[inline]
pub fn z_typed(&self) -> Length<T, U> { Length::new(self.z) }
/// Returns self.w as a Length carrying the unit.
#[inline]
pub fn w_typed(&self) -> Length<T, U> { Length::new(self.w) }
/// Drop the units, preserving only the numeric value.
#[inline]
pub fn to_untyped(&self) -> Point4D<T> {
TypedPoint4D::new(self.x, self.y, self.z, self.w)
}
/// Tag a unitless value with units.
#[inline]
pub fn from_untyped(p: &Point4D<T>) -> TypedPoint4D<T, U> {
TypedPoint4D::new(p.x, p.y, p.z, p.w)
}
#[inline]
pub fn to_array(&self) -> [T; 4] {
[self.x, self.y, self.z, self.w]
}
}
impl<T: Copy + Div<T, Output=T>, U> TypedPoint4D<T, U> {
/// Convert into a 2d point.
#[inline]
pub fn to_2d(self) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.x / self.w, self.y / self.w)
}
/// Convert into a 3d point.
#[inline]
pub fn to_3d(self) -> TypedPoint3D<T, U> {
TypedPoint3D::new(self.x / self.w, self.y / self.w, self.z / self.w)
}
}
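// Illustrative sketch (not from the upstream euclid source): to_2d() and
// to_3d() above perform the homogeneous divide by w.
#[cfg(test)]
mod point4d_divide_sketch {
    use super::{Point3D, Point4D};
    #[test]
    fn to_3d_divides_by_w() {
        let p = Point4D::new(2.0f32, 4.0, 6.0, 2.0);
        assert_eq!(p.to_3d(), Point3D::new(1.0, 2.0, 3.0));
    }
}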
impl<T: Copy + Add<T, Output=T>, U> Add for TypedPoint4D<T, U> {
type Output = TypedPoint4D<T, U>;
fn add(self, other: TypedPoint4D<T, U>) -> TypedPoint4D<T, U> {
TypedPoint4D::new(self.x + other.x,
self.y + other.y,
self.z + other.z,
self.w + other.w)
}
}
impl<T: Copy + Sub<T, Output=T>, U> Sub for TypedPoint4D<T, U> {
type Output = TypedPoint4D<T, U>;
fn sub(self, other: TypedPoint4D<T, U>) -> TypedPoint4D<T, U> {
TypedPoint4D::new(self.x - other.x,
self.y - other.y,
self.z - other.z,
self.w - other.w)
}
}
impl <T: Copy + Neg<Output=T>, U> Neg for TypedPoint4D<T, U> {
type Output = TypedPoint4D<T, U>;
#[inline]
fn neg(self) -> TypedPoint4D<T, U> {
TypedPoint4D::new(-self.x, -self.y, -self.z, -self.w)
}
}
impl<T: Float, U> TypedPoint4D<T, U> {
pub fn min(self, other: TypedPoint4D<T, U>) -> TypedPoint4D<T, U> {
TypedPoint4D::new(self.x.min(other.x), self.y.min(other.y),
self.z.min(other.z), self.w.min(other.w))
}
pub fn max(self, other: TypedPoint4D<T, U>) -> TypedPoint4D<T, U> {
TypedPoint4D::new(self.x.max(other.x), self.y.max(other.y),
self.z.max(other.z), self.w.max(other.w))
}
}
impl<T: Round, U> TypedPoint4D<T, U> {
/// Rounds each component to the nearest integer value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn round(&self) -> Self {
TypedPoint4D::new(self.x.round(), self.y.round(), self.z.round(), self.w.round())
}
}
impl<T: Ceil, U> TypedPoint4D<T, U> {
    /// Rounds each component to the smallest integer equal to or greater than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn ceil(&self) -> Self {
TypedPoint4D::new(self.x.ceil(), self.y.ceil(), self.z.ceil(), self.w.ceil())
}
}
impl<T: Floor, U> TypedPoint4D<T, U> {
    /// Rounds each component to the largest integer equal to or lower than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn floor(&self) -> Self {
TypedPoint4D::new(self.x.floor(), self.y.floor(), self.z.floor(), self.w.floor())
}
}
impl<T: NumCast + Copy, U> TypedPoint4D<T, U> {
/// Cast from one numeric representation to another, preserving the units.
///
/// When casting from floating point to integer coordinates, the decimals are truncated
/// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using round(), ceil() or floor() before casting.
pub fn cast<NewT: NumCast + Copy>(&self) -> Option<TypedPoint4D<NewT, U>> {
match (NumCast::from(self.x),
NumCast::from(self.y),
NumCast::from(self.z),
NumCast::from(self.w)) {
(Some(x), Some(y), Some(z), Some(w)) => Some(TypedPoint4D::new(x, y, z, w)),
_ => None
}
}
// Convenience functions for common casts
/// Cast into an f32 vector.
pub fn to_f32(&self) -> TypedPoint4D<f32, U> {
self.cast().unwrap()
}
    /// Cast into a usize point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_uint(&self) -> TypedPoint4D<usize, U> {
self.cast().unwrap()
}
/// Cast into an i32 point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i32(&self) -> TypedPoint4D<i32, U> {
self.cast().unwrap()
}
/// Cast into an i64 point, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i64(&self) -> TypedPoint4D<i64, U> {
self.cast().unwrap()
}
}
impl<T: ApproxEq<T>, U> ApproxEq<T> for TypedPoint4D<T, U> {
fn approx_epsilon() -> T {
T::approx_epsilon()
}
fn approx_eq_eps(&self, other: &Self, approx_epsilon: &T) -> bool {
self.x.approx_eq_eps(&other.x, approx_epsilon)
&& self.y.approx_eq_eps(&other.y, approx_epsilon)
&& self.z.approx_eq_eps(&other.z, approx_epsilon)
&& self.w.approx_eq_eps(&other.w, approx_epsilon)
}
fn approx_eq(&self, other: &Self) -> bool {
self.approx_eq_eps(&other, &Self::approx_epsilon())
}
}
pub fn point2<T: Copy, U>(x: T, y: T) -> TypedPoint2D<T, U> {
TypedPoint2D::new(x, y)
}
pub fn point3<T: Copy, U>(x: T, y: T, z: T) -> TypedPoint3D<T, U> {
TypedPoint3D::new(x, y, z)
}
pub fn point4<T: Copy, U>(x: T, y: T, z: T, w: T) -> TypedPoint4D<T, U> {
TypedPoint4D::new(x, y, z, w)
}
#[cfg(test)]
mod point2d {
use super::Point2D;
#[test]
pub fn test_scalar_mul() {
let p1: Point2D<f32> = Point2D::new(3.0, 5.0);
let result = p1 * 5.0;
assert_eq!(result, Point2D::new(15.0, 25.0));
}
#[test]
pub fn test_dot() {
let p1: Point2D<f32> = Point2D::new(2.0, 7.0);
let p2: Point2D<f32> = Point2D::new(13.0, 11.0);
assert_eq!(p1.dot(p2), 103.0);
}
#[test]
pub fn test_cross() {
let p1: Point2D<f32> = Point2D::new(4.0, 7.0);
let p2: Point2D<f32> = Point2D::new(13.0, 8.0);
let r = p1.cross(p2);
assert_eq!(r, -59.0);
}
#[test]
pub fn test_min() {
let p1 = Point2D::new(1.0, 3.0);
let p2 = Point2D::new(2.0, 2.0);
let result = p1.min(p2);
assert_eq!(result, Point2D::new(1.0, 2.0));
}
#[test]
pub fn test_max() {
let p1 = Point2D::new(1.0, 3.0);
let p2 = Point2D::new(2.0, 2.0);
let result = p1.max(p2);
assert_eq!(result, Point2D::new(2.0, 3.0));
}
}
#[cfg(test)]
mod typedpoint2d {
use super::TypedPoint2D;
use scale_factor::ScaleFactor;
pub enum Mm {}
pub enum Cm {}
pub type Point2DMm<T> = TypedPoint2D<T, Mm>;
pub type Point2DCm<T> = TypedPoint2D<T, Cm>;
#[test]
pub fn test_add() {
let p1 = Point2DMm::new(1.0, 2.0);
let p2 = Point2DMm::new(3.0, 4.0);
let result = p1 + p2;
assert_eq!(result, Point2DMm::new(4.0, 6.0));
}
#[test]
pub fn test_scalar_mul() {
let p1 = Point2DMm::new(1.0, 2.0);
let cm_per_mm: ScaleFactor<f32, Mm, Cm> = ScaleFactor::new(0.1);
let result = p1 * cm_per_mm;
assert_eq!(result, Point2DCm::new(0.1, 0.2));
}
}
#[cfg(test)]
mod point3d {
use super::Point3D;
#[test]
pub fn test_dot() {
let p1 = Point3D::new(7.0, 21.0, 32.0);
let p2 = Point3D::new(43.0, 5.0, 16.0);
assert_eq!(p1.dot(p2), 918.0);
}
#[test]
pub fn test_cross() {
let p1 = Point3D::new(4.0, 7.0, 9.0);
let p2 = Point3D::new(13.0, 8.0, 3.0);
let p3 = p1.cross(p2);
assert_eq!(p3, Point3D::new(-51.0, 105.0, -59.0));
}
#[test]
pub fn test_min() {
let p1 = Point3D::new(1.0, 3.0, 5.0);
let p2 = Point3D::new(2.0, 2.0, -1.0);
let result = p1.min(p2);
assert_eq!(result, Point3D::new(1.0, 2.0, -1.0));
}
#[test]
pub fn test_max() {
let p1 = Point3D::new(1.0, 3.0, 5.0);
let p2 = Point3D::new(2.0, 2.0, -1.0);
let result = p1.max(p2);
assert_eq!(result, Point3D::new(2.0, 3.0, 5.0));
}
}
#[cfg(test)]
mod point4d {
use super::Point4D;
#[test]
pub fn test_add() {
let p1 = Point4D::new(7.0, 21.0, 32.0, 1.0);
let p2 = Point4D::new(43.0, 5.0, 16.0, 2.0);
let result = p1 + p2;
assert_eq!(result, Point4D::new(50.0, 26.0, 48.0, 3.0));
}
#[test]
pub fn test_sub() {
let p1 = Point4D::new(7.0, 21.0, 32.0, 1.0);
let p2 = Point4D::new(43.0, 5.0, 16.0, 2.0);
let result = p1 - p2;
assert_eq!(result, Point4D::new(-36.0, 16.0, 16.0, -1.0));
}
#[test]
pub fn test_min() {
let p1 = Point4D::new(1.0, 3.0, 5.0, 7.0);
let p2 = Point4D::new(2.0, 2.0, -1.0, 10.0);
let result = p1.min(p2);
assert_eq!(result, Point4D::new(1.0, 2.0, -1.0, 7.0));
}
#[test]
pub fn test_max() {
let p1 = Point4D::new(1.0, 3.0, 5.0, 7.0);
let p2 = Point4D::new(2.0, 2.0, -1.0, 10.0);
let result = p1.max(p2);
assert_eq!(result, Point4D::new(2.0, 3.0, 5.0, 10.0));
}
}

third_party/rust/euclid-0.10.5/src/rect.rs (vendored)

@@ -1,671 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::UnknownUnit;
use length::Length;
use scale_factor::ScaleFactor;
use num::*;
use point::TypedPoint2D;
use size::TypedSize2D;
use heapsize::HeapSizeOf;
use num_traits::NumCast;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::cmp::PartialOrd;
use std::fmt;
use std::ops::{Add, Sub, Mul, Div};
/// A 2d Rectangle optionally tagged with a unit.
#[derive(RustcDecodable, RustcEncodable)]
pub struct TypedRect<T, U = UnknownUnit> {
pub origin: TypedPoint2D<T, U>,
pub size: TypedSize2D<T, U>,
}
/// The default rectangle type with no unit.
pub type Rect<T> = TypedRect<T, UnknownUnit>;
impl<T: HeapSizeOf, U> HeapSizeOf for TypedRect<T, U> {
fn heap_size_of_children(&self) -> usize {
self.origin.heap_size_of_children() + self.size.heap_size_of_children()
}
}
impl<T: Copy + Deserialize, U> Deserialize for TypedRect<T, U> {
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
where D: Deserializer
{
let (origin, size) = try!(Deserialize::deserialize(deserializer));
Ok(TypedRect::new(origin, size))
}
}
impl<T: Serialize, U> Serialize for TypedRect<T, U> {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: Serializer
{
(&self.origin, &self.size).serialize(serializer)
}
}
impl<T: Copy, U> Copy for TypedRect<T, U> {}
impl<T: Copy, U> Clone for TypedRect<T, U> {
fn clone(&self) -> TypedRect<T, U> { *self }
}
impl<T: PartialEq, U> PartialEq<TypedRect<T, U>> for TypedRect<T, U> {
fn eq(&self, other: &TypedRect<T, U>) -> bool {
self.origin.eq(&other.origin) && self.size.eq(&other.size)
}
}
impl<T: Eq, U> Eq for TypedRect<T, U> {}
impl<T: fmt::Debug, U> fmt::Debug for TypedRect<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "TypedRect({:?} at {:?})", self.size, self.origin)
}
}
impl<T: fmt::Display, U> fmt::Display for TypedRect<T, U> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "Rect({} at {})", self.size, self.origin)
}
}
impl<T, U> TypedRect<T, U> {
/// Constructor.
pub fn new(origin: TypedPoint2D<T, U>, size: TypedSize2D<T, U>) -> TypedRect<T, U> {
TypedRect {
origin: origin,
size: size,
}
}
}
impl<T, U> TypedRect<T, U>
where T: Copy + Clone + Zero + PartialOrd + PartialEq + Add<T, Output=T> + Sub<T, Output=T> {
#[inline]
pub fn intersects(&self, other: &TypedRect<T, U>) -> bool {
self.origin.x < other.origin.x + other.size.width &&
other.origin.x < self.origin.x + self.size.width &&
self.origin.y < other.origin.y + other.size.height &&
other.origin.y < self.origin.y + self.size.height
}
#[inline]
pub fn max_x(&self) -> T {
self.origin.x + self.size.width
}
#[inline]
pub fn min_x(&self) -> T {
self.origin.x
}
#[inline]
pub fn max_y(&self) -> T {
self.origin.y + self.size.height
}
#[inline]
pub fn min_y(&self) -> T {
self.origin.y
}
#[inline]
pub fn max_x_typed(&self) -> Length<T, U> {
Length::new(self.max_x())
}
#[inline]
pub fn min_x_typed(&self) -> Length<T, U> {
Length::new(self.min_x())
}
#[inline]
pub fn max_y_typed(&self) -> Length<T, U> {
Length::new(self.max_y())
}
#[inline]
pub fn min_y_typed(&self) -> Length<T, U> {
Length::new(self.min_y())
}
#[inline]
pub fn intersection(&self, other: &TypedRect<T, U>) -> Option<TypedRect<T, U>> {
if !self.intersects(other) {
return None;
}
let upper_left = TypedPoint2D::new(max(self.min_x(), other.min_x()),
max(self.min_y(), other.min_y()));
let lower_right_x = min(self.max_x(), other.max_x());
let lower_right_y = min(self.max_y(), other.max_y());
Some(TypedRect::new(upper_left, TypedSize2D::new(lower_right_x - upper_left.x,
lower_right_y - upper_left.y)))
}
/// Translates the rect by a vector.
#[inline]
pub fn translate(&self, other: &TypedPoint2D<T, U>) -> TypedRect<T, U> {
TypedRect::new(
TypedPoint2D::new(self.origin.x + other.x, self.origin.y + other.y),
self.size
)
}
/// Returns true if this rectangle contains the point. Points are considered
/// in the rectangle if they are on the left or top edge, but outside if they
/// are on the right or bottom edge.
#[inline]
pub fn contains(&self, other: &TypedPoint2D<T, U>) -> bool {
self.origin.x <= other.x && other.x < self.origin.x + self.size.width &&
self.origin.y <= other.y && other.y < self.origin.y + self.size.height
}
/// Returns true if this rectangle contains the interior of rect. Always
/// returns true if rect is empty, and always returns false if rect is
/// nonempty but this rectangle is empty.
#[inline]
pub fn contains_rect(&self, rect: &TypedRect<T, U>) -> bool {
rect.is_empty() ||
(self.min_x() <= rect.min_x() && rect.max_x() <= self.max_x() &&
self.min_y() <= rect.min_y() && rect.max_y() <= self.max_y())
}
#[inline]
pub fn inflate(&self, width: T, height: T) -> TypedRect<T, U> {
TypedRect::new(
TypedPoint2D::new(self.origin.x - width, self.origin.y - height),
TypedSize2D::new(self.size.width + width + width, self.size.height + height + height),
)
}
#[inline]
pub fn inflate_typed(&self, width: Length<T, U>, height: Length<T, U>) -> TypedRect<T, U> {
self.inflate(width.get(), height.get())
}
#[inline]
pub fn top_right(&self) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.max_x(), self.origin.y)
}
#[inline]
pub fn bottom_left(&self) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.origin.x, self.max_y())
}
#[inline]
pub fn bottom_right(&self) -> TypedPoint2D<T, U> {
TypedPoint2D::new(self.max_x(), self.max_y())
}
#[inline]
pub fn translate_by_size(&self, size: &TypedSize2D<T, U>) -> TypedRect<T, U> {
self.translate(&TypedPoint2D::new(size.width, size.height))
}
    /// Returns the smallest rectangle containing the given points.
pub fn from_points(points: &[TypedPoint2D<T, U>]) -> Self {
if points.len() == 0 {
return TypedRect::zero();
}
let (mut min_x, mut min_y) = (points[0].x, points[0].y);
let (mut max_x, mut max_y) = (min_x, min_y);
for point in &points[1..] {
if point.x < min_x {
min_x = point.x
}
if point.x > max_x {
max_x = point.x
}
if point.y < min_y {
min_y = point.y
}
if point.y > max_y {
max_y = point.y
}
}
TypedRect::new(TypedPoint2D::new(min_x, min_y),
TypedSize2D::new(max_x - min_x, max_y - min_y))
}
}
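// Illustrative sketch (not from the upstream euclid source): from_points()
// above computes the axis-aligned bounding rectangle of a slice of points.
#[cfg(test)]
mod from_points_sketch {
    use point::Point2D;
    use size::Size2D;
    use super::Rect;
    #[test]
    fn bounding_rect_of_points() {
        let points = [Point2D::new(1.0f32, 5.0),
                      Point2D::new(4.0, 2.0),
                      Point2D::new(3.0, 3.0)];
        let bounds = Rect::from_points(&points[..]);
        assert_eq!(bounds, Rect::new(Point2D::new(1.0, 2.0), Size2D::new(3.0, 3.0)));
    }
}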
impl<T, U> TypedRect<T, U>
where T: Copy + Clone + PartialOrd + Add<T, Output=T> + Sub<T, Output=T> + Zero {
#[inline]
pub fn union(&self, other: &TypedRect<T, U>) -> TypedRect<T, U> {
if self.size == Zero::zero() {
return *other;
}
if other.size == Zero::zero() {
return *self;
}
let upper_left = TypedPoint2D::new(min(self.min_x(), other.min_x()),
min(self.min_y(), other.min_y()));
let lower_right_x = max(self.max_x(), other.max_x());
let lower_right_y = max(self.max_y(), other.max_y());
TypedRect::new(
upper_left,
TypedSize2D::new(lower_right_x - upper_left.x, lower_right_y - upper_left.y)
)
}
}
impl<T, U> TypedRect<T, U> {
#[inline]
pub fn scale<Scale: Copy>(&self, x: Scale, y: Scale) -> TypedRect<T, U>
where T: Copy + Clone + Mul<Scale, Output=T> {
TypedRect::new(
TypedPoint2D::new(self.origin.x * x, self.origin.y * y),
TypedSize2D::new(self.size.width * x, self.size.height * y)
)
}
}
impl<T: Copy + PartialEq + Zero, U> TypedRect<T, U> {
/// Constructor, setting all sides to zero.
pub fn zero() -> TypedRect<T, U> {
TypedRect::new(
TypedPoint2D::zero(),
TypedSize2D::zero(),
)
}
/// Returns true if the size is zero, regardless of the origin's value.
pub fn is_empty(&self) -> bool {
self.size.width == Zero::zero() || self.size.height == Zero::zero()
}
}
pub fn min<T: Clone + PartialOrd>(x: T, y: T) -> T {
if x <= y { x } else { y }
}
pub fn max<T: Clone + PartialOrd>(x: T, y: T) -> T {
if x >= y { x } else { y }
}
impl<T: Copy + Mul<T, Output=T>, U> Mul<T> for TypedRect<T, U> {
type Output = TypedRect<T, U>;
#[inline]
fn mul(self, scale: T) -> TypedRect<T, U> {
TypedRect::new(self.origin * scale, self.size * scale)
}
}
impl<T: Copy + Div<T, Output=T>, U> Div<T> for TypedRect<T, U> {
type Output = TypedRect<T, U>;
#[inline]
fn div(self, scale: T) -> TypedRect<T, U> {
TypedRect::new(self.origin / scale, self.size / scale)
}
}
impl<T: Copy + Mul<T, Output=T>, U1, U2> Mul<ScaleFactor<T, U1, U2>> for TypedRect<T, U1> {
type Output = TypedRect<T, U2>;
#[inline]
fn mul(self, scale: ScaleFactor<T, U1, U2>) -> TypedRect<T, U2> {
TypedRect::new(self.origin * scale, self.size * scale)
}
}
impl<T: Copy + Div<T, Output=T>, U1, U2> Div<ScaleFactor<T, U1, U2>> for TypedRect<T, U2> {
type Output = TypedRect<T, U1>;
#[inline]
fn div(self, scale: ScaleFactor<T, U1, U2>) -> TypedRect<T, U1> {
TypedRect::new(self.origin / scale, self.size / scale)
}
}
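// Illustrative sketch (not from the upstream euclid source): multiplying by a
// ScaleFactor converts both the origin and the size to the destination unit.
// The Mm and Cm tags below are invented for this example only.
#[cfg(test)]
mod rect_scale_factor_sketch {
    use point::TypedPoint2D;
    use size::TypedSize2D;
    use scale_factor::ScaleFactor;
    use super::TypedRect;
    pub enum Mm {}
    pub enum Cm {}
    #[test]
    fn rect_unit_conversion() {
        let r: TypedRect<f32, Mm> = TypedRect::new(TypedPoint2D::new(10.0, 20.0),
                                                   TypedSize2D::new(30.0, 40.0));
        let cm_per_mm: ScaleFactor<f32, Mm, Cm> = ScaleFactor::new(0.1);
        let r_cm: TypedRect<f32, Cm> = r * cm_per_mm;
        assert_eq!(r_cm, TypedRect::new(TypedPoint2D::new(1.0, 2.0),
                                        TypedSize2D::new(3.0, 4.0)));
    }
}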
impl<T: Copy, Unit> TypedRect<T, Unit> {
/// Drop the units, preserving only the numeric value.
pub fn to_untyped(&self) -> Rect<T> {
TypedRect::new(self.origin.to_untyped(), self.size.to_untyped())
}
/// Tag a unitless value with units.
pub fn from_untyped(r: &Rect<T>) -> TypedRect<T, Unit> {
TypedRect::new(TypedPoint2D::from_untyped(&r.origin), TypedSize2D::from_untyped(&r.size))
}
}
impl<T0: NumCast + Copy, Unit> TypedRect<T0, Unit> {
/// Cast from one numeric representation to another, preserving the units.
///
/// When casting from floating point to integer coordinates, the decimals are truncated
/// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using round(), round_in() or round_out() before casting.
pub fn cast<T1: NumCast + Copy>(&self) -> Option<TypedRect<T1, Unit>> {
match (self.origin.cast(), self.size.cast()) {
(Some(origin), Some(size)) => Some(TypedRect::new(origin, size)),
_ => None
}
}
}
impl<T: Floor + Ceil + Round + Add<T, Output=T> + Sub<T, Output=T>, U> TypedRect<T, U> {
/// Return a rectangle with edges rounded to integer coordinates, such that
/// the returned rectangle has the same set of pixel centers as the original
/// one.
/// Edges at offset 0.5 round up.
/// Suitable for most places where integral device coordinates
/// are needed, but note that any translation should be applied first to
/// avoid pixel rounding errors.
    /// Note that this is *not* rounding to the nearest integer if the values are negative:
    /// edges are always rounded as floor(n + 0.5).
pub fn round(&self) -> Self {
let origin = self.origin.round();
let size = self.origin.add_size(&self.size).round() - origin;
TypedRect::new(origin, TypedSize2D::new(size.x, size.y))
}
/// Return a rectangle with edges rounded to integer coordinates, such that
/// the original rectangle contains the resulting rectangle.
pub fn round_in(&self) -> Self {
let origin = self.origin.ceil();
let size = self.origin.add_size(&self.size).floor() - origin;
TypedRect::new(origin, TypedSize2D::new(size.x, size.y))
}
/// Return a rectangle with edges rounded to integer coordinates, such that
/// the original rectangle is contained in the resulting rectangle.
pub fn round_out(&self) -> Self {
let origin = self.origin.floor();
let size = self.origin.add_size(&self.size).ceil() - origin;
TypedRect::new(origin, TypedSize2D::new(size.x, size.y))
}
}
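// Illustrative sketch (not from the upstream euclid source): contrasts the
// rounding modes described above on a rectangle with fractional edges.
#[cfg(test)]
mod rounding_sketch {
    use point::Point2D;
    use size::Size2D;
    use super::Rect;
    #[test]
    fn round_in_vs_round_out() {
        let r = Rect::new(Point2D::new(0.3f32, 0.3), Size2D::new(2.4, 2.4));
        // round_out() always contains the original rectangle.
        assert_eq!(r.round_out(), Rect::new(Point2D::new(0.0, 0.0), Size2D::new(3.0, 3.0)));
        // round_in() is always contained by the original rectangle.
        assert_eq!(r.round_in(), Rect::new(Point2D::new(1.0, 1.0), Size2D::new(1.0, 1.0)));
    }
}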
// Convenience functions for common casts
impl<T: NumCast + Copy, Unit> TypedRect<T, Unit> {
/// Cast into an f32 vector.
pub fn to_f32(&self) -> TypedRect<f32, Unit> {
self.cast().unwrap()
}
    /// Cast into a usize vector, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), round_in() or round_out() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_uint(&self) -> TypedRect<usize, Unit> {
self.cast().unwrap()
}
/// Cast into an i32 vector, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), round_in() or round_out() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i32(&self) -> TypedRect<i32, Unit> {
self.cast().unwrap()
}
/// Cast into an i64 vector, truncating decimals if any.
///
/// When casting from floating point vectors, it is worth considering whether
/// to round(), round_in() or round_out() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i64(&self) -> TypedRect<i64, Unit> {
self.cast().unwrap()
}
}
/// Shorthand for TypedRect::new(TypedPoint2D::new(x, y), TypedSize2D::new(w, h)).
pub fn rect<T: Copy, U>(x: T, y: T, w: T, h: T) -> TypedRect<T, U> {
TypedRect::new(TypedPoint2D::new(x, y), TypedSize2D::new(w, h))
}
#[cfg(test)]
mod tests {
use point::Point2D;
use size::Size2D;
use super::*;
#[test]
fn test_min_max() {
assert!(min(0u32, 1u32) == 0u32);
assert!(min(-1.0f32, 0.0f32) == -1.0f32);
assert!(max(0u32, 1u32) == 1u32);
assert!(max(-1.0f32, 0.0f32) == 0.0f32);
}
#[test]
fn test_translate() {
let p = Rect::new(Point2D::new(0u32, 0u32), Size2D::new(50u32, 40u32));
let pp = p.translate(&Point2D::new(10,15));
assert!(pp.size.width == 50);
assert!(pp.size.height == 40);
assert!(pp.origin.x == 10);
assert!(pp.origin.y == 15);
let r = Rect::new(Point2D::new(-10, -5), Size2D::new(50, 40));
let rr = r.translate(&Point2D::new(0,-10));
assert!(rr.size.width == 50);
assert!(rr.size.height == 40);
assert!(rr.origin.x == -10);
assert!(rr.origin.y == -15);
}
#[test]
fn test_translate_by_size() {
let p = Rect::new(Point2D::new(0u32, 0u32), Size2D::new(50u32, 40u32));
let pp = p.translate_by_size(&Size2D::new(10,15));
assert!(pp.size.width == 50);
assert!(pp.size.height == 40);
assert!(pp.origin.x == 10);
assert!(pp.origin.y == 15);
let r = Rect::new(Point2D::new(-10, -5), Size2D::new(50, 40));
let rr = r.translate_by_size(&Size2D::new(0,-10));
assert!(rr.size.width == 50);
assert!(rr.size.height == 40);
assert!(rr.origin.x == -10);
assert!(rr.origin.y == -15);
}
#[test]
fn test_union() {
let p = Rect::new(Point2D::new(0, 0), Size2D::new(50, 40));
let q = Rect::new(Point2D::new(20,20), Size2D::new(5, 5));
let r = Rect::new(Point2D::new(-15, -30), Size2D::new(200, 15));
let s = Rect::new(Point2D::new(20, -15), Size2D::new(250, 200));
let pq = p.union(&q);
assert!(pq.origin == Point2D::new(0, 0));
assert!(pq.size == Size2D::new(50, 40));
let pr = p.union(&r);
assert!(pr.origin == Point2D::new(-15, -30));
assert!(pr.size == Size2D::new(200, 70));
let ps = p.union(&s);
assert!(ps.origin == Point2D::new(0, -15));
assert!(ps.size == Size2D::new(270, 200));
}
#[test]
fn test_intersection() {
let p = Rect::new(Point2D::new(0, 0), Size2D::new(10, 20));
let q = Rect::new(Point2D::new(5, 15), Size2D::new(10, 10));
let r = Rect::new(Point2D::new(-5, -5), Size2D::new(8, 8));
let pq = p.intersection(&q);
assert!(pq.is_some());
let pq = pq.unwrap();
assert!(pq.origin == Point2D::new(5, 15));
assert!(pq.size == Size2D::new(5, 5));
let pr = p.intersection(&r);
assert!(pr.is_some());
let pr = pr.unwrap();
assert!(pr.origin == Point2D::new(0, 0));
assert!(pr.size == Size2D::new(3, 3));
let qr = q.intersection(&r);
assert!(qr.is_none());
}
#[test]
fn test_contains() {
let r = Rect::new(Point2D::new(-20, 15), Size2D::new(100, 200));
assert!(r.contains(&Point2D::new(0, 50)));
assert!(r.contains(&Point2D::new(-10, 200)));
// The `contains` method is inclusive of the top/left edges, but not the
// bottom/right edges.
assert!(r.contains(&Point2D::new(-20, 15)));
assert!(!r.contains(&Point2D::new(80, 15)));
assert!(!r.contains(&Point2D::new(80, 215)));
assert!(!r.contains(&Point2D::new(-20, 215)));
// Points beyond the top-left corner.
assert!(!r.contains(&Point2D::new(-25, 15)));
assert!(!r.contains(&Point2D::new(-15, 10)));
// Points beyond the top-right corner.
assert!(!r.contains(&Point2D::new(85, 20)));
assert!(!r.contains(&Point2D::new(75, 10)));
// Points beyond the bottom-right corner.
assert!(!r.contains(&Point2D::new(85, 210)));
assert!(!r.contains(&Point2D::new(75, 220)));
// Points beyond the bottom-left corner.
assert!(!r.contains(&Point2D::new(-25, 210)));
assert!(!r.contains(&Point2D::new(-15, 220)));
let r = Rect::new(Point2D::new(-20.0, 15.0), Size2D::new(100.0, 200.0));
assert!(r.contains_rect(&r));
assert!(!r.contains_rect(&r.translate(&Point2D::new( 0.1, 0.0))));
assert!(!r.contains_rect(&r.translate(&Point2D::new(-0.1, 0.0))));
assert!(!r.contains_rect(&r.translate(&Point2D::new( 0.0, 0.1))));
assert!(!r.contains_rect(&r.translate(&Point2D::new( 0.0, -0.1))));
        // Empty rectangles are always considered to be contained in other rectangles,
        // even if their origin is not.
let p = Point2D::new(1.0, 1.0);
assert!(!r.contains(&p));
assert!(r.contains_rect(&Rect::new(p, Size2D::zero())));
}
#[test]
fn test_scale() {
let p = Rect::new(Point2D::new(0u32, 0u32), Size2D::new(50u32, 40u32));
let pp = p.scale(10, 15);
assert!(pp.size.width == 500);
assert!(pp.size.height == 600);
assert!(pp.origin.x == 0);
assert!(pp.origin.y == 0);
let r = Rect::new(Point2D::new(-10, -5), Size2D::new(50, 40));
let rr = r.scale(1, 20);
assert!(rr.size.width == 50);
assert!(rr.size.height == 800);
assert!(rr.origin.x == -10);
assert!(rr.origin.y == -100);
}
#[test]
fn test_inflate() {
let p = Rect::new(Point2D::new(0, 0), Size2D::new(10, 10));
let pp = p.inflate(10, 20);
assert!(pp.size.width == 30);
assert!(pp.size.height == 50);
assert!(pp.origin.x == -10);
assert!(pp.origin.y == -20);
let r = Rect::new(Point2D::new(0, 0), Size2D::new(10, 20));
let rr = r.inflate(-2, -5);
assert!(rr.size.width == 6);
assert!(rr.size.height == 10);
assert!(rr.origin.x == 2);
assert!(rr.origin.y == 5);
}
#[test]
fn test_min_max_x_y() {
let p = Rect::new(Point2D::new(0u32, 0u32), Size2D::new(50u32, 40u32));
assert!(p.max_y() == 40);
assert!(p.min_y() == 0);
assert!(p.max_x() == 50);
assert!(p.min_x() == 0);
let r = Rect::new(Point2D::new(-10, -5), Size2D::new(50, 40));
assert!(r.max_y() == 35);
assert!(r.min_y() == -5);
assert!(r.max_x() == 40);
assert!(r.min_x() == -10);
}
#[test]
fn test_is_empty() {
assert!(Rect::new(Point2D::new(0u32, 0u32), Size2D::new(0u32, 0u32)).is_empty());
assert!(Rect::new(Point2D::new(0u32, 0u32), Size2D::new(10u32, 0u32)).is_empty());
assert!(Rect::new(Point2D::new(0u32, 0u32), Size2D::new(0u32, 10u32)).is_empty());
assert!(!Rect::new(Point2D::new(0u32, 0u32), Size2D::new(1u32, 1u32)).is_empty());
assert!(Rect::new(Point2D::new(10u32, 10u32), Size2D::new(0u32, 0u32)).is_empty());
assert!(Rect::new(Point2D::new(10u32, 10u32), Size2D::new(10u32, 0u32)).is_empty());
assert!(Rect::new(Point2D::new(10u32, 10u32), Size2D::new(0u32, 10u32)).is_empty());
assert!(!Rect::new(Point2D::new(10u32, 10u32), Size2D::new(1u32, 1u32)).is_empty());
}
#[test]
fn test_round() {
let mut x = -2.0;
let mut y = -2.0;
let mut w = -2.0;
let mut h = -2.0;
while x < 2.0 {
while y < 2.0 {
while w < 2.0 {
while h < 2.0 {
let rect = Rect::new(Point2D::new(x, y), Size2D::new(w, h));
assert!(rect.contains_rect(&rect.round_in()));
assert!(rect.round_in().inflate(1.0, 1.0).contains_rect(&rect));
assert!(rect.round_out().contains_rect(&rect));
assert!(rect.inflate(1.0, 1.0).contains_rect(&rect.round_out()));
assert!(rect.inflate(1.0, 1.0).contains_rect(&rect.round()));
assert!(rect.round().inflate(1.0, 1.0).contains_rect(&rect));
h += 0.1;
}
w += 0.1;
}
y += 0.1;
}
x += 0.1
}
}
}


@@ -1,171 +0,0 @@
// Copyright 2014 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type-checked scaling factor between units.
use num::One;
use heapsize::HeapSizeOf;
use num_traits::NumCast;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
use std::ops::{Add, Mul, Sub, Div};
use std::marker::PhantomData;
/// A scaling factor between two different units of measurement.
///
/// This is effectively a type-safe float, intended to be used in combination with other types like
/// `length::Length` to enforce conversion between systems of measurement at compile time.
///
/// `Src` and `Dst` represent the units before and after multiplying a value by a `ScaleFactor`. They
/// may be types without values, such as empty enums. For example:
///
/// ```rust
/// use euclid::scale_factor::ScaleFactor;
/// use euclid::length::Length;
/// enum Mm {};
/// enum Inch {};
///
/// let mm_per_inch: ScaleFactor<f32, Inch, Mm> = ScaleFactor::new(25.4);
///
/// let one_foot: Length<f32, Inch> = Length::new(12.0);
/// let one_foot_in_mm: Length<f32, Mm> = one_foot * mm_per_inch;
/// ```
#[derive(RustcDecodable, RustcEncodable)]
pub struct ScaleFactor<T, Src, Dst>(pub T, PhantomData<(Src, Dst)>);
impl<T: HeapSizeOf, Src, Dst> HeapSizeOf for ScaleFactor<T, Src, Dst> {
fn heap_size_of_children(&self) -> usize {
self.0.heap_size_of_children()
}
}
impl<T, Src, Dst> Deserialize for ScaleFactor<T, Src, Dst> where T: Deserialize {
fn deserialize<D>(deserializer: &mut D) -> Result<ScaleFactor<T, Src, Dst>, D::Error>
where D: Deserializer {
Ok(ScaleFactor(try!(Deserialize::deserialize(deserializer)), PhantomData))
}
}
impl<T, Src, Dst> Serialize for ScaleFactor<T, Src, Dst> where T: Serialize {
fn serialize<S>(&self, serializer: &mut S) -> Result<(),S::Error> where S: Serializer {
self.0.serialize(serializer)
}
}
impl<T, Src, Dst> ScaleFactor<T, Src, Dst> {
pub fn new(x: T) -> ScaleFactor<T, Src, Dst> {
ScaleFactor(x, PhantomData)
}
}
impl<T: Clone, Src, Dst> ScaleFactor<T, Src, Dst> {
pub fn get(&self) -> T {
self.0.clone()
}
}
impl<T: Clone + One + Div<T, Output=T>, Src, Dst> ScaleFactor<T, Src, Dst> {
/// The inverse ScaleFactor (1.0 / self).
pub fn inv(&self) -> ScaleFactor<T, Dst, Src> {
let one: T = One::one();
ScaleFactor::new(one / self.get())
}
}
// scale0 * scale1
impl<T: Clone + Mul<T, Output=T>, A, B, C>
Mul<ScaleFactor<T, B, C>> for ScaleFactor<T, A, B> {
type Output = ScaleFactor<T, A, C>;
#[inline]
fn mul(self, other: ScaleFactor<T, B, C>) -> ScaleFactor<T, A, C> {
ScaleFactor::new(self.get() * other.get())
}
}
// scale0 + scale1
impl<T: Clone + Add<T, Output=T>, Src, Dst> Add for ScaleFactor<T, Src, Dst> {
type Output = ScaleFactor<T, Src, Dst>;
#[inline]
fn add(self, other: ScaleFactor<T, Src, Dst>) -> ScaleFactor<T, Src, Dst> {
ScaleFactor::new(self.get() + other.get())
}
}
// scale0 - scale1
impl<T: Clone + Sub<T, Output=T>, Src, Dst> Sub for ScaleFactor<T, Src, Dst> {
type Output = ScaleFactor<T, Src, Dst>;
#[inline]
fn sub(self, other: ScaleFactor<T, Src, Dst>) -> ScaleFactor<T, Src, Dst> {
ScaleFactor::new(self.get() - other.get())
}
}
impl<T: NumCast + Clone, Src, Dst0> ScaleFactor<T, Src, Dst0> {
/// Cast from one numeric representation to another, preserving the units.
pub fn cast<T1: NumCast + Clone>(&self) -> Option<ScaleFactor<T1, Src, Dst0>> {
NumCast::from(self.get()).map(ScaleFactor::new)
}
}
// FIXME: Switch to `derive(PartialEq, Clone)` after this Rust issue is fixed:
// https://github.com/mozilla/rust/issues/7671
impl<T: PartialEq, Src, Dst> PartialEq for ScaleFactor<T, Src, Dst> {
fn eq(&self, other: &ScaleFactor<T, Src, Dst>) -> bool {
self.0 == other.0
}
}
impl<T: Clone, Src, Dst> Clone for ScaleFactor<T, Src, Dst> {
fn clone(&self) -> ScaleFactor<T, Src, Dst> {
ScaleFactor::new(self.get())
}
}
impl<T: Copy, Src, Dst> Copy for ScaleFactor<T, Src, Dst> {}
impl<T: fmt::Debug, Src, Dst> fmt::Debug for ScaleFactor<T, Src, Dst> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl<T: fmt::Display, Src, Dst> fmt::Display for ScaleFactor<T, Src, Dst> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
#[cfg(test)]
mod tests {
use super::ScaleFactor;
enum Inch {}
enum Cm {}
enum Mm {}
#[test]
fn test_scale_factor() {
let mm_per_inch: ScaleFactor<f32, Inch, Mm> = ScaleFactor::new(25.4);
let cm_per_mm: ScaleFactor<f32, Mm, Cm> = ScaleFactor::new(0.1);
let mm_per_cm: ScaleFactor<f32, Cm, Mm> = cm_per_mm.inv();
assert_eq!(mm_per_cm.get(), 10.0);
let cm_per_inch: ScaleFactor<f32, Inch, Cm> = mm_per_inch * cm_per_mm;
assert_eq!(cm_per_inch, ScaleFactor::new(2.54));
let a: ScaleFactor<isize, Inch, Inch> = ScaleFactor::new(2);
let b: ScaleFactor<isize, Inch, Inch> = ScaleFactor::new(3);
assert!(a != b);
assert_eq!(a, a.clone());
assert_eq!(a.clone() + b.clone(), ScaleFactor::new(5));
assert_eq!(a - b, ScaleFactor::new(-1));
}
}


@@ -1,283 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A group of side offsets, which correspond to top/left/bottom/right for borders, padding,
//! and margins in CSS.
use super::UnknownUnit;
use length::Length;
use num::Zero;
use std::fmt;
use std::ops::Add;
use std::marker::PhantomData;
#[cfg(feature = "unstable")]
use heapsize::HeapSizeOf;
/// A group of side offsets, which correspond to top/left/bottom/right for borders, padding,
/// and margins in CSS, optionally tagged with a unit.
define_matrix! {
pub struct TypedSideOffsets2D<T, U> {
pub top: T,
pub right: T,
pub bottom: T,
pub left: T,
}
}
impl<T: fmt::Debug, U> fmt::Debug for TypedSideOffsets2D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({:?},{:?},{:?},{:?})",
self.top, self.right, self.bottom, self.left)
}
}
/// The default side offset type with no unit.
pub type SideOffsets2D<T> = TypedSideOffsets2D<T, UnknownUnit>;
impl<T: Copy, U> TypedSideOffsets2D<T, U> {
/// Constructor taking a scalar for each side.
pub fn new(top: T, right: T, bottom: T, left: T) -> TypedSideOffsets2D<T, U> {
TypedSideOffsets2D {
top: top,
right: right,
bottom: bottom,
left: left,
_unit: PhantomData,
}
}
/// Constructor taking a typed Length for each side.
pub fn from_lengths(top: Length<T, U>,
right: Length<T, U>,
bottom: Length<T, U>,
left: Length<T, U>) -> TypedSideOffsets2D<T, U> {
TypedSideOffsets2D::new(top.0, right.0, bottom.0, left.0)
}
/// Access self.top as a typed Length instead of a scalar value.
pub fn top_typed(&self) -> Length<T, U> { Length::new(self.top) }
/// Access self.right as a typed Length instead of a scalar value.
pub fn right_typed(&self) -> Length<T, U> { Length::new(self.right) }
/// Access self.bottom as a typed Length instead of a scalar value.
pub fn bottom_typed(&self) -> Length<T, U> { Length::new(self.bottom) }
/// Access self.left as a typed Length instead of a scalar value.
pub fn left_typed(&self) -> Length<T, U> { Length::new(self.left) }
/// Constructor setting the same value to all sides, taking a scalar value directly.
pub fn new_all_same(all: T) -> TypedSideOffsets2D<T, U> {
TypedSideOffsets2D::new(all, all, all, all)
}
/// Constructor setting the same value to all sides, taking a typed Length.
pub fn from_length_all_same(all: Length<T, U>) -> TypedSideOffsets2D<T, U> {
TypedSideOffsets2D::new_all_same(all.0)
}
}
impl<T, U> TypedSideOffsets2D<T, U> where T: Add<T, Output=T> + Copy {
pub fn horizontal(&self) -> T {
self.left + self.right
}
pub fn vertical(&self) -> T {
self.top + self.bottom
}
pub fn horizontal_typed(&self) -> Length<T, U> {
Length::new(self.horizontal())
}
pub fn vertical_typed(&self) -> Length<T, U> {
Length::new(self.vertical())
}
}
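// Illustrative sketch (not from the upstream euclid source): horizontal() and
// vertical() above sum the opposing sides, e.g. the total space taken by
// padding along each axis.
#[cfg(test)]
mod horizontal_vertical_sketch {
    use super::SideOffsets2D;
    #[test]
    fn sums_opposing_sides() {
        let padding = SideOffsets2D::new(10, 4, 2, 4); // top, right, bottom, left
        assert_eq!(padding.horizontal(), 8);
        assert_eq!(padding.vertical(), 12);
    }
}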
impl<T, U> Add for TypedSideOffsets2D<T, U> where T : Copy + Add<T, Output=T> {
type Output = TypedSideOffsets2D<T, U>;
fn add(self, other: TypedSideOffsets2D<T, U>) -> TypedSideOffsets2D<T, U> {
TypedSideOffsets2D::new(
self.top + other.top,
self.right + other.right,
self.bottom + other.bottom,
self.left + other.left,
)
}
}
impl<T: Copy + Zero, U> TypedSideOffsets2D<T, U> {
/// Constructor, setting all sides to zero.
pub fn zero() -> TypedSideOffsets2D<T, U> {
TypedSideOffsets2D::new(
Zero::zero(),
Zero::zero(),
Zero::zero(),
Zero::zero(),
)
}
}
/// A SIMD enabled version of TypedSideOffsets2D specialized for i32.
#[cfg(feature = "unstable")]
#[derive(Clone, Copy, PartialEq)]
#[repr(simd)]
pub struct SideOffsets2DSimdI32 {
pub top: i32,
pub bottom: i32,
pub right: i32,
pub left: i32,
}
#[cfg(feature = "unstable")]
impl HeapSizeOf for SideOffsets2DSimdI32 {
fn heap_size_of_children(&self) -> usize { 0 }
}
#[cfg(feature = "unstable")]
impl SideOffsets2DSimdI32 {
#[inline]
pub fn new(top: i32, right: i32, bottom: i32, left: i32) -> SideOffsets2DSimdI32 {
SideOffsets2DSimdI32 {
top: top,
bottom: bottom,
right: right,
left: left,
}
}
}
#[cfg(feature = "unstable")]
impl SideOffsets2DSimdI32 {
#[inline]
pub fn new_all_same(all: i32) -> SideOffsets2DSimdI32 {
SideOffsets2DSimdI32::new(all.clone(), all.clone(), all.clone(), all.clone())
}
}
#[cfg(feature = "unstable")]
impl SideOffsets2DSimdI32 {
#[inline]
pub fn horizontal(&self) -> i32 {
self.left + self.right
}
#[inline]
pub fn vertical(&self) -> i32 {
self.top + self.bottom
}
}
/*impl Add for SideOffsets2DSimdI32 {
type Output = SideOffsets2DSimdI32;
#[inline]
fn add(self, other: SideOffsets2DSimdI32) -> SideOffsets2DSimdI32 {
self + other // Use SIMD addition
}
}*/
#[cfg(feature = "unstable")]
impl SideOffsets2DSimdI32 {
#[inline]
pub fn zero() -> SideOffsets2DSimdI32 {
SideOffsets2DSimdI32 {
top: 0,
bottom: 0,
right: 0,
left: 0,
}
}
#[cfg(not(target_arch = "x86_64"))]
#[inline]
pub fn is_zero(&self) -> bool {
self.top == 0 && self.right == 0 && self.bottom == 0 && self.left == 0
}
#[cfg(target_arch = "x86_64")]
#[inline]
pub fn is_zero(&self) -> bool {
let is_zero: bool;
unsafe {
asm! {
"ptest $1, $1
setz $0"
: "=r"(is_zero)
: "x"(*self)
:
: "intel"
};
}
is_zero
}
}
#[cfg(feature = "unstable")]
#[cfg(test)]
mod tests {
use super::SideOffsets2DSimdI32;
#[test]
fn test_is_zero() {
assert!(SideOffsets2DSimdI32::new_all_same(0).is_zero());
assert!(!SideOffsets2DSimdI32::new_all_same(1).is_zero());
assert!(!SideOffsets2DSimdI32::new(1, 0, 0, 0).is_zero());
assert!(!SideOffsets2DSimdI32::new(0, 1, 0, 0).is_zero());
assert!(!SideOffsets2DSimdI32::new(0, 0, 1, 0).is_zero());
assert!(!SideOffsets2DSimdI32::new(0, 0, 0, 1).is_zero());
}
}
#[cfg(feature = "unstable")]
#[cfg(bench)]
mod bench {
use test::BenchHarness;
use std::num::Zero;
use rand::{XorShiftRng, Rng};
use super::SideOffsets2DSimdI32;
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
#[bench]
fn bench_naive_is_zero(bh: &mut BenchHarness) {
fn is_zero(x: &SideOffsets2DSimdI32) -> bool {
x.top.is_zero() && x.right.is_zero() && x.bottom.is_zero() && x.left.is_zero()
}
let mut rng = XorShiftRng::new().unwrap();
bh.iter(|| is_zero(&rng.gen::<SideOffsets2DSimdI32>()))
}
#[bench]
fn bench_is_zero(bh: &mut BenchHarness) {
let mut rng = XorShiftRng::new().unwrap();
bh.iter(|| rng.gen::<SideOffsets2DSimdI32>().is_zero())
}
#[bench]
fn bench_naive_add(bh: &mut BenchHarness) {
fn add(x: &SideOffsets2DSimdI32, y: &SideOffsets2DSimdI32) -> SideOffsets2DSimdI32 {
SideOffsets2DSimdI32 {
top: x.top + y.top,
right: x.right + y.right,
bottom: x.bottom + y.bottom,
left: x.left + y.left,
}
}
let mut rng = XorShiftRng::new().unwrap();
bh.iter(|| add(&rng.gen::<SideOffsets2DSimdI32>(), &rng.gen::<SideOffsets2DSimdI32>()))
}
#[bench]
fn bench_add(bh: &mut BenchHarness) {
let mut rng = XorShiftRng::new().unwrap();
bh.iter(|| rng.gen::<SideOffsets2DSimdI32>() + rng.gen::<SideOffsets2DSimdI32>())
}
}

third_party/rust/euclid-0.10.5/src/size.rs (vendored)

@@ -1,276 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::UnknownUnit;
use length::Length;
use scale_factor::ScaleFactor;
use num::*;
use num_traits::NumCast;
use std::fmt;
use std::ops::{Add, Div, Mul, Sub};
use std::marker::PhantomData;
/// A 2d size tagged with a unit.
define_matrix! {
#[derive(RustcDecodable, RustcEncodable)]
pub struct TypedSize2D<T, U> {
pub width: T,
pub height: T,
}
}
/// Default 2d size type with no unit.
///
/// `Size2D` provides the same methods as `TypedSize2D`.
pub type Size2D<T> = TypedSize2D<T, UnknownUnit>;
impl<T: fmt::Debug, U> fmt::Debug for TypedSize2D<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}×{:?}", self.width, self.height)
}
}
impl<T: fmt::Display, U> fmt::Display for TypedSize2D<T, U> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "({}x{})", self.width, self.height)
}
}
impl<T, U> TypedSize2D<T, U> {
/// Constructor taking scalar values.
pub fn new(width: T, height: T) -> TypedSize2D<T, U> {
TypedSize2D {
width: width,
height: height,
_unit: PhantomData,
}
}
}
impl<T: Clone, U> TypedSize2D<T, U> {
    /// Constructor taking strongly typed lengths.
pub fn from_lengths(width: Length<T, U>, height: Length<T, U>) -> TypedSize2D<T, U> {
TypedSize2D::new(width.get(), height.get())
}
}
impl<T: Round, U> TypedSize2D<T, U> {
/// Rounds each component to the nearest integer value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn round(&self) -> Self {
TypedSize2D::new(self.width.round(), self.height.round())
}
}
impl<T: Ceil, U> TypedSize2D<T, U> {
    /// Rounds each component to the smallest integer equal to or greater than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn ceil(&self) -> Self {
TypedSize2D::new(self.width.ceil(), self.height.ceil())
}
}
impl<T: Floor, U> TypedSize2D<T, U> {
    /// Rounds each component to the largest integer equal to or lower than the original value.
///
/// This behavior is preserved for negative values (unlike the basic cast).
pub fn floor(&self) -> Self {
TypedSize2D::new(self.width.floor(), self.height.floor())
}
}
impl<T: Copy + Add<T, Output=T>, U> Add for TypedSize2D<T, U> {
type Output = TypedSize2D<T, U>;
fn add(self, other: TypedSize2D<T, U>) -> TypedSize2D<T, U> {
TypedSize2D::new(self.width + other.width, self.height + other.height)
}
}
impl<T: Copy + Sub<T, Output=T>, U> Sub for TypedSize2D<T, U> {
type Output = TypedSize2D<T, U>;
fn sub(self, other: TypedSize2D<T, U>) -> TypedSize2D<T, U> {
TypedSize2D::new(self.width - other.width, self.height - other.height)
}
}
impl<T: Copy + Clone + Mul<T, Output=U>, U> TypedSize2D<T, U> {
pub fn area(&self) -> U { self.width * self.height }
}
impl<T: Zero, U> TypedSize2D<T, U> {
pub fn zero() -> TypedSize2D<T, U> {
TypedSize2D::new(
Zero::zero(),
Zero::zero(),
)
}
}
impl<T: Zero, U> Zero for TypedSize2D<T, U> {
fn zero() -> TypedSize2D<T, U> {
TypedSize2D::new(
Zero::zero(),
Zero::zero(),
)
}
}
impl<T: Copy + Mul<T, Output=T>, U> Mul<T> for TypedSize2D<T, U> {
type Output = TypedSize2D<T, U>;
#[inline]
fn mul(self, scale: T) -> TypedSize2D<T, U> {
TypedSize2D::new(self.width * scale, self.height * scale)
}
}
impl<T: Copy + Div<T, Output=T>, U> Div<T> for TypedSize2D<T, U> {
type Output = TypedSize2D<T, U>;
#[inline]
fn div(self, scale: T) -> TypedSize2D<T, U> {
TypedSize2D::new(self.width / scale, self.height / scale)
}
}
impl<T: Copy + Mul<T, Output=T>, U1, U2> Mul<ScaleFactor<T, U1, U2>> for TypedSize2D<T, U1> {
type Output = TypedSize2D<T, U2>;
#[inline]
fn mul(self, scale: ScaleFactor<T, U1, U2>) -> TypedSize2D<T, U2> {
TypedSize2D::new(self.width * scale.get(), self.height * scale.get())
}
}
impl<T: Copy + Div<T, Output=T>, U1, U2> Div<ScaleFactor<T, U1, U2>> for TypedSize2D<T, U2> {
type Output = TypedSize2D<T, U1>;
#[inline]
fn div(self, scale: ScaleFactor<T, U1, U2>) -> TypedSize2D<T, U1> {
TypedSize2D::new(self.width / scale.get(), self.height / scale.get())
}
}
impl<T: Copy, U> TypedSize2D<T, U> {
/// Returns self.width as a Length carrying the unit.
#[inline]
pub fn width_typed(&self) -> Length<T, U> { Length::new(self.width) }
/// Returns self.height as a Length carrying the unit.
#[inline]
pub fn height_typed(&self) -> Length<T, U> { Length::new(self.height) }
#[inline]
pub fn to_array(&self) -> [T; 2] { [self.width, self.height] }
/// Drop the units, preserving only the numeric value.
pub fn to_untyped(&self) -> Size2D<T> {
TypedSize2D::new(self.width, self.height)
}
/// Tag a unitless value with units.
pub fn from_untyped(p: &Size2D<T>) -> TypedSize2D<T, U> {
TypedSize2D::new(p.width, p.height)
}
}
impl<T: NumCast + Copy, Unit> TypedSize2D<T, Unit> {
/// Cast from one numeric representation to another, preserving the units.
///
/// When casting from floating point to integer coordinates, the decimals are truncated
    /// as one would expect from a simple cast, but this behavior does not always make sense
    /// geometrically. Consider using round(), ceil() or floor() before casting.
pub fn cast<NewT: NumCast + Copy>(&self) -> Option<TypedSize2D<NewT, Unit>> {
match (NumCast::from(self.width), NumCast::from(self.height)) {
(Some(w), Some(h)) => Some(TypedSize2D::new(w, h)),
_ => None
}
}
// Convenience functions for common casts
/// Cast into an f32 size.
pub fn to_f32(&self) -> TypedSize2D<f32, Unit> {
self.cast().unwrap()
}
    /// Cast into a usize size, truncating decimals if any.
///
/// When casting from floating point sizes, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_uint(&self) -> TypedSize2D<usize, Unit> {
self.cast().unwrap()
}
/// Cast into an i32 size, truncating decimals if any.
///
/// When casting from floating point sizes, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i32(&self) -> TypedSize2D<i32, Unit> {
self.cast().unwrap()
}
/// Cast into an i64 size, truncating decimals if any.
///
/// When casting from floating point sizes, it is worth considering whether
/// to round(), ceil() or floor() before the cast in order to obtain the desired
/// conversion behavior.
pub fn to_i64(&self) -> TypedSize2D<i64, Unit> {
self.cast().unwrap()
}
}
/// Shorthand for TypedSize2D::new(w, h).
pub fn size2<T, U>(w: T, h: T) -> TypedSize2D<T, U> {
TypedSize2D::new(w, h)
}
#[cfg(test)]
mod size2d {
use super::Size2D;
#[test]
pub fn test_add() {
let p1 = Size2D::new(1.0, 2.0);
let p2 = Size2D::new(3.0, 4.0);
assert_eq!(p1 + p2, Size2D::new(4.0, 6.0));
let p1 = Size2D::new(1.0, 2.0);
let p2 = Size2D::new(0.0, 0.0);
assert_eq!(p1 + p2, Size2D::new(1.0, 2.0));
let p1 = Size2D::new(1.0, 2.0);
let p2 = Size2D::new(-3.0, -4.0);
assert_eq!(p1 + p2, Size2D::new(-2.0, -2.0));
let p1 = Size2D::new(0.0, 0.0);
let p2 = Size2D::new(0.0, 0.0);
assert_eq!(p1 + p2, Size2D::new(0.0, 0.0));
}
#[test]
pub fn test_sub() {
let p1 = Size2D::new(1.0, 2.0);
let p2 = Size2D::new(3.0, 4.0);
assert_eq!(p1 - p2, Size2D::new(-2.0, -2.0));
let p1 = Size2D::new(1.0, 2.0);
let p2 = Size2D::new(0.0, 0.0);
assert_eq!(p1 - p2, Size2D::new(1.0, 2.0));
let p1 = Size2D::new(1.0, 2.0);
let p2 = Size2D::new(-3.0, -4.0);
assert_eq!(p1 - p2, Size2D::new(4.0, 6.0));
let p1 = Size2D::new(0.0, 0.0);
let p2 = Size2D::new(0.0, 0.0);
assert_eq!(p1 - p2, Size2D::new(0.0, 0.0));
}
}

third_party/rust/euclid-0.10.5/src/trig.rs (vendored)

@@ -1,50 +0,0 @@
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// Trait for basic trigonometry functions, so they can be used on generic numeric types
pub trait Trig {
fn sin(self) -> Self;
fn cos(self) -> Self;
fn tan(self) -> Self;
}
impl Trig for f32 {
#[inline]
fn sin(self) -> f32 {
self.sin()
}
#[inline]
fn cos(self) -> f32 {
self.cos()
}
#[inline]
fn tan(self) -> f32 {
self.tan()
}
}
impl Trig for f64 {
#[inline]
fn sin(self) -> f64 {
self.sin()
}
#[inline]
fn cos(self) -> f64 {
self.cos()
}
#[inline]
fn tan(self) -> f64 {
self.tan()
}
}
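// Illustrative sketch (not from the upstream euclid source): the Trig trait
// above lets generic code accept either f32 or f64 angles.
#[cfg(test)]
mod trig_sketch {
    use super::Trig;
    // Hypothetical helper: evaluates sine and cosine for any Trig type.
    fn sin_cos<T: Trig + Copy>(angle: T) -> (T, T) {
        (angle.sin(), angle.cos())
    }
    #[test]
    fn works_for_f32_and_f64() {
        assert_eq!(sin_cos(0.0f32), (0.0, 1.0));
        assert_eq!(sin_cos(0.0f64), (0.0, 1.0));
    }
}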


@@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"7150ee9391a955b2ef7e0762fc61c0c1aab167620ca36d88d78062d93b8334ba",".travis.yml":"9b8376fc479996f32f8a690e6009fc2f7e9f6dc1b1224e0180a92ad65b0b2183","Cargo.toml":"50ad80c8c43d09fbf1360736cfb7a9ba122c9ab080facebc83788313dfefce72","Makefile":"85b6d903eecac170ac97f10d9d89b8366cd91f5ea2f7c6212704bc590b64cf50","README.md":"614cf0c6242be3e62e45a3d60ce9a2a1581bdc46b28b25d5f40caba558e4d615","build.rs":"86776b47fac1d9368e3c3c5d57c62731729ed859bb1c4e4db0fe219251812cab","src/draw_buffer.rs":"52bef86972f40e0dd13a6e81f3aa76d4d0c28ea0b63f5f9da9650a34d75488c0","src/gl_context.rs":"28953e3752ea7fd2b19327f98c06fe53f7618efc4d3f0cb2262eba403756df2a","src/gl_context_attributes.rs":"c76ef02996d0daac313b666d1991878bbf7812932a0f9feac9e62c89ba7bf669","src/gl_context_capabilities.rs":"9f665ad04d42d47d15ecbd430639d95da526ec5951f0b7abe2434adc1415c85d","src/gl_feature.rs":"b826884900c0e8d6317a41ebb6c30bdb468601bf1c030c376749bdb2ecd2f15a","src/gl_formats.rs":"99087345b4e9a12c86605c0d091bfaf1b4ed4b2475a3b6f91d2127a2bb85fe1b","src/gl_limits.rs":"02e41619518daae5895929db00d073b5ad0d9daf9319a61abb7012c2e59fb6c7","src/lib.rs":"daaf4e26504dbb97f3803de4337f601d616adf0633e5c4415c2c172fb257ebd6","src/platform/mod.rs":"f6ec310e5b8fb519607b8e4d5ca71a0c07c83737a83c3785b5b44e7902498c8a","src/platform/not_implemented/mod.rs":"d576e9fc3164f9e2a8ff9460a60eaa8ecada44c600de1a4d1bb5513ab93569af","src/platform/not_implemented/native_gl_context.rs":"fe018722b8bebbd59b6fae759dd78b0175d10bf110205b113ff155fd06d0f75d","src/platform/with_cgl/mod.rs":"b05dc146c9ba82d62410d9b0566a8aa70c77e7ec583ad4881c531d7118454543","src/platform/with_cgl/native_gl_context.rs":"c6271cfa96836d8f833f5efbc90352852557d582db41d2c513cc36c3f966ae88","src/platform/with_egl/mod.rs":"c52ac147eb051733070c36b2c62be8c57427f80999507f62a9ce801f4aac284c","src/platform/with_egl/native_gl_context.rs":"3a8342d53de9525a5478cc96b323dbad2b3628aa6655fe5f092834cc72256116","src/platform/with_egl/utils.rs":"508521e2bf3809ffe0dfea4fa4a358903f49c77a33aa42cc6c0e7458d992a2a7","src/platform/with_glx/mod.rs":"0e497f38b2071ed189995c91b27b0b199d31bfcc10836e2d26b55023d7aff503","src/platform/with_glx/native_gl_context.rs":"2c648ae18baac14290b2eca3581d474adfea00a29a7ad47a1100e564e74b9152","src/platform/with_glx/utils.rs":"eb81e0a4c62947fa5099c241cfe2e4dd075376d30b22864e042c0f536ac6be58","src/platform/with_osmesa/mod.rs":"9f6d69878125185f16740f52ba5cdd8a45e8812af1a3561482c9b43edaf4514a","src/platform/with_wgl/mod.rs":"38f9b44b54c8a1bd4d25ae77a4ea6a2e5454a00b816764d7d74152c1f3c1b126","src/platform/with_wgl/native_gl_context.rs":"4aecd40a811cf38607b17db9724f79bb934e056f85c90c987b2aa82d637b7bb4","src/platform/with_wgl/utils.rs":"d9640c000dcb513cf0a13c4a0d35c423366b7d0894deff299affe0202bdeb770","src/platform/with_wgl/wgl_attributes.rs":"73b75da18519e048011e9c303e402cf7961e3652aa8f4d4ebf507b4ab83d06a3","src/tests.rs":"780d4211a02c09abebb2b8be85a87ed98bee374999bd333c29efb3a8c7d2b281"},"package":"171f74d51d4c94dae19d13c502dbf09afab328a5517f8bfeee2f2a33ced3bca9"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"7150ee9391a955b2ef7e0762fc61c0c1aab167620ca36d88d78062d93b8334ba",".travis.yml":"9b8376fc479996f32f8a690e6009fc2f7e9f6dc1b1224e0180a92ad65b0b2183","Cargo.toml":"2377d25abb4a8cd857355e101af61480e9e5718e1d5fcfb12e41993b8ec211c9","Makefile":"85b6d903eecac170ac97f10d9d89b8366cd91f5ea2f7c6212704bc590b64cf50","README.md":"614cf0c6242be3e62e45a3d60ce9a2a1581bdc46b28b25d5f40caba558e4d615","build.rs":"86776b47fac1d9368e3c3c5d57c62731729ed859bb1c4e4db0fe219251812cab","src/draw_buffer.rs":"52bef86972f40e0dd13a6e81f3aa76d4d0c28ea0b63f5f9da9650a34d75488c0","src/gl_context.rs":"28953e3752ea7fd2b19327f98c06fe53f7618efc4d3f0cb2262eba403756df2a","src/gl_context_attributes.rs":"8ddf99864f838ba847783d824e85eb71c8eea7d5dfb9950737dfb1472a33a4f6","src/gl_context_capabilities.rs":"9f665ad04d42d47d15ecbd430639d95da526ec5951f0b7abe2434adc1415c85d","src/gl_feature.rs":"b826884900c0e8d6317a41ebb6c30bdb468601bf1c030c376749bdb2ecd2f15a","src/gl_formats.rs":"d15a8e102ebac82c166be4ba2a6e6702a82d509ac61102157c26a0ae25f54ac7","src/gl_limits.rs":"ccecc941207f1f27d9eaf96f0ffadb03d991ab5e6ad2ef73a5af1b9dbbbd7cad","src/lib.rs":"daaf4e26504dbb97f3803de4337f601d616adf0633e5c4415c2c172fb257ebd6","src/platform/mod.rs":"f6ec310e5b8fb519607b8e4d5ca71a0c07c83737a83c3785b5b44e7902498c8a","src/platform/not_implemented/mod.rs":"d576e9fc3164f9e2a8ff9460a60eaa8ecada44c600de1a4d1bb5513ab93569af","src/platform/not_implemented/native_gl_context.rs":"fe018722b8bebbd59b6fae759dd78b0175d10bf110205b113ff155fd06d0f75d","src/platform/with_cgl/mod.rs":"b05dc146c9ba82d62410d9b0566a8aa70c77e7ec583ad4881c531d7118454543","src/platform/with_cgl/native_gl_context.rs":"c6271cfa96836d8f833f5efbc90352852557d582db41d2c513cc36c3f966ae88","src/platform/with_egl/mod.rs":"c52ac147eb051733070c36b2c62be8c57427f80999507f62a9ce801f4aac284c","src/platform/with_egl/native_gl_context.rs":"3a8342d53de9525a5478cc96b323dbad2b3628aa6655fe5f092834cc72256116","src/platform/with_egl/utils.rs":"508521e2bf3809ffe0dfea4fa4a358903f49c77a33aa42cc6c0e7458d992a2a7","src/platform/with_glx/mod.rs":"0e497f38b2071ed189995c91b27b0b199d31bfcc10836e2d26b55023d7aff503","src/platform/with_glx/native_gl_context.rs":"2c648ae18baac14290b2eca3581d474adfea00a29a7ad47a1100e564e74b9152","src/platform/with_glx/utils.rs":"eb81e0a4c62947fa5099c241cfe2e4dd075376d30b22864e042c0f536ac6be58","src/platform/with_osmesa/mod.rs":"9f6d69878125185f16740f52ba5cdd8a45e8812af1a3561482c9b43edaf4514a","src/platform/with_wgl/mod.rs":"38f9b44b54c8a1bd4d25ae77a4ea6a2e5454a00b816764d7d74152c1f3c1b126","src/platform/with_wgl/native_gl_context.rs":"4aecd40a811cf38607b17db9724f79bb934e056f85c90c987b2aa82d637b7bb4","src/platform/with_wgl/utils.rs":"d9640c000dcb513cf0a13c4a0d35c423366b7d0894deff299affe0202bdeb770","src/platform/with_wgl/wgl_attributes.rs":"73b75da18519e048011e9c303e402cf7961e3652aa8f4d4ebf507b4ab83d06a3","src/tests.rs":"a2e5ceecd6b12def2f66a5c576b4ad8ca0dce1834aebe69ebc8474a5c06ec798"},"package":"4ac875ea951d7d695a1cc8c370777d6a0e2b7355ca49506034683df09b24b1bc"}

Some files were not shown because too many files changed in this diff.