зеркало из https://github.com/mozilla/gecko-dev.git
Backed out 3 changesets (bug 1799402) for causing windows build bustages. CLOSED TREE
Backed out changeset e51cd76008ef (bug 1799402) Backed out changeset 3ff660b475ff (bug 1799402) Backed out changeset 9cfa7581565e (bug 1799402)
This commit is contained in:
Родитель
bea9f37e42
Коммит
f838c68136
|
@ -102,11 +102,6 @@ git = "https://github.com/chris-zen/coremidi.git"
|
|||
replace-with = "vendored-sources"
|
||||
rev = "fc68464b5445caf111e41f643a2e69ccce0b4f83"
|
||||
|
||||
[source."https://github.com/FirefoxGraphics/wpf-gpu-raster"]
|
||||
git = "https://github.com/FirefoxGraphics/wpf-gpu-raster"
|
||||
replace-with = "vendored-sources"
|
||||
rev = "11fc561cd9d9c206474efbdda78f73660254b510"
|
||||
|
||||
[source.crates-io]
|
||||
replace-with = "vendored-sources"
|
||||
|
||||
|
|
|
@ -2204,7 +2204,6 @@ dependencies = [
|
|||
"webext_storage_bridge",
|
||||
"webrender_bindings",
|
||||
"wgpu_bindings",
|
||||
"wpf-gpu-raster",
|
||||
"xpcom",
|
||||
"xulstore",
|
||||
]
|
||||
|
@ -5557,12 +5556,6 @@ dependencies = [
|
|||
"rustc-hash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "typed-arena-nomut"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bfc9d8d4e8c94375df96d6ac01a18c263d3d529bc4a53a207580ae9bc30e87c1"
|
||||
|
||||
[[package]]
|
||||
name = "typenum"
|
||||
version = "1.15.0"
|
||||
|
@ -6370,14 +6363,6 @@ dependencies = [
|
|||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wpf-gpu-raster"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/FirefoxGraphics/wpf-gpu-raster?rev=11fc561cd9d9c206474efbdda78f73660254b510#11fc561cd9d9c206474efbdda78f73660254b510"
|
||||
dependencies = [
|
||||
"typed-arena-nomut",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wr_malloc_size_of"
|
||||
version = "0.0.2"
|
||||
|
|
|
@ -3349,16 +3349,6 @@ void ClientWebGLContext::RawBufferData(GLenum target, const uint8_t* srcBytes,
|
|||
|
||||
////
|
||||
|
||||
void ClientWebGLContext::RawBufferSubData(GLenum target,
|
||||
WebGLsizeiptr dstByteOffset,
|
||||
const uint8_t* srcBytes,
|
||||
size_t srcLen) {
|
||||
const FuncScope funcScope(*this, "bufferSubData");
|
||||
|
||||
Run<RPROC(BufferSubData)>(target, dstByteOffset,
|
||||
RawBuffer<>({srcBytes, srcLen}));
|
||||
}
|
||||
|
||||
void ClientWebGLContext::BufferSubData(GLenum target,
|
||||
WebGLsizeiptr dstByteOffset,
|
||||
const dom::ArrayBuffer& src) {
|
||||
|
|
|
@ -833,6 +833,7 @@ class ClientWebGLContext final : public nsICanvasRenderingContextInternal,
|
|||
FuncScope(FuncScope&&) = delete;
|
||||
};
|
||||
|
||||
|
||||
protected:
|
||||
// The scope of the function at the top of the current WebGL function call
|
||||
// stack
|
||||
|
@ -1424,11 +1425,8 @@ class ClientWebGLContext final : public nsICanvasRenderingContextInternal,
|
|||
void BufferData(GLenum target, const dom::ArrayBufferView& srcData,
|
||||
GLenum usage, GLuint srcElemOffset = 0,
|
||||
GLuint srcElemCountOverride = 0);
|
||||
|
||||
void RawBufferData(GLenum target, const uint8_t* srcBytes, size_t srcLen,
|
||||
GLenum usage);
|
||||
void RawBufferSubData(GLenum target, WebGLsizeiptr dstByteOffset,
|
||||
const uint8_t* srcBytes, size_t srcLen);
|
||||
|
||||
void BufferSubData(GLenum target, WebGLsizeiptr dstByteOffset,
|
||||
const dom::ArrayBufferView& src, GLuint srcElemOffset = 0,
|
||||
|
|
|
@ -427,8 +427,6 @@ bool DrawTargetWebgl::SharedContext::Initialize() {
|
|||
|
||||
mMaxTextureSize = mWebgl->Limits().maxTex2dSize;
|
||||
|
||||
CachePrefs();
|
||||
|
||||
if (!CreateShaders()) {
|
||||
// There was a non-recoverable error when trying to init shaders.
|
||||
sContextInitError = true;
|
||||
|
@ -1003,37 +1001,22 @@ void DrawTargetWebgl::ReleaseBits(uint8_t* aData) {
|
|||
}
|
||||
}
|
||||
|
||||
// Format is x, y, alpha
|
||||
static const float kRectVertexData[12] = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,
|
||||
1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f};
|
||||
|
||||
// Orphans the contents of the path vertex buffer. The beginning of the buffer
|
||||
// always contains data for a simple rectangle draw to avoid needing to switch
|
||||
// buffers.
|
||||
void DrawTargetWebgl::SharedContext::ResetPathVertexBuffer() {
|
||||
mWebgl->BindBuffer(LOCAL_GL_ARRAY_BUFFER, mPathVertexBuffer.get());
|
||||
mWebgl->RawBufferData(
|
||||
LOCAL_GL_ARRAY_BUFFER, nullptr,
|
||||
std::max(size_t(mPathVertexCapacity), sizeof(kRectVertexData)),
|
||||
LOCAL_GL_DYNAMIC_DRAW);
|
||||
mWebgl->RawBufferSubData(LOCAL_GL_ARRAY_BUFFER, 0,
|
||||
(const uint8_t*)kRectVertexData,
|
||||
sizeof(kRectVertexData));
|
||||
mPathVertexOffset = sizeof(kRectVertexData);
|
||||
}
|
||||
|
||||
// Attempts to create all shaders and resources to be used for drawing commands.
|
||||
// Returns whether or not this succeeded.
|
||||
bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
||||
if (!mPathVertexArray) {
|
||||
mPathVertexArray = mWebgl->CreateVertexArray();
|
||||
if (!mVertexArray) {
|
||||
mVertexArray = mWebgl->CreateVertexArray();
|
||||
}
|
||||
if (!mPathVertexBuffer) {
|
||||
mPathVertexBuffer = mWebgl->CreateBuffer();
|
||||
mWebgl->BindVertexArray(mPathVertexArray.get());
|
||||
ResetPathVertexBuffer();
|
||||
if (!mVertexBuffer) {
|
||||
mVertexBuffer = mWebgl->CreateBuffer();
|
||||
static const float rectData[8] = {0.0f, 0.0f, 1.0f, 0.0f,
|
||||
1.0f, 1.0f, 0.0f, 1.0f};
|
||||
mWebgl->BindVertexArray(mVertexArray.get());
|
||||
mWebgl->BindBuffer(LOCAL_GL_ARRAY_BUFFER, mVertexBuffer.get());
|
||||
mWebgl->RawBufferData(LOCAL_GL_ARRAY_BUFFER, (const uint8_t*)rectData,
|
||||
sizeof(rectData), LOCAL_GL_STATIC_DRAW);
|
||||
mWebgl->EnableVertexAttribArray(0);
|
||||
mWebgl->VertexAttribPointer(0, 3, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0);
|
||||
mWebgl->VertexAttribPointer(0, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0);
|
||||
}
|
||||
if (!mSolidProgram) {
|
||||
// AA is computed by using the basis vectors of the transform to determine
|
||||
|
@ -1044,27 +1027,24 @@ bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
|||
// minimum coverage is then chosen by the fragment shader to use as an AA
|
||||
// coverage value to modulate the color.
|
||||
auto vsSource =
|
||||
u"attribute vec3 a_vertex;\n"
|
||||
u"attribute vec2 a_vertex;\n"
|
||||
"uniform vec2 u_transform[3];\n"
|
||||
"uniform vec2 u_viewport;\n"
|
||||
"uniform float u_aa;\n"
|
||||
"varying vec2 v_cliptc;\n"
|
||||
"varying vec4 v_dist;\n"
|
||||
"varying float v_alpha;\n"
|
||||
"varying vec2 v_cliptc;\n"
|
||||
"void main() {\n"
|
||||
" vec2 scale = vec2(dot(u_transform[0], u_transform[0]),\n"
|
||||
" dot(u_transform[1], u_transform[1]));\n"
|
||||
" vec2 invScale = u_aa * inversesqrt(scale + 1.0e-6);\n"
|
||||
" scale *= invScale;\n"
|
||||
" vec2 extrude = a_vertex.xy + invScale * (2.0 * a_vertex.xy - "
|
||||
"1.0);\n"
|
||||
" vec2 extrude = a_vertex + invScale * (2.0 * a_vertex - 1.0);\n"
|
||||
" vec2 vertex = u_transform[0] * extrude.x +\n"
|
||||
" u_transform[1] * extrude.y +\n"
|
||||
" u_transform[2];\n"
|
||||
" gl_Position = vec4(vertex * 2.0 / u_viewport - 1.0, 0.0, 1.0);\n"
|
||||
" v_cliptc = vertex / u_viewport;\n"
|
||||
" v_dist = vec4(extrude, 1.0 - extrude) * scale.xyxy + 1.5 - u_aa;\n"
|
||||
" v_alpha = a_vertex.z;\n"
|
||||
"}\n"_ns;
|
||||
auto fsSource =
|
||||
u"precision mediump float;\n"
|
||||
|
@ -1072,11 +1052,10 @@ bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
|||
"uniform sampler2D u_clipmask;\n"
|
||||
"varying vec2 v_cliptc;\n"
|
||||
"varying vec4 v_dist;\n"
|
||||
"varying float v_alpha;\n"
|
||||
"void main() {\n"
|
||||
" float clip = texture2D(u_clipmask, v_cliptc).r;\n"
|
||||
" vec2 dist = min(v_dist.xy, v_dist.zw);\n"
|
||||
" float aa = v_alpha * clamp(min(dist.x, dist.y), 0.0, 1.0);\n"
|
||||
" float aa = clamp(min(dist.x, dist.y), 0.0, 1.0);\n"
|
||||
" gl_FragColor = clip * aa * u_color;\n"
|
||||
"}\n"_ns;
|
||||
RefPtr<WebGLShaderJS> vsId = mWebgl->CreateShader(LOCAL_GL_VERTEX_SHADER);
|
||||
|
@ -1120,7 +1099,7 @@ bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
|||
|
||||
if (!mImageProgram) {
|
||||
auto vsSource =
|
||||
u"attribute vec3 a_vertex;\n"
|
||||
u"attribute vec2 a_vertex;\n"
|
||||
"uniform vec2 u_viewport;\n"
|
||||
"uniform float u_aa;\n"
|
||||
"uniform vec2 u_transform[3];\n"
|
||||
|
@ -1128,14 +1107,12 @@ bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
|||
"varying vec2 v_cliptc;\n"
|
||||
"varying vec2 v_texcoord;\n"
|
||||
"varying vec4 v_dist;\n"
|
||||
"varying float v_alpha;\n"
|
||||
"void main() {\n"
|
||||
" vec2 scale = vec2(dot(u_transform[0], u_transform[0]),\n"
|
||||
" dot(u_transform[1], u_transform[1]));\n"
|
||||
" vec2 invScale = u_aa * inversesqrt(scale + 1.0e-6);\n"
|
||||
" scale *= invScale;\n"
|
||||
" vec2 extrude = a_vertex.xy + invScale * (2.0 * a_vertex.xy - "
|
||||
"1.0);\n"
|
||||
" vec2 extrude = a_vertex + invScale * (2.0 * a_vertex - 1.0);\n"
|
||||
" vec2 vertex = u_transform[0] * extrude.x +\n"
|
||||
" u_transform[1] * extrude.y +\n"
|
||||
" u_transform[2];\n"
|
||||
|
@ -1145,7 +1122,6 @@ bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
|||
" u_texmatrix[1] * extrude.y +\n"
|
||||
" u_texmatrix[2];\n"
|
||||
" v_dist = vec4(extrude, 1.0 - extrude) * scale.xyxy + 1.5 - u_aa;\n"
|
||||
" v_alpha = a_vertex.z;\n"
|
||||
"}\n"_ns;
|
||||
auto fsSource =
|
||||
u"precision mediump float;\n"
|
||||
|
@ -1157,13 +1133,12 @@ bool DrawTargetWebgl::SharedContext::CreateShaders() {
|
|||
"varying vec2 v_cliptc;\n"
|
||||
"varying vec2 v_texcoord;\n"
|
||||
"varying vec4 v_dist;\n"
|
||||
"varying float v_alpha;\n"
|
||||
"void main() {\n"
|
||||
" vec2 tc = clamp(v_texcoord, u_texbounds.xy, u_texbounds.zw);\n"
|
||||
" vec4 image = texture2D(u_sampler, tc);\n"
|
||||
" float clip = texture2D(u_clipmask, v_cliptc).r;\n"
|
||||
" vec2 dist = min(v_dist.xy, v_dist.zw);\n"
|
||||
" float aa = v_alpha * clamp(min(dist.x, dist.y), 0.0, 1.0);\n"
|
||||
" float aa = clamp(min(dist.x, dist.y), 0.0, 1.0);\n"
|
||||
" gl_FragColor = clip * aa * u_color *\n"
|
||||
" mix(image, image.rrrr, u_swizzle);\n"
|
||||
"}\n"_ns;
|
||||
|
@ -1626,7 +1601,7 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
const Rect& aRect, const Pattern& aPattern, const DrawOptions& aOptions,
|
||||
Maybe<DeviceColor> aMaskColor, RefPtr<TextureHandle>* aHandle,
|
||||
bool aTransformed, bool aClipped, bool aAccelOnly, bool aForceUpdate,
|
||||
const StrokeOptions* aStrokeOptions, const PathVertexRange* aVertexRange) {
|
||||
const StrokeOptions* aStrokeOptions) {
|
||||
// If the rect or clip rect is empty, then there is nothing to draw.
|
||||
if (aRect.IsEmpty() || mClipRect.IsEmpty()) {
|
||||
return true;
|
||||
|
@ -1640,7 +1615,6 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
// If only accelerated drawing was requested, bail out without software
|
||||
// drawing fallback.
|
||||
if (!aAccelOnly) {
|
||||
MOZ_ASSERT(!aVertexRange);
|
||||
mCurrentTarget->DrawRectFallback(aRect, aPattern, aOptions, aMaskColor,
|
||||
aTransformed, aClipped, aStrokeOptions);
|
||||
}
|
||||
|
@ -1667,7 +1641,7 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
DrawOptions(1.0f, CompositionOp::OP_SOURCE,
|
||||
aOptions.mAntialiasMode),
|
||||
Nothing(), nullptr, aTransformed, aClipped, aAccelOnly,
|
||||
aForceUpdate, aStrokeOptions, aVertexRange)) {
|
||||
aForceUpdate, aStrokeOptions)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -1675,7 +1649,7 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
aRect, aPattern,
|
||||
DrawOptions(aOptions.mAlpha, op, aOptions.mAntialiasMode), aMaskColor,
|
||||
aHandle, aTransformed, aClipped, aAccelOnly, aForceUpdate,
|
||||
aStrokeOptions, aVertexRange);
|
||||
aStrokeOptions);
|
||||
}
|
||||
|
||||
// Set up the scissor test to reflect the clipping rectangle, if supplied.
|
||||
|
@ -1692,16 +1666,13 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
// Now try to actually draw the pattern...
|
||||
switch (aPattern.GetType()) {
|
||||
case PatternType::COLOR: {
|
||||
if (!aVertexRange) {
|
||||
// Only an uncached draw if not using the vertex cache.
|
||||
mCurrentTarget->mProfile.OnUncachedDraw();
|
||||
}
|
||||
mCurrentTarget->mProfile.OnUncachedDraw();
|
||||
auto color = static_cast<const ColorPattern&>(aPattern).mColor;
|
||||
float a = color.a * aOptions.mAlpha;
|
||||
DeviceColor premulColor(color.r * a, color.g * a, color.b * a, a);
|
||||
if (((a == 1.0f && aOptions.mCompositionOp == CompositionOp::OP_OVER) ||
|
||||
aOptions.mCompositionOp == CompositionOp::OP_SOURCE) &&
|
||||
!aStrokeOptions && !aVertexRange && !HasClipMask()) {
|
||||
!aStrokeOptions && !HasClipMask()) {
|
||||
// Certain color patterns can be mapped to scissored clears. The
|
||||
// composition op must effectively overwrite the destination, and the
|
||||
// transform must map to an axis-aligned integer rectangle.
|
||||
|
@ -1748,13 +1719,12 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
{(const uint8_t*)viewportData, sizeof(viewportData)});
|
||||
mDirtyViewport = false;
|
||||
}
|
||||
if (mDirtyAA || aStrokeOptions || aVertexRange) {
|
||||
// Native lines use line smoothing. Generated paths provide their own
|
||||
// AA as vertex alpha.
|
||||
float aaData = aStrokeOptions || aVertexRange ? 0.0f : 1.0f;
|
||||
if (mDirtyAA || aStrokeOptions) {
|
||||
// Native lines use line smoothing.
|
||||
float aaData = aStrokeOptions ? 0.0f : 1.0f;
|
||||
mWebgl->UniformData(LOCAL_GL_FLOAT, mSolidProgramAA, false,
|
||||
{(const uint8_t*)&aaData, sizeof(aaData)});
|
||||
mDirtyAA = aaData == 0.0f;
|
||||
mDirtyAA = !!aStrokeOptions;
|
||||
}
|
||||
float colorData[4] = {premulColor.b, premulColor.g, premulColor.r,
|
||||
premulColor.a};
|
||||
|
@ -1769,16 +1739,8 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
mWebgl->UniformData(LOCAL_GL_FLOAT_VEC4, mSolidProgramColor, false,
|
||||
{(const uint8_t*)colorData, sizeof(colorData)});
|
||||
// Finally draw the colored rectangle.
|
||||
if (aVertexRange) {
|
||||
// If there's a vertex range, then we need to draw triangles within from
|
||||
// generated from a path stored in the path vertex buffer.
|
||||
mWebgl->DrawArrays(LOCAL_GL_TRIANGLES, GLint(aVertexRange->mOffset),
|
||||
GLsizei(aVertexRange->mLength));
|
||||
} else {
|
||||
// Otherwise we're drawing a simple stroked/filled rectangle.
|
||||
mWebgl->DrawArrays(
|
||||
aStrokeOptions ? LOCAL_GL_LINE_LOOP : LOCAL_GL_TRIANGLE_FAN, 0, 4);
|
||||
}
|
||||
mWebgl->DrawArrays(
|
||||
aStrokeOptions ? LOCAL_GL_LINE_LOOP : LOCAL_GL_TRIANGLE_FAN, 0, 4);
|
||||
success = true;
|
||||
break;
|
||||
}
|
||||
|
@ -1962,17 +1924,15 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
{(const uint8_t*)viewportData, sizeof(viewportData)});
|
||||
mDirtyViewport = false;
|
||||
}
|
||||
if (mDirtyAA || aStrokeOptions || aVertexRange) {
|
||||
if (mDirtyAA || aStrokeOptions) {
|
||||
// AA is not supported for OP_SOURCE. Native lines use line smoothing.
|
||||
// Generated paths provide their own AA as vertex alpha.
|
||||
|
||||
float aaData = mLastCompositionOp == CompositionOp::OP_SOURCE ||
|
||||
aStrokeOptions || aVertexRange
|
||||
? 0.0f
|
||||
: 1.0f;
|
||||
float aaData =
|
||||
mLastCompositionOp == CompositionOp::OP_SOURCE || aStrokeOptions
|
||||
? 0.0f
|
||||
: 1.0f;
|
||||
mWebgl->UniformData(LOCAL_GL_FLOAT, mImageProgramAA, false,
|
||||
{(const uint8_t*)&aaData, sizeof(aaData)});
|
||||
mDirtyAA = aaData == 0.0f;
|
||||
mDirtyAA = !!aStrokeOptions;
|
||||
}
|
||||
DeviceColor color = aMaskColor && format != SurfaceFormat::A8
|
||||
? DeviceColor::Mask(1.0f, aMaskColor->a)
|
||||
|
@ -2060,16 +2020,8 @@ bool DrawTargetWebgl::SharedContext::DrawRectAccel(
|
|||
}
|
||||
|
||||
// Finally draw the image rectangle.
|
||||
if (aVertexRange) {
|
||||
// If there's a vertex range, then we need to draw triangles within from
|
||||
// generated from a path stored in the path vertex buffer.
|
||||
mWebgl->DrawArrays(LOCAL_GL_TRIANGLES, GLint(aVertexRange->mOffset),
|
||||
GLsizei(aVertexRange->mLength));
|
||||
} else {
|
||||
// Otherwise we're drawing a simple stroked/filled rectangle.
|
||||
mWebgl->DrawArrays(
|
||||
aStrokeOptions ? LOCAL_GL_LINE_LOOP : LOCAL_GL_TRIANGLE_FAN, 0, 4);
|
||||
}
|
||||
mWebgl->DrawArrays(
|
||||
aStrokeOptions ? LOCAL_GL_LINE_LOOP : LOCAL_GL_TRIANGLE_FAN, 0, 4);
|
||||
|
||||
// Restore the default linear filter if overridden.
|
||||
if (UseNearestFilter(surfacePattern)) {
|
||||
|
@ -2208,31 +2160,26 @@ void CacheEntry::Link(const RefPtr<TextureHandle>& aHandle) {
|
|||
// TextureHandle as unused and unlinks it from the CacheEntry. The
|
||||
// entry is removed from its containing Cache, if applicable.
|
||||
void CacheEntry::Unlink() {
|
||||
RemoveFromList();
|
||||
|
||||
// The entry may not have a valid handle if rasterization failed.
|
||||
if (mHandle) {
|
||||
mHandle->SetCacheEntry(nullptr);
|
||||
mHandle = nullptr;
|
||||
}
|
||||
|
||||
RemoveFromList();
|
||||
}
|
||||
|
||||
// Hashes a path and pattern to a single hash value that can be used for quick
|
||||
// comparisons. This currently avoids to expensive hashing of internal path
|
||||
// and pattern data for speed, relying instead on later exact comparisons for
|
||||
// disambiguation.
|
||||
HashNumber PathCacheEntry::HashPath(const QuantizedPath& aPath,
|
||||
HashNumber PathCacheEntry::HashPath(const SkPath& aPath,
|
||||
const Pattern* aPattern,
|
||||
const Matrix& aTransform,
|
||||
const IntRect& aBounds,
|
||||
const Point& aOrigin) {
|
||||
const IntRect& aBounds) {
|
||||
HashNumber hash = 0;
|
||||
hash = AddToHash(hash, aPath.mPath.num_types);
|
||||
hash = AddToHash(hash, aPath.mPath.num_points);
|
||||
// Quantize the relative offset of the path to its bounds.
|
||||
IntPoint offset = RoundedToInt((aOrigin - Point(aBounds.TopLeft())) * 16.0f);
|
||||
hash = AddToHash(hash, offset.x);
|
||||
hash = AddToHash(hash, offset.y);
|
||||
hash = AddToHash(hash, aPath.countVerbs());
|
||||
hash = AddToHash(hash, aPath.countPoints());
|
||||
hash = AddToHash(hash, aBounds.width);
|
||||
hash = AddToHash(hash, aBounds.height);
|
||||
if (aPattern) {
|
||||
|
@ -2253,7 +2200,7 @@ static inline bool HasMatchingScale(const Matrix& aTransform1,
|
|||
|
||||
// Determines if an existing path cache entry matches an incoming path and
|
||||
// pattern.
|
||||
inline bool PathCacheEntry::MatchesPath(const QuantizedPath& aPath,
|
||||
inline bool PathCacheEntry::MatchesPath(const SkPath& aPath,
|
||||
const Pattern* aPattern,
|
||||
const StrokeOptions* aStrokeOptions,
|
||||
const Matrix& aTransform,
|
||||
|
@ -2276,13 +2223,13 @@ inline bool PathCacheEntry::MatchesPath(const QuantizedPath& aPath,
|
|||
aSigma == mSigma;
|
||||
}
|
||||
|
||||
PathCacheEntry::PathCacheEntry(QuantizedPath&& aPath, Pattern* aPattern,
|
||||
PathCacheEntry::PathCacheEntry(const SkPath& aPath, Pattern* aPattern,
|
||||
StoredStrokeOptions* aStrokeOptions,
|
||||
const Matrix& aTransform, const IntRect& aBounds,
|
||||
const Point& aOrigin, HashNumber aHash,
|
||||
float aSigma)
|
||||
: CacheEntryImpl<PathCacheEntry>(aTransform, aBounds, aHash),
|
||||
mPath(std::move(aPath)),
|
||||
mPath(aPath),
|
||||
mOrigin(aOrigin),
|
||||
mPattern(aPattern),
|
||||
mStrokeOptions(aStrokeOptions),
|
||||
|
@ -2293,11 +2240,11 @@ PathCacheEntry::PathCacheEntry(QuantizedPath&& aPath, Pattern* aPattern,
|
|||
// texture handle is valid to determine if it will need to render the text run
|
||||
// or just reuse the cached texture.
|
||||
already_AddRefed<PathCacheEntry> PathCache::FindOrInsertEntry(
|
||||
QuantizedPath aPath, const Pattern* aPattern,
|
||||
const SkPath& aPath, const Pattern* aPattern,
|
||||
const StrokeOptions* aStrokeOptions, const Matrix& aTransform,
|
||||
const IntRect& aBounds, const Point& aOrigin, float aSigma) {
|
||||
HashNumber hash =
|
||||
PathCacheEntry::HashPath(aPath, aPattern, aTransform, aBounds, aOrigin);
|
||||
PathCacheEntry::HashPath(aPath, aPattern, aTransform, aBounds);
|
||||
for (const RefPtr<PathCacheEntry>& entry : GetChain(hash)) {
|
||||
if (entry->MatchesPath(aPath, aPattern, aStrokeOptions, aTransform, aBounds,
|
||||
aOrigin, hash, aSigma)) {
|
||||
|
@ -2319,8 +2266,8 @@ already_AddRefed<PathCacheEntry> PathCache::FindOrInsertEntry(
|
|||
}
|
||||
}
|
||||
RefPtr<PathCacheEntry> entry =
|
||||
new PathCacheEntry(std::move(aPath), pattern, strokeOptions, aTransform,
|
||||
aBounds, aOrigin, hash, aSigma);
|
||||
new PathCacheEntry(aPath, pattern, strokeOptions, aTransform, aBounds,
|
||||
aOrigin, hash, aSigma);
|
||||
Insert(entry);
|
||||
return entry.forget();
|
||||
}
|
||||
|
@ -2340,154 +2287,6 @@ void DrawTargetWebgl::Fill(const Path* aPath, const Pattern& aPattern,
|
|||
}
|
||||
}
|
||||
|
||||
QuantizedPath::QuantizedPath(const WGR::Path& aPath) : mPath(aPath) {}
|
||||
|
||||
QuantizedPath::QuantizedPath(QuantizedPath&& aPath) noexcept
|
||||
: mPath(aPath.mPath) {
|
||||
aPath.mPath.points = nullptr;
|
||||
aPath.mPath.num_points = 0;
|
||||
aPath.mPath.types = nullptr;
|
||||
aPath.mPath.num_types = 0;
|
||||
}
|
||||
|
||||
QuantizedPath::~QuantizedPath() {
|
||||
if (mPath.points || mPath.types) {
|
||||
WGR::wgr_path_release(mPath);
|
||||
}
|
||||
}
|
||||
|
||||
bool QuantizedPath::operator==(const QuantizedPath& aOther) const {
|
||||
return mPath.num_types == aOther.mPath.num_types &&
|
||||
mPath.num_points == aOther.mPath.num_points &&
|
||||
mPath.fill_mode == aOther.mPath.fill_mode &&
|
||||
!memcmp(mPath.types, aOther.mPath.types,
|
||||
mPath.num_types * sizeof(uint8_t)) &&
|
||||
!memcmp(mPath.points, aOther.mPath.points,
|
||||
mPath.num_points * sizeof(WGR::Point));
|
||||
}
|
||||
|
||||
// Generate a quantized path from the Skia path using WGR. The supplied
|
||||
// transform will be applied to the path. The path is stored relative to its
|
||||
// bounds origin to support translation later.
|
||||
static Maybe<QuantizedPath> GenerateQuantizedPath(const SkPath& aPath,
|
||||
const IntRect& aBounds,
|
||||
const Matrix& aTransform) {
|
||||
WGR::PathBuilder* pb = WGR::wgr_new_builder();
|
||||
if (!pb) {
|
||||
return Nothing();
|
||||
}
|
||||
WGR::wgr_builder_set_fill_mode(
|
||||
pb, aPath.getFillType() == SkPath::kWinding_FillType
|
||||
? WGR::FillMode::Winding
|
||||
: WGR::FillMode::EvenOdd);
|
||||
|
||||
SkPath::RawIter iter(aPath);
|
||||
SkPoint params[4];
|
||||
SkPath::Verb currentVerb;
|
||||
|
||||
// printf_stderr("bounds: (%d, %d) %d x %d\n", aBounds.x, aBounds.y,
|
||||
// aBounds.width, aBounds.height);
|
||||
Matrix transform = aTransform;
|
||||
transform.PostTranslate(-aBounds.TopLeft());
|
||||
while ((currentVerb = iter.next(params)) != SkPath::kDone_Verb) {
|
||||
switch (currentVerb) {
|
||||
case SkPath::kMove_Verb: {
|
||||
Point p0 = transform.TransformPoint(SkPointToPoint(params[0]));
|
||||
// printf_stderr("move (%f, %f)\n", p0.x, p0.y);
|
||||
WGR::wgr_builder_move_to(pb, p0.x, p0.y);
|
||||
break;
|
||||
}
|
||||
case SkPath::kLine_Verb: {
|
||||
Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
|
||||
// printf_stderr("line (%f, %f)\n", p1.x, p1.y);
|
||||
WGR::wgr_builder_line_to(pb, p1.x, p1.y);
|
||||
break;
|
||||
}
|
||||
case SkPath::kCubic_Verb: {
|
||||
Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
|
||||
Point p2 = transform.TransformPoint(SkPointToPoint(params[2]));
|
||||
Point p3 = transform.TransformPoint(SkPointToPoint(params[3]));
|
||||
// printf_stderr("cubic (%f, %f), (%f, %f), (%f, %f)\n", p1.x, p1.y,
|
||||
// p2.x, p2.y, p3.x, p3.y);
|
||||
WGR::wgr_builder_curve_to(pb, p1.x, p1.y, p2.x, p2.y, p3.x, p3.y);
|
||||
break;
|
||||
}
|
||||
case SkPath::kQuad_Verb: {
|
||||
Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
|
||||
Point p2 = transform.TransformPoint(SkPointToPoint(params[2]));
|
||||
// printf_stderr("quad (%f, %f), (%f, %f)\n", p1.x, p1.y, p2.x, p2.y);
|
||||
WGR::wgr_builder_quad_to(pb, p1.x, p1.y, p2.x, p2.y);
|
||||
break;
|
||||
}
|
||||
case SkPath::kConic_Verb: {
|
||||
Point p0 = transform.TransformPoint(SkPointToPoint(params[0]));
|
||||
Point p1 = transform.TransformPoint(SkPointToPoint(params[1]));
|
||||
Point p2 = transform.TransformPoint(SkPointToPoint(params[2]));
|
||||
float w = iter.conicWeight();
|
||||
std::vector<Point> quads;
|
||||
int numQuads = ConvertConicToQuads(p0, p1, p2, w, quads);
|
||||
for (int i = 0; i < numQuads; i++) {
|
||||
Point q1 = quads[2 * i + 1];
|
||||
Point q2 = quads[2 * i + 2];
|
||||
// printf_stderr("conic quad (%f, %f), (%f, %f)\n", q1.x, q1.y, q2.x,
|
||||
// q2.y);
|
||||
WGR::wgr_builder_quad_to(pb, q1.x, q1.y, q2.x, q2.y);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case SkPath::kClose_Verb:
|
||||
// printf_stderr("close\n");
|
||||
WGR::wgr_builder_close(pb);
|
||||
break;
|
||||
default:
|
||||
MOZ_ASSERT(false);
|
||||
// Unexpected verb found in path!
|
||||
WGR::wgr_builder_release(pb);
|
||||
return Nothing();
|
||||
}
|
||||
}
|
||||
|
||||
WGR::Path p = WGR::wgr_builder_get_path(pb);
|
||||
WGR::wgr_builder_release(pb);
|
||||
if (!p.num_points || !p.num_types) {
|
||||
WGR::wgr_path_release(p);
|
||||
return Nothing();
|
||||
}
|
||||
return Some(QuantizedPath(p));
|
||||
}
|
||||
|
||||
// Get the output vertex buffer using WGR from an input quantized path.
|
||||
static Maybe<WGR::VertexBuffer> GeneratePathVertexBuffer(
|
||||
const QuantizedPath& aPath, const IntRect& aClipRect) {
|
||||
WGR::VertexBuffer vb = WGR::wgr_path_rasterize_to_tri_list(
|
||||
&aPath.mPath, aClipRect.x, aClipRect.y, aClipRect.width,
|
||||
aClipRect.height);
|
||||
if (!vb.len) {
|
||||
WGR::wgr_vertex_buffer_release(vb);
|
||||
return Nothing();
|
||||
}
|
||||
return Some(vb);
|
||||
}
|
||||
|
||||
// Search the path cache for any entries stored in the path vertex buffer and
|
||||
// remove them.
|
||||
void PathCache::ClearVertexRanges() {
|
||||
for (auto& chain : mChains) {
|
||||
PathCacheEntry* entry = chain.getFirst();
|
||||
while (entry) {
|
||||
PathCacheEntry* next = entry->getNext();
|
||||
if (entry->GetVertexRange().IsValid()) {
|
||||
entry->Unlink();
|
||||
}
|
||||
entry = next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline bool DrawTargetWebgl::ShouldAccelPath(const DrawOptions& aOptions) {
|
||||
return mWebglValid && SupportsDrawOptions(aOptions) && PrepareContext();
|
||||
}
|
||||
|
||||
bool DrawTargetWebgl::SharedContext::DrawPathAccel(
|
||||
const Path* aPath, const Pattern& aPattern, const DrawOptions& aOptions,
|
||||
const StrokeOptions* aStrokeOptions, const ShadowOptions* aShadow,
|
||||
|
@ -2537,15 +2336,8 @@ bool DrawTargetWebgl::SharedContext::DrawPathAccel(
|
|||
if (!mPathCache) {
|
||||
mPathCache = MakeUnique<PathCache>();
|
||||
}
|
||||
// Use a quantized, relative (to its bounds origin) version of the path as
|
||||
// a cache key to help limit cache bloat.
|
||||
Maybe<QuantizedPath> qp =
|
||||
GenerateQuantizedPath(pathSkia->GetPath(), intBounds, currentTransform);
|
||||
if (!qp) {
|
||||
return false;
|
||||
}
|
||||
entry = mPathCache->FindOrInsertEntry(
|
||||
std::move(*qp), color ? nullptr : &aPattern, aStrokeOptions,
|
||||
pathSkia->GetPath(), color ? nullptr : &aPattern, aStrokeOptions,
|
||||
currentTransform, intBounds, bounds.TopLeft(),
|
||||
aShadow ? aShadow->mSigma : -1.0f);
|
||||
if (!entry) {
|
||||
|
@ -2576,164 +2368,78 @@ bool DrawTargetWebgl::SharedContext::DrawPathAccel(
|
|||
(bounds.TopLeft() - entry->GetOrigin()) + entry->GetBounds().TopLeft();
|
||||
SurfacePattern pathPattern(nullptr, ExtendMode::CLAMP,
|
||||
Matrix::Translation(offset), filter);
|
||||
return DrawRectAccel(Rect(intBounds), pathPattern, aOptions, shadowColor,
|
||||
&handle, false, true, true);
|
||||
}
|
||||
|
||||
if (mPathVertexCapacity > 0 && !handle && entry && !aShadow &&
|
||||
SupportsPattern(aPattern)) {
|
||||
if (entry->GetVertexRange().IsValid()) {
|
||||
// If there is a valid cached vertex data in the path vertex buffer, then
|
||||
// just draw that.
|
||||
mCurrentTarget->mProfile.OnCacheHit();
|
||||
return DrawRectAccel(Rect(intBounds.TopLeft(), Size(1, 1)), aPattern,
|
||||
aOptions, Nothing(), nullptr, false, true, true,
|
||||
false, nullptr, &entry->GetVertexRange());
|
||||
}
|
||||
|
||||
// printf_stderr("Generating... verbs %d, points %d\n",
|
||||
// int(pathSkia->GetPath().countVerbs()),
|
||||
// int(pathSkia->GetPath().countPoints()));
|
||||
Maybe<WGR::VertexBuffer> vb;
|
||||
if (aStrokeOptions) {
|
||||
// If stroking, then generate a path to fill the stroked region. This
|
||||
// path will need to be quantized again because it differs from the path
|
||||
// used for the cache entry, but this allows us to avoid generating a
|
||||
// fill path on a cache hit.
|
||||
SkPaint paint;
|
||||
if (StrokeOptionsToPaint(paint, *aStrokeOptions)) {
|
||||
Maybe<SkRect> cullRect;
|
||||
Matrix invTransform = currentTransform;
|
||||
if (invTransform.Invert()) {
|
||||
// Transform the stroking clip rect from device space to local space.
|
||||
Rect invRect = invTransform.TransformBounds(Rect(mClipRect));
|
||||
invRect.RoundOut();
|
||||
cullRect = Some(RectToSkRect(invRect));
|
||||
}
|
||||
SkPath fillPath;
|
||||
if (paint.getFillPath(pathSkia->GetPath(), &fillPath,
|
||||
cullRect.ptrOr(nullptr),
|
||||
ComputeResScaleForStroking(currentTransform))) {
|
||||
// printf_stderr(" stroke fill... verbs %d, points %d\n",
|
||||
// int(fillPath.countVerbs()),
|
||||
// int(fillPath.countPoints()));
|
||||
if (Maybe<QuantizedPath> qp = GenerateQuantizedPath(
|
||||
fillPath, intBounds, currentTransform)) {
|
||||
vb = GeneratePathVertexBuffer(
|
||||
*qp, IntRect(-intBounds.TopLeft(), mViewportSize));
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
vb = GeneratePathVertexBuffer(
|
||||
entry->GetPath(), IntRect(-intBounds.TopLeft(), mViewportSize));
|
||||
}
|
||||
if (vb) {
|
||||
uint32_t vertexBytes = vb->len * sizeof(WGR::OutputVertex);
|
||||
// printf_stderr(" ... %d verts, %d bytes\n", int(vb->len),
|
||||
// int(vertexBytes));
|
||||
if (vertexBytes > mPathVertexCapacity - mPathVertexOffset &&
|
||||
vertexBytes <= mPathVertexCapacity - sizeof(kRectVertexData)) {
|
||||
// If the vertex data is too large to fit in the remaining path vertex
|
||||
// buffer, then orphan the contents of the vertex buffer to make room
|
||||
// for it.
|
||||
if (mPathCache) {
|
||||
mPathCache->ClearVertexRanges();
|
||||
}
|
||||
ResetPathVertexBuffer();
|
||||
}
|
||||
if (vertexBytes <= mPathVertexCapacity - mPathVertexOffset) {
|
||||
// If there is actually room to fit the vertex data in the vertex buffer
|
||||
// after orphaning as necessary, then upload the data to the next
|
||||
// available offset in the buffer.
|
||||
PathVertexRange vertexRange(
|
||||
uint32_t(mPathVertexOffset / sizeof(WGR::OutputVertex)),
|
||||
uint32_t(vb->len));
|
||||
if (entry) {
|
||||
entry->SetVertexRange(vertexRange);
|
||||
}
|
||||
// printf_stderr(" ... offset %d\n", mPathVertexOffset);
|
||||
mWebgl->RawBufferSubData(LOCAL_GL_ARRAY_BUFFER, mPathVertexOffset,
|
||||
(const uint8_t*)vb->data, vertexBytes);
|
||||
mPathVertexOffset += vertexBytes;
|
||||
wgr_vertex_buffer_release(vb.ref());
|
||||
// Finally, draw the uploaded vertex data.
|
||||
mCurrentTarget->mProfile.OnCacheMiss();
|
||||
return DrawRectAccel(Rect(intBounds.TopLeft(), Size(1, 1)), aPattern,
|
||||
aOptions, Nothing(), nullptr, false, true, true,
|
||||
false, nullptr, &vertexRange);
|
||||
}
|
||||
wgr_vertex_buffer_release(vb.ref());
|
||||
// If we failed to draw the vertex data for some reason, then fall through
|
||||
// to the texture rasterization path.
|
||||
}
|
||||
}
|
||||
|
||||
// If there isn't a valid texture handle, then we need to rasterize the
|
||||
// path in a software canvas and upload this to a texture. Solid color
|
||||
// patterns will be rendered as a path mask that can then be modulated
|
||||
// with any color. Other pattern types have to rasterize the pattern
|
||||
// directly into the cached texture.
|
||||
handle = nullptr;
|
||||
RefPtr<DrawTargetSkia> pathDT = new DrawTargetSkia;
|
||||
if (pathDT->Init(intBounds.Size(), color || aShadow
|
||||
? SurfaceFormat::A8
|
||||
: SurfaceFormat::B8G8R8A8)) {
|
||||
Point offset = -intBounds.TopLeft();
|
||||
if (aShadow) {
|
||||
// Ensure the the shadow is drawn at the requested offset
|
||||
offset += aShadow->mOffset;
|
||||
}
|
||||
pathDT->SetTransform(currentTransform * Matrix::Translation(offset));
|
||||
DrawOptions drawOptions(1.0f, CompositionOp::OP_OVER,
|
||||
aOptions.mAntialiasMode);
|
||||
static const ColorPattern maskPattern(DeviceColor(1.0f, 1.0f, 1.0f, 1.0f));
|
||||
const Pattern& cachePattern = color ? maskPattern : aPattern;
|
||||
// If the source pattern is a DrawTargetWebgl snapshot, we may shift
|
||||
// targets when drawing the path, so back up the old target.
|
||||
DrawTargetWebgl* oldTarget = mCurrentTarget;
|
||||
if (aStrokeOptions) {
|
||||
pathDT->Stroke(aPath, cachePattern, *aStrokeOptions, drawOptions);
|
||||
} else {
|
||||
pathDT->Fill(aPath, cachePattern, drawOptions);
|
||||
}
|
||||
if (aShadow && aShadow->mSigma > 0.0f) {
|
||||
// Blur the shadow if required.
|
||||
uint8_t* data = nullptr;
|
||||
IntSize size;
|
||||
int32_t stride = 0;
|
||||
SurfaceFormat format = SurfaceFormat::UNKNOWN;
|
||||
if (pathDT->LockBits(&data, &size, &stride, &format)) {
|
||||
AlphaBoxBlur blur(Rect(pathDT->GetRect()), stride, aShadow->mSigma,
|
||||
aShadow->mSigma);
|
||||
blur.Blur(data);
|
||||
pathDT->ReleaseBits(data);
|
||||
}
|
||||
}
|
||||
RefPtr<SourceSurface> pathSurface = pathDT->Snapshot();
|
||||
if (pathSurface) {
|
||||
// If the target changed, try to restore it.
|
||||
if (mCurrentTarget != oldTarget && !oldTarget->PrepareContext()) {
|
||||
return false;
|
||||
}
|
||||
SurfacePattern pathPattern(pathSurface, ExtendMode::CLAMP,
|
||||
Matrix::Translation(intBounds.TopLeft()),
|
||||
filter);
|
||||
// Try and upload the rasterized path to a texture. If there is a
|
||||
// valid texture handle after this, then link it to the entry.
|
||||
// Otherwise, we might have to fall back to software drawing the
|
||||
// path, so unlink it from the entry.
|
||||
if (DrawRectAccel(Rect(intBounds), pathPattern, aOptions, shadowColor,
|
||||
&handle, false, true) &&
|
||||
handle) {
|
||||
if (entry) {
|
||||
entry->Link(handle);
|
||||
}
|
||||
} else if (entry) {
|
||||
entry->Unlink();
|
||||
}
|
||||
if (DrawRectAccel(Rect(intBounds), pathPattern, aOptions, shadowColor,
|
||||
&handle, false, true, true)) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
// If there isn't a valid texture handle, then we need to rasterize the
|
||||
// path in a software canvas and upload this to a texture. Solid color
|
||||
// patterns will be rendered as a path mask that can then be modulated
|
||||
// with any color. Other pattern types have to rasterize the pattern
|
||||
// directly into the cached texture.
|
||||
handle = nullptr;
|
||||
RefPtr<DrawTargetSkia> pathDT = new DrawTargetSkia;
|
||||
if (pathDT->Init(intBounds.Size(), color || aShadow
|
||||
? SurfaceFormat::A8
|
||||
: SurfaceFormat::B8G8R8A8)) {
|
||||
Point offset = -intBounds.TopLeft();
|
||||
if (aShadow) {
|
||||
// Ensure the the shadow is drawn at the requested offset
|
||||
offset += aShadow->mOffset;
|
||||
}
|
||||
pathDT->SetTransform(currentTransform * Matrix::Translation(offset));
|
||||
DrawOptions drawOptions(1.0f, CompositionOp::OP_OVER,
|
||||
aOptions.mAntialiasMode);
|
||||
static const ColorPattern maskPattern(
|
||||
DeviceColor(1.0f, 1.0f, 1.0f, 1.0f));
|
||||
const Pattern& cachePattern = color ? maskPattern : aPattern;
|
||||
// If the source pattern is a DrawTargetWebgl snapshot, we may shift
|
||||
// targets when drawing the path, so back up the old target.
|
||||
DrawTargetWebgl* oldTarget = mCurrentTarget;
|
||||
if (aStrokeOptions) {
|
||||
pathDT->Stroke(aPath, cachePattern, *aStrokeOptions, drawOptions);
|
||||
} else {
|
||||
pathDT->Fill(aPath, cachePattern, drawOptions);
|
||||
}
|
||||
if (aShadow && aShadow->mSigma > 0.0f) {
|
||||
// Blur the shadow if required.
|
||||
uint8_t* data = nullptr;
|
||||
IntSize size;
|
||||
int32_t stride = 0;
|
||||
SurfaceFormat format = SurfaceFormat::UNKNOWN;
|
||||
if (pathDT->LockBits(&data, &size, &stride, &format)) {
|
||||
AlphaBoxBlur blur(Rect(pathDT->GetRect()), stride, aShadow->mSigma,
|
||||
aShadow->mSigma);
|
||||
blur.Blur(data);
|
||||
pathDT->ReleaseBits(data);
|
||||
}
|
||||
}
|
||||
RefPtr<SourceSurface> pathSurface = pathDT->Snapshot();
|
||||
if (pathSurface) {
|
||||
// If the target changed, try to restore it.
|
||||
if (mCurrentTarget != oldTarget && !oldTarget->PrepareContext()) {
|
||||
return false;
|
||||
}
|
||||
SurfacePattern pathPattern(pathSurface, ExtendMode::CLAMP,
|
||||
Matrix::Translation(intBounds.TopLeft()),
|
||||
filter);
|
||||
// Try and upload the rasterized path to a texture. If there is a
|
||||
// valid texture handle after this, then link it to the entry.
|
||||
// Otherwise, we might have to fall back to software drawing the
|
||||
// path, so unlink it from the entry.
|
||||
if (DrawRectAccel(Rect(intBounds), pathPattern, aOptions, shadowColor,
|
||||
&handle, false, true) &&
|
||||
handle) {
|
||||
if (entry) {
|
||||
entry->Link(handle);
|
||||
}
|
||||
} else if (entry) {
|
||||
entry->Unlink();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
|
@ -2744,7 +2450,7 @@ void DrawTargetWebgl::DrawPath(const Path* aPath, const Pattern& aPattern,
|
|||
const StrokeOptions* aStrokeOptions) {
|
||||
// If there is a WebGL context, then try to cache the path to avoid slow
|
||||
// fallbacks.
|
||||
if (ShouldAccelPath(aOptions) &&
|
||||
if (mWebglValid && SupportsDrawOptions(aOptions) && PrepareContext() &&
|
||||
mSharedContext->DrawPathAccel(aPath, aPattern, aOptions,
|
||||
aStrokeOptions)) {
|
||||
return;
|
||||
|
@ -2839,7 +2545,7 @@ void DrawTargetWebgl::DrawShadow(const Path* aPath, const Pattern& aPattern,
|
|||
const StrokeOptions* aStrokeOptions) {
|
||||
// If there is a WebGL context, then try to cache the path to avoid slow
|
||||
// fallbacks.
|
||||
if (ShouldAccelPath(aOptions) &&
|
||||
if (mWebglValid && SupportsDrawOptions(aOptions) && PrepareContext() &&
|
||||
mSharedContext->DrawPathAccel(aPath, aPattern, aOptions, aStrokeOptions,
|
||||
&aShadow)) {
|
||||
return;
|
||||
|
@ -2856,7 +2562,7 @@ void DrawTargetWebgl::DrawSurfaceWithShadow(SourceSurface* aSurface,
|
|||
const ShadowOptions& aShadow,
|
||||
CompositionOp aOperator) {
|
||||
DrawOptions options(1.0f, aOperator);
|
||||
if (ShouldAccelPath(options)) {
|
||||
if (mWebglValid && SupportsDrawOptions(options) && PrepareContext()) {
|
||||
SurfacePattern pattern(aSurface, ExtendMode::CLAMP,
|
||||
Matrix::Translation(aDest));
|
||||
SkPath skiaPath;
|
||||
|
@ -3678,19 +3384,6 @@ bool DrawTargetWebgl::UsageProfile::RequiresRefresh() const {
|
|||
return mFailedFrames > failRatio * mFrameCount;
|
||||
}
|
||||
|
||||
void DrawTargetWebgl::SharedContext::CachePrefs() {
|
||||
uint32_t capacity = StaticPrefs::gfx_canvas_accelerated_gpu_path_size() << 20;
|
||||
if (capacity != mPathVertexCapacity) {
|
||||
mPathVertexCapacity = capacity;
|
||||
if (mPathCache) {
|
||||
mPathCache->ClearVertexRanges();
|
||||
}
|
||||
if (mPathVertexBuffer) {
|
||||
ResetPathVertexBuffer();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For use within CanvasRenderingContext2D, called on BorrowDrawTarget.
|
||||
void DrawTargetWebgl::BeginFrame(const IntRect& aPersistedRect) {
|
||||
if (mNeedsPresent) {
|
||||
|
@ -3708,8 +3401,6 @@ void DrawTargetWebgl::BeginFrame(const IntRect& aPersistedRect) {
|
|||
}
|
||||
// Check if we need to clear out any cached because of memory pressure.
|
||||
mSharedContext->ClearCachesIfNecessary();
|
||||
// Cache any prefs for the frame.
|
||||
mSharedContext->CachePrefs();
|
||||
mProfile.BeginFrame();
|
||||
}
|
||||
|
||||
|
|
|
@ -45,7 +45,6 @@ class SharedTextureHandle;
|
|||
class StandaloneTexture;
|
||||
class GlyphCache;
|
||||
class PathCache;
|
||||
struct PathVertexRange;
|
||||
|
||||
// DrawTargetWebgl implements a subset of the DrawTarget API suitable for use
|
||||
// by CanvasRenderingContext2D. It maps these to a client WebGL context so that
|
||||
|
@ -163,12 +162,8 @@ class DrawTargetWebgl : public DrawTarget, public SupportsWeakPtr {
|
|||
bool mDirtyAA = true;
|
||||
|
||||
// WebGL shader resources
|
||||
RefPtr<WebGLBufferJS> mPathVertexBuffer;
|
||||
RefPtr<WebGLVertexArrayJS> mPathVertexArray;
|
||||
// The current insertion offset into the GPU path buffer.
|
||||
uint32_t mPathVertexOffset = 0;
|
||||
// The maximum size of the GPU path buffer.
|
||||
uint32_t mPathVertexCapacity = 0;
|
||||
RefPtr<WebGLBufferJS> mVertexBuffer;
|
||||
RefPtr<WebGLVertexArrayJS> mVertexArray;
|
||||
RefPtr<WebGLProgramJS> mSolidProgram;
|
||||
RefPtr<WebGLUniformLocationJS> mSolidProgramViewport;
|
||||
RefPtr<WebGLUniformLocationJS> mSolidProgramAA;
|
||||
|
@ -239,7 +234,6 @@ class DrawTargetWebgl : public DrawTarget, public SupportsWeakPtr {
|
|||
|
||||
bool Initialize();
|
||||
bool CreateShaders();
|
||||
void ResetPathVertexBuffer();
|
||||
|
||||
void SetBlendState(CompositionOp aOp,
|
||||
const Maybe<DeviceColor>& aBlendColor = Nothing());
|
||||
|
@ -287,8 +281,7 @@ class DrawTargetWebgl : public DrawTarget, public SupportsWeakPtr {
|
|||
RefPtr<TextureHandle>* aHandle = nullptr,
|
||||
bool aTransformed = true, bool aClipped = true,
|
||||
bool aAccelOnly = false, bool aForceUpdate = false,
|
||||
const StrokeOptions* aStrokeOptions = nullptr,
|
||||
const PathVertexRange* aVertexRange = nullptr);
|
||||
const StrokeOptions* aStrokeOptions = nullptr);
|
||||
|
||||
bool DrawPathAccel(const Path* aPath, const Pattern& aPattern,
|
||||
const DrawOptions& aOptions,
|
||||
|
@ -317,8 +310,6 @@ class DrawTargetWebgl : public DrawTarget, public SupportsWeakPtr {
|
|||
void ClearCachesIfNecessary();
|
||||
|
||||
void WaitForShmem(DrawTargetWebgl* aTarget);
|
||||
|
||||
void CachePrefs();
|
||||
};
|
||||
|
||||
RefPtr<SharedContext> mSharedContext;
|
||||
|
@ -483,8 +474,6 @@ class DrawTargetWebgl : public DrawTarget, public SupportsWeakPtr {
|
|||
bool aTransformed = true, bool aClipped = true,
|
||||
bool aAccelOnly = false, bool aForceUpdate = false,
|
||||
const StrokeOptions* aStrokeOptions = nullptr);
|
||||
|
||||
bool ShouldAccelPath(const DrawOptions& aOptions);
|
||||
void DrawPath(const Path* aPath, const Pattern& aPattern,
|
||||
const DrawOptions& aOptions,
|
||||
const StrokeOptions* aStrokeOptions = nullptr);
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
|
||||
#include "mozilla/HashFunctions.h"
|
||||
#include "mozilla/gfx/PathSkia.h"
|
||||
#include "mozilla/gfx/WPFGpuRaster.h"
|
||||
|
||||
namespace mozilla::gfx {
|
||||
|
||||
|
@ -109,7 +108,6 @@ class CacheEntryImpl : public CacheEntry, public LinkedListElement<RefPtr<T>> {
|
|||
// CacheImpl manages a list of CacheEntry.
|
||||
template <typename T>
|
||||
class CacheImpl {
|
||||
protected:
|
||||
typedef LinkedList<RefPtr<T>> ListType;
|
||||
|
||||
static constexpr size_t kNumChains = 17;
|
||||
|
@ -371,62 +369,33 @@ class GlyphCache : public LinkedListElement<GlyphCache>,
|
|||
ScaledFont* mFont;
|
||||
};
|
||||
|
||||
struct QuantizedPath {
|
||||
explicit QuantizedPath(const WGR::Path& aPath);
|
||||
// Ensure the path can only be moved, but not copied.
|
||||
QuantizedPath(QuantizedPath&&) noexcept;
|
||||
QuantizedPath(const QuantizedPath&) = delete;
|
||||
~QuantizedPath();
|
||||
|
||||
bool operator==(const QuantizedPath&) const;
|
||||
|
||||
WGR::Path mPath;
|
||||
};
|
||||
|
||||
struct PathVertexRange {
|
||||
uint32_t mOffset;
|
||||
uint32_t mLength;
|
||||
|
||||
PathVertexRange() : mOffset(0), mLength(0) {}
|
||||
PathVertexRange(uint32_t aOffset, uint32_t aLength)
|
||||
: mOffset(aOffset), mLength(aLength) {}
|
||||
|
||||
bool IsValid() const { return mLength > 0; }
|
||||
};
|
||||
|
||||
// PathCacheEntry stores a rasterized version of a supplied path with a given
|
||||
// pattern.
|
||||
class PathCacheEntry : public CacheEntryImpl<PathCacheEntry> {
|
||||
public:
|
||||
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(PathCacheEntry, override)
|
||||
|
||||
PathCacheEntry(QuantizedPath&& aPath, Pattern* aPattern,
|
||||
PathCacheEntry(const SkPath& aPath, Pattern* aPattern,
|
||||
StoredStrokeOptions* aStrokeOptions, const Matrix& aTransform,
|
||||
const IntRect& aBounds, const Point& aOrigin, HashNumber aHash,
|
||||
float aSigma = -1.0f);
|
||||
|
||||
bool MatchesPath(const QuantizedPath& aPath, const Pattern* aPattern,
|
||||
bool MatchesPath(const SkPath& aPath, const Pattern* aPattern,
|
||||
const StrokeOptions* aStrokeOptions,
|
||||
const Matrix& aTransform, const IntRect& aBounds,
|
||||
const Point& aOrigin, HashNumber aHash, float aSigma);
|
||||
|
||||
static HashNumber HashPath(const QuantizedPath& aPath,
|
||||
const Pattern* aPattern, const Matrix& aTransform,
|
||||
const IntRect& aBounds, const Point& aOrigin);
|
||||
|
||||
const QuantizedPath& GetPath() const { return mPath; }
|
||||
static HashNumber HashPath(const SkPath& aPath, const Pattern* aPattern,
|
||||
const Matrix& aTransform, const IntRect& aBounds);
|
||||
|
||||
const Point& GetOrigin() const { return mOrigin; }
|
||||
|
||||
// Valid if either a mask (no pattern) or there is valid pattern.
|
||||
bool IsValid() const override { return !mPattern || mPattern->IsValid(); }
|
||||
|
||||
const PathVertexRange& GetVertexRange() const { return mVertexRange; }
|
||||
void SetVertexRange(const PathVertexRange& aRange) { mVertexRange = aRange; }
|
||||
|
||||
private:
|
||||
// The actual path geometry supplied
|
||||
QuantizedPath mPath;
|
||||
SkPath mPath;
|
||||
// The transformed origin of the path
|
||||
Point mOrigin;
|
||||
// The pattern used to rasterize the path, if not a mask
|
||||
|
@ -435,8 +404,6 @@ class PathCacheEntry : public CacheEntryImpl<PathCacheEntry> {
|
|||
UniquePtr<StoredStrokeOptions> mStrokeOptions;
|
||||
// The shadow blur sigma
|
||||
float mSigma;
|
||||
// If the path has cached geometry in the vertex buffer.
|
||||
PathVertexRange mVertexRange;
|
||||
};
|
||||
|
||||
class PathCache : public CacheImpl<PathCacheEntry> {
|
||||
|
@ -444,11 +411,9 @@ class PathCache : public CacheImpl<PathCacheEntry> {
|
|||
PathCache() = default;
|
||||
|
||||
already_AddRefed<PathCacheEntry> FindOrInsertEntry(
|
||||
QuantizedPath aPath, const Pattern* aPattern,
|
||||
const SkPath& aPath, const Pattern* aPattern,
|
||||
const StrokeOptions* aStrokeOptions, const Matrix& aTransform,
|
||||
const IntRect& aBounds, const Point& aOrigin, float aSigma = -1.0f);
|
||||
|
||||
void ClearVertexRanges();
|
||||
};
|
||||
|
||||
} // namespace mozilla::gfx
|
||||
|
|
|
@ -332,21 +332,6 @@ static inline bool IsBackedByPixels(const SkCanvas* aCanvas) {
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes appropriate resolution scale to be used with SkPath::getFillPath
|
||||
* based on the scaling of the supplied transform.
|
||||
*/
|
||||
float ComputeResScaleForStroking(const Matrix& aTransform);
|
||||
|
||||
/**
|
||||
* This is a wrapper around SkGeometry's SkConic that can be used to convert
|
||||
* conic sections in an SkPath to a sequence of quadratic curves. The quads
|
||||
* vector is organized such that for the Nth quad, it's control points are
|
||||
* 2*N, 2*N+1, 2*N+2. This function returns the resulting number of quads.
|
||||
*/
|
||||
int ConvertConicToQuads(const Point& aP0, const Point& aP1, const Point& aP2,
|
||||
float aWeight, std::vector<Point>& aQuads);
|
||||
|
||||
} // namespace gfx
|
||||
} // namespace mozilla
|
||||
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#include "HelpersSkia.h"
|
||||
#include "PathHelpers.h"
|
||||
#include "skia/src/core/SkDraw.h"
|
||||
#include "skia/src/core/SkGeometry.h"
|
||||
|
||||
namespace mozilla::gfx {
|
||||
|
||||
|
@ -125,12 +124,6 @@ bool PathSkia::ContainsPoint(const Point& aPoint,
|
|||
return SkPathContainsPoint(mPath, aPoint, aTransform);
|
||||
}
|
||||
|
||||
float ComputeResScaleForStroking(const Matrix& aTransform) {
|
||||
SkMatrix skiaMatrix;
|
||||
GfxMatrixToSkiaMatrix(aTransform, skiaMatrix);
|
||||
return SkDraw::ComputeResScaleForStroking(skiaMatrix);
|
||||
}
|
||||
|
||||
bool PathSkia::StrokeContainsPoint(const StrokeOptions& aStrokeOptions,
|
||||
const Point& aPoint,
|
||||
const Matrix& aTransform) const {
|
||||
|
@ -143,9 +136,11 @@ bool PathSkia::StrokeContainsPoint(const StrokeOptions& aStrokeOptions,
|
|||
return false;
|
||||
}
|
||||
|
||||
SkMatrix skiaMatrix;
|
||||
GfxMatrixToSkiaMatrix(aTransform, skiaMatrix);
|
||||
SkPath strokePath;
|
||||
paint.getFillPath(mPath, &strokePath, nullptr,
|
||||
ComputeResScaleForStroking(aTransform));
|
||||
SkDraw::ComputeResScaleForStroking(skiaMatrix));
|
||||
|
||||
return SkPathContainsPoint(strokePath, aPoint, aTransform);
|
||||
}
|
||||
|
@ -197,20 +192,6 @@ Rect PathSkia::GetFastBounds(const Matrix& aTransform,
|
|||
return aTransform.TransformBounds(SkRectToRect(bounds));
|
||||
}
|
||||
|
||||
int ConvertConicToQuads(const Point& aP0, const Point& aP1, const Point& aP2,
|
||||
float aWeight, std::vector<Point>& aQuads) {
|
||||
SkConic conic(PointToSkPoint(aP0), PointToSkPoint(aP1), PointToSkPoint(aP2),
|
||||
aWeight);
|
||||
int pow2 = conic.computeQuadPOW2(0.25f);
|
||||
aQuads.resize(1 + 2 * (1 << pow2));
|
||||
int numQuads =
|
||||
conic.chopIntoQuadsPOW2(reinterpret_cast<SkPoint*>(&aQuads[0]), pow2);
|
||||
if (numQuads < 1 << pow2) {
|
||||
aQuads.resize(1 + 2 * numQuads);
|
||||
}
|
||||
return numQuads;
|
||||
}
|
||||
|
||||
void PathSkia::StreamToSink(PathSink* aSink) const {
|
||||
SkPath::RawIter iter(mPath);
|
||||
|
||||
|
@ -232,16 +213,6 @@ void PathSkia::StreamToSink(PathSink* aSink) const {
|
|||
aSink->QuadraticBezierTo(SkPointToPoint(points[1]),
|
||||
SkPointToPoint(points[2]));
|
||||
break;
|
||||
case SkPath::kConic_Verb: {
|
||||
std::vector<Point> quads;
|
||||
int numQuads = ConvertConicToQuads(
|
||||
SkPointToPoint(points[0]), SkPointToPoint(points[1]),
|
||||
SkPointToPoint(points[2]), iter.conicWeight(), quads);
|
||||
for (int i = 0; i < numQuads; i++) {
|
||||
aSink->QuadraticBezierTo(quads[2 * i + 1], quads[2 * i + 2]);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case SkPath::kClose_Verb:
|
||||
aSink->Close();
|
||||
break;
|
||||
|
|
|
@ -1,60 +0,0 @@
|
|||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef MOZILLA_GFX_WPF_GPU_RASTER_H
|
||||
#define MOZILLA_GFX_WPF_GPU_RASTER_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
namespace WGR {
|
||||
|
||||
enum class FillMode { EvenOdd, Winding };
|
||||
struct PathBuilder;
|
||||
struct Point {
|
||||
int32_t x;
|
||||
int32_t y;
|
||||
};
|
||||
struct Path {
|
||||
FillMode fill_mode;
|
||||
const Point* points;
|
||||
size_t num_points;
|
||||
const uint8_t* types;
|
||||
size_t num_types;
|
||||
};
|
||||
struct OutputVertex {
|
||||
float x;
|
||||
float y;
|
||||
float coverage;
|
||||
};
|
||||
struct VertexBuffer {
|
||||
OutputVertex* data;
|
||||
size_t len;
|
||||
};
|
||||
|
||||
extern "C" {
|
||||
PathBuilder* wgr_new_builder();
|
||||
void wgr_builder_move_to(PathBuilder* pb, float x, float y);
|
||||
void wgr_builder_line_to(PathBuilder* pb, float x, float y);
|
||||
void wgr_builder_curve_to(PathBuilder* pb, float c1x, float c1y, float c2x,
|
||||
float c2y, float x, float y);
|
||||
void wgr_builder_quad_to(PathBuilder* pb, float cx, float cy, float x, float y);
|
||||
void wgr_builder_close(PathBuilder* pb);
|
||||
void wgr_builder_set_fill_mode(PathBuilder* pb, FillMode fill_mode);
|
||||
Path wgr_builder_get_path(PathBuilder* pb);
|
||||
VertexBuffer wgr_path_rasterize_to_tri_list(const Path* p, int32_t clip_x,
|
||||
int32_t clip_y, int32_t clip_width,
|
||||
int32_t clip_height,
|
||||
bool need_inside = true,
|
||||
bool need_outside = false);
|
||||
void wgr_path_release(Path p);
|
||||
void wgr_vertex_buffer_release(VertexBuffer vb);
|
||||
void wgr_builder_release(PathBuilder* pb);
|
||||
};
|
||||
|
||||
} // namespace WGR
|
||||
|
||||
#endif // MOZILLA_GFX_WPF_GPU_RASTER_H
|
|
@ -50,7 +50,6 @@ EXPORTS.mozilla += [
|
|||
|
||||
EXPORTS.mozilla.gfx += [
|
||||
"CompositorHitTestInfo.h",
|
||||
"WPFGpuRaster.h",
|
||||
]
|
||||
|
||||
if CONFIG["MOZ_X11"]:
|
||||
|
|
|
@ -5592,11 +5592,6 @@
|
|||
value: 0.66
|
||||
mirror: always
|
||||
|
||||
- name: gfx.canvas.accelerated.gpu-path-size
|
||||
type: RelaxedAtomicUint32
|
||||
value: 4
|
||||
mirror: always
|
||||
|
||||
# 0x7fff is the maximum supported xlib surface size and is more than enough for canvases.
|
||||
- name: gfx.canvas.max-size
|
||||
type: RelaxedAtomicInt32
|
||||
|
|
|
@ -1293,11 +1293,6 @@ who = "Glenn Watson <git@intuitionlibrary.com>"
|
|||
criteria = "safe-to-deploy"
|
||||
version = "0.1.2"
|
||||
|
||||
[[audits.typed-arena-nomut]]
|
||||
who = "Lee Salzman <lsalzman@gmail.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.1.0"
|
||||
|
||||
[[audits.uluru]]
|
||||
who = "Emilio Cobos Álvarez <emilio@crisal.io>"
|
||||
criteria = "safe-to-deploy"
|
||||
|
@ -1630,12 +1625,6 @@ also contains a small C file compiled at build-time. I audited all of it and it
|
|||
looks correct.
|
||||
"""
|
||||
|
||||
[[audits.wpf-gpu-raster]]
|
||||
who = "Lee Salzman <lsalzman@mozilla.com>"
|
||||
criteria = "safe-to-deploy"
|
||||
version = "0.1.0"
|
||||
notes = "Written and maintained by Gfx team at Mozilla."
|
||||
|
||||
[[audits.xmldecl]]
|
||||
who = "Henri Sivonen <hsivonen@hsivonen.fi>"
|
||||
criteria = "safe-to-deploy"
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
{"files":{"CHANGELOG.md":"341c18fadacf5a5e634ed9225a14cf52d1acf3060bbef7cac7e06a9cc03d9d51","Cargo.toml":"59835e08c5df189f959b3ae85f3bb1d3ced3202ed83c01a84e1b216dc352cbc5","LICENSE":"9ed5e982274d54d0cf94f0e9f9fd889182b6f1f50a012f0be41ce7c884347ab6","README.md":"c01093b0ed283660525630cac3940c053e6fc9d16b2c2b96f5997060bbf518be","benches/benches.rs":"81a7aecb26801254b2c02583e0f48c1068f321c7988e4c76f914b26a4ee76ebf","ci/miri.sh":"10afdbeb3ed16e2eb2f330bc816652a3cd3fb9e49d6f5582e45014b59572b583","src/lib.rs":"6da96cda4f88099ff69018035795ffe74d84a8deed37a8dd5cf58ba7bcb90b2e","src/test.rs":"a4924cc873b077c14505d16a6b6ed8d4c248c21ebf5f76a2993d66b03ae9eae6"},"package":"bfc9d8d4e8c94375df96d6ac01a18c263d3d529bc4a53a207580ae9bc30e87c1"}
|
|
@ -1,143 +0,0 @@
|
|||
## Unreleased
|
||||
|
||||
Released YYYY/MM/DD.
|
||||
|
||||
### Added
|
||||
|
||||
* TODO (or remove section if none)
|
||||
|
||||
### Changed
|
||||
|
||||
* TODO (or remove section if none)
|
||||
|
||||
### Deprecated
|
||||
|
||||
* TODO (or remove section if none)
|
||||
|
||||
### Removed
|
||||
|
||||
* TODO (or remove section if none)
|
||||
|
||||
### Fixed
|
||||
|
||||
* TODO (or remove section if none)
|
||||
|
||||
### Security
|
||||
|
||||
* TODO (or remove section if none)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 2.0.1
|
||||
|
||||
Released 2019/01/10.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Support `#![no_std]` on stable Rust.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 2.0.0
|
||||
|
||||
Released 2019/12/03.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fixed some intra-documentation URLs.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 2.0.0-rc1
|
||||
|
||||
Released 2019/11/26.
|
||||
|
||||
Unless any issues are discovered or raised, we will release version 2.0.0 soon.
|
||||
|
||||
### Added
|
||||
|
||||
* Added `alloc_str` to `Arena<u8>`, to be able to allocate string slices.
|
||||
|
||||
### Changed
|
||||
|
||||
* The minimum supported rust version is now 1.36.0.
|
||||
* `alloc_uninitialized` returns `&mut [MaybeUninit<T>]` instead of `*mut [T]`,
|
||||
which is less prone to undefined behavior.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 1.7.0
|
||||
|
||||
Released 2019/10/31. *Spooky!*
|
||||
|
||||
### Added
|
||||
|
||||
* Added a `len` method to count how many items are in an arena.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fixed some theoretical overflows.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 1.6.1
|
||||
|
||||
Released 2019/09/17.
|
||||
|
||||
### Fixed
|
||||
|
||||
* Now compiles on old stable Rust versions again, instead of just new stable
|
||||
Rust versions. From here on out, we'll promise that 1.X will continue to
|
||||
compile on rustc versions >= 1.32.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 1.6.0
|
||||
|
||||
Released 2019/09/09.
|
||||
|
||||
### Added
|
||||
|
||||
* Added the `Arena::iter_mut` method for mutably iterating over an arena's
|
||||
contents. [See #29 for
|
||||
details.](https://github.com/SimonSapin/rust-typed-arena/pull/29)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 1.5.0
|
||||
|
||||
Released 2019/08/02.
|
||||
|
||||
### Added
|
||||
|
||||
* `Arena` now implements `Default`
|
||||
|
||||
### Fixed
|
||||
|
||||
* Introduced an internal fast path for allocation, improving performance.
|
||||
* Tests now run cleanly on Miri. There was previously a technicality where
|
||||
the stacked borrow rules were not being followed.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 1.4.1
|
||||
|
||||
Released 2018/06/29.
|
||||
|
||||
### Added
|
||||
|
||||
* Added more documentation comments and examples.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 1.4.0
|
||||
|
||||
Released 2018/06/21.
|
||||
|
||||
### Added
|
||||
|
||||
* Added a new, on-by-default feature named "std". Disabling this feature allows
|
||||
the crate to be used in `#![no_std]` environments. [#15][] [#12][]
|
||||
|
||||
[#15]: https://github.com/SimonSapin/rust-typed-arena/pull/15
|
||||
[#12]: https://github.com/SimonSapin/rust-typed-arena/pull/12
|
|
@ -1,39 +0,0 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
name = "typed-arena-nomut"
|
||||
version = "0.1.0"
|
||||
authors = ["Simon Sapin <simon.sapin@exyr.org>", "Nick Fitzgerald <fitzgen@gmail.com>"]
|
||||
description = "The arena, a fast but limited type of allocator"
|
||||
documentation = "https://docs.rs/typed-arena"
|
||||
readme = "./README.md"
|
||||
keywords = ["arena"]
|
||||
categories = ["memory-management", "no-std"]
|
||||
license = "MIT"
|
||||
repository = "https://github.com/jrmuizel/typed-arena-nomut"
|
||||
[profile.bench]
|
||||
debug = true
|
||||
|
||||
[lib]
|
||||
name = "typed_arena_nomut"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[[bench]]
|
||||
name = "benches"
|
||||
path = "benches/benches.rs"
|
||||
harness = false
|
||||
[dev-dependencies.criterion]
|
||||
version = "0.3.4"
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = []
|
|
@ -1,21 +0,0 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2018 The typed-arena developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -1,74 +0,0 @@
|
|||
# `typed-arena-nomut`
|
||||
|
||||
[![](https://docs.rs/typed-arena-nomut/badge.svg)](https://docs.rs/typed-arena-nomut/)
|
||||
[![](https://img.shields.io/crates/v/typed-arena-nomut.svg)](https://crates.io/crates/typed-arena-nomut)
|
||||
[![](https://img.shields.io/crates/d/typed-arena-nomut.svg)](https://crates.io/crates/typed-arena-nomut)
|
||||
|
||||
This is a fork of the typed-arena arena crate that returns an immutable reference instead of
|
||||
mutable one. This allows iteration on the arena items while they're borrowed.
|
||||
|
||||
**A fast (but limited) allocation arena for values of a single type.**
|
||||
|
||||
Allocated objects are destroyed all at once, when the arena itself is destroyed.
|
||||
There is no deallocation of individual objects while the arena itself is still
|
||||
alive. The flipside is that allocation is fast: typically just a vector push.
|
||||
|
||||
There is also a method `into_vec()` to recover ownership of allocated objects
|
||||
when the arena is no longer required, instead of destroying everything.
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
```rust
|
||||
use typed_arena_nomut::Arena;
|
||||
|
||||
struct Monster {
|
||||
level: u32,
|
||||
}
|
||||
|
||||
let monsters = Arena::new();
|
||||
|
||||
let goku = monsters.alloc(Monster { level: 9001 });
|
||||
assert!(goku.level > 9000);
|
||||
```
|
||||
|
||||
## Safe Cycles
|
||||
|
||||
All allocated objects get the same lifetime, so you can safely create cycles
|
||||
between them. This can be useful for certain data structures, such as graphs
|
||||
and trees with parent pointers.
|
||||
|
||||
```rust
|
||||
use std::cell::Cell;
|
||||
use typed_arena_nomut::Arena;
|
||||
|
||||
struct CycleParticipant<'a> {
|
||||
other: Cell<Option<&'a CycleParticipant<'a>>>,
|
||||
}
|
||||
|
||||
let arena = Arena::new();
|
||||
|
||||
let a = arena.alloc(CycleParticipant { other: Cell::new(None) });
|
||||
let b = arena.alloc(CycleParticipant { other: Cell::new(None) });
|
||||
|
||||
a.other.set(Some(b));
|
||||
b.other.set(Some(a));
|
||||
```
|
||||
|
||||
## Alternatives
|
||||
|
||||
### Need to allocate many different types of values?
|
||||
|
||||
Use multiple arenas if you have only a couple different types or try
|
||||
[`bumpalo`](https://crates.io/crates/bumpalo), which is a bump-allocation arena
|
||||
can allocate heterogenous types of values.
|
||||
|
||||
### Want allocation to return identifiers instead of references and dealing with references and lifetimes everywhere?
|
||||
|
||||
Check out [`id-arena`](https://crates.io/crates/id-arena) or
|
||||
[`generational-arena`](https://crates.io/crates/generational-arena).
|
||||
|
||||
### Need to deallocate individual objects at a time?
|
||||
|
||||
Check out [`generational-arena`](https://crates.io/crates/generational-arena)
|
||||
for an arena-style crate or look for a more traditional allocator.
|
|
@ -1,40 +0,0 @@
|
|||
#[macro_use]
|
||||
extern crate criterion;
|
||||
extern crate typed_arena_nomut;
|
||||
|
||||
use criterion::{Criterion, BenchmarkId};
|
||||
|
||||
#[derive(Default)]
|
||||
struct Small(usize);
|
||||
|
||||
#[derive(Default)]
|
||||
struct Big([usize; 32]);
|
||||
|
||||
fn allocate<T: Default>(n: usize) {
|
||||
let arena = typed_arena_nomut::Arena::new();
|
||||
for _ in 0..n {
|
||||
let val: &T = arena.alloc(Default::default());
|
||||
criterion::black_box(val);
|
||||
}
|
||||
}
|
||||
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("allocate");
|
||||
for n in 1..5 {
|
||||
let n = n * 1000;
|
||||
group.throughput(criterion::Throughput::Elements(n as u64));
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("allocate-small", n),
|
||||
&n,
|
||||
|b, &n| b.iter(|| allocate::<Small>(n)),
|
||||
);
|
||||
group.bench_with_input(
|
||||
BenchmarkId::new("allocate-big", n),
|
||||
&n,
|
||||
|b, &n| b.iter(|| allocate::<Big>(n)),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
criterion_main!(benches);
|
|
@ -1,11 +0,0 @@
|
|||
set -ex
|
||||
|
||||
MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
|
||||
echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
|
||||
rustup default "$MIRI_NIGHTLY"
|
||||
|
||||
cargo clean
|
||||
rustup component add miri
|
||||
cargo miri setup
|
||||
|
||||
cargo miri test
|
|
@ -1,633 +0,0 @@
|
|||
//! The arena, a fast but limited type of allocator.
|
||||
//!
|
||||
//! **A fast (but limited) allocation arena for values of a single type.**
|
||||
//!
|
||||
//! Allocated objects are destroyed all at once, when the arena itself is
|
||||
//! destroyed. There is no deallocation of individual objects while the arena
|
||||
//! itself is still alive. The flipside is that allocation is fast: typically
|
||||
//! just a vector push.
|
||||
//!
|
||||
//! There is also a method `into_vec()` to recover ownership of allocated
|
||||
//! objects when the arena is no longer required, instead of destroying
|
||||
//! everything.
|
||||
//!
|
||||
//! ## Example
|
||||
//!
|
||||
//! ```
|
||||
//! use typed_arena_nomut::Arena;
|
||||
//!
|
||||
//! struct Monster {
|
||||
//! level: u32,
|
||||
//! }
|
||||
//!
|
||||
//! let monsters = Arena::new();
|
||||
//!
|
||||
//! let goku = monsters.alloc(Monster { level: 9001 });
|
||||
//! assert!(goku.level > 9000);
|
||||
//! ```
|
||||
//!
|
||||
//! ## Safe Cycles
|
||||
//!
|
||||
//! All allocated objects get the same lifetime, so you can safely create cycles
|
||||
//! between them. This can be useful for certain data structures, such as graphs
|
||||
//! and trees with parent pointers.
|
||||
//!
|
||||
//! ```
|
||||
//! use std::cell::Cell;
|
||||
//! use typed_arena_nomut::Arena;
|
||||
//!
|
||||
//! struct CycleParticipant<'a> {
|
||||
//! other: Cell<Option<&'a CycleParticipant<'a>>>,
|
||||
//! }
|
||||
//!
|
||||
//! let arena = Arena::new();
|
||||
//!
|
||||
//! let a = arena.alloc(CycleParticipant { other: Cell::new(None) });
|
||||
//! let b = arena.alloc(CycleParticipant { other: Cell::new(None) });
|
||||
//!
|
||||
//! a.other.set(Some(b));
|
||||
//! b.other.set(Some(a));
|
||||
//! ```
|
||||
|
||||
// Potential optimizations:
|
||||
// 1) add and stabilize a method for in-place reallocation of vecs.
|
||||
// 2) add and stabilize placement new.
|
||||
// 3) use an iterator. This may add far too much unsafe code.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![cfg_attr(not(any(feature = "std", test)), no_std)]
|
||||
|
||||
#[cfg(not(feature = "std"))]
|
||||
extern crate alloc;
|
||||
|
||||
#[cfg(any(feature = "std", test))]
|
||||
extern crate core;
|
||||
|
||||
#[cfg(not(feature = "std"))]
|
||||
use alloc::vec::Vec;
|
||||
|
||||
use core::cell::RefCell;
|
||||
use core::cmp;
|
||||
use core::iter;
|
||||
use core::mem;
|
||||
use core::slice;
|
||||
use core::str;
|
||||
use std::cell::Ref;
|
||||
|
||||
use mem::MaybeUninit;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
// Initial size in bytes.
|
||||
const INITIAL_SIZE: usize = 1024;
|
||||
// Minimum capacity. Must be larger than 0.
|
||||
const MIN_CAPACITY: usize = 1;
|
||||
|
||||
/// An arena of objects of type `T`.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// struct Monster {
|
||||
/// level: u32,
|
||||
/// }
|
||||
///
|
||||
/// let monsters = Arena::new();
|
||||
///
|
||||
/// let vegeta = monsters.alloc(Monster { level: 9001 });
|
||||
/// assert!(vegeta.level > 9000);
|
||||
/// ```
|
||||
pub struct Arena<T> {
|
||||
chunks: RefCell<ChunkList<T>>,
|
||||
}
|
||||
|
||||
struct ChunkList<T> {
|
||||
current: Vec<T>,
|
||||
rest: Vec<Vec<T>>,
|
||||
}
|
||||
|
||||
impl<T> Arena<T> {
|
||||
/// Construct a new arena.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena = Arena::new();
|
||||
/// # arena.alloc(1);
|
||||
/// ```
|
||||
pub fn new() -> Arena<T> {
|
||||
let size = cmp::max(1, mem::size_of::<T>());
|
||||
Arena::with_capacity(INITIAL_SIZE / size)
|
||||
}
|
||||
|
||||
/// Construct a new arena with capacity for `n` values pre-allocated.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena = Arena::with_capacity(1337);
|
||||
/// # arena.alloc(1);
|
||||
/// ```
|
||||
pub fn with_capacity(n: usize) -> Arena<T> {
|
||||
let n = cmp::max(MIN_CAPACITY, n);
|
||||
Arena {
|
||||
chunks: RefCell::new(ChunkList {
|
||||
current: Vec::with_capacity(n),
|
||||
rest: Vec::new(),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the size of the arena
|
||||
///
|
||||
/// This is useful for using the size of previous typed arenas to build new typed arenas with large enough spaces.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena = Arena::with_capacity(0);
|
||||
/// let a = arena.alloc(1);
|
||||
/// let b = arena.alloc(2);
|
||||
///
|
||||
/// assert_eq!(arena.len(), 2);
|
||||
/// ```
|
||||
pub fn len(&self) -> usize {
|
||||
let chunks = self.chunks.borrow();
|
||||
|
||||
let mut res = 0;
|
||||
for vec in chunks.rest.iter() {
|
||||
res += vec.len()
|
||||
}
|
||||
|
||||
res + chunks.current.len()
|
||||
}
|
||||
|
||||
/// Allocates a value in the arena, and returns a mutable reference
|
||||
/// to that value.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena = Arena::new();
|
||||
/// let x = arena.alloc(42);
|
||||
/// assert_eq!(*x, 42);
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn alloc(&self, value: T) -> &T {
|
||||
self.alloc_fast_path(value)
|
||||
.unwrap_or_else(|value| self.alloc_slow_path(value))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn alloc_fast_path(&self, value: T) -> Result<&T, T> {
|
||||
let mut chunks = self.chunks.borrow_mut();
|
||||
let len = chunks.current.len();
|
||||
if len < chunks.current.capacity() {
|
||||
chunks.current.push(value);
|
||||
// Avoid going through `Vec::deref_mut`, which overlaps
|
||||
// other references we have already handed out!
|
||||
debug_assert!(len < chunks.current.len()); // bounds check
|
||||
Ok(unsafe { &mut *chunks.current.as_mut_ptr().add(len) })
|
||||
} else {
|
||||
Err(value)
|
||||
}
|
||||
}
|
||||
|
||||
fn alloc_slow_path(&self, value: T) -> &T {
|
||||
&self.alloc_extend(iter::once(value))[0]
|
||||
}
|
||||
|
||||
/// Uses the contents of an iterator to allocate values in the arena.
|
||||
/// Returns a mutable slice that contains these values.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena = Arena::new();
|
||||
/// let abc = arena.alloc_extend("abcdefg".chars().take(3));
|
||||
/// assert_eq!(abc, ['a', 'b', 'c']);
|
||||
/// ```
|
||||
pub fn alloc_extend<I>(&self, iterable: I) -> &[T]
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
{
|
||||
let mut iter = iterable.into_iter();
|
||||
|
||||
let mut chunks = self.chunks.borrow_mut();
|
||||
|
||||
let iter_min_len = iter.size_hint().0;
|
||||
let mut next_item_index;
|
||||
debug_assert!(
|
||||
chunks.current.capacity() >= chunks.current.len(),
|
||||
"capacity is always greater than or equal to len, so we don't need to worry about underflow"
|
||||
);
|
||||
if iter_min_len > chunks.current.capacity() - chunks.current.len() {
|
||||
chunks.reserve(iter_min_len);
|
||||
chunks.current.extend(iter);
|
||||
next_item_index = 0;
|
||||
} else {
|
||||
next_item_index = chunks.current.len();
|
||||
let mut i = 0;
|
||||
while let Some(elem) = iter.next() {
|
||||
if chunks.current.len() == chunks.current.capacity() {
|
||||
// The iterator was larger than we could fit into the current chunk.
|
||||
let chunks = &mut *chunks;
|
||||
// Create a new chunk into which we can freely push the entire iterator into
|
||||
chunks.reserve(i + 1);
|
||||
let previous_chunk = chunks.rest.last_mut().unwrap();
|
||||
let previous_chunk_len = previous_chunk.len();
|
||||
// Move any elements we put into the previous chunk into this new chunk
|
||||
chunks
|
||||
.current
|
||||
.extend(previous_chunk.drain(previous_chunk_len - i..));
|
||||
chunks.current.push(elem);
|
||||
// And the remaining elements in the iterator
|
||||
chunks.current.extend(iter);
|
||||
next_item_index = 0;
|
||||
break;
|
||||
} else {
|
||||
chunks.current.push(elem);
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
let new_slice_ref = &mut chunks.current[next_item_index..];
|
||||
|
||||
// Extend the lifetime from that of `chunks_borrow` to that of `self`.
|
||||
// This is OK because we’re careful to never move items
|
||||
// by never pushing to inner `Vec`s beyond their initial capacity.
|
||||
// The returned reference is unique (`&mut`):
|
||||
// the `Arena` never gives away references to existing items.
|
||||
unsafe { mem::transmute::<&mut [T], &mut [T]>(new_slice_ref) }
|
||||
}
|
||||
|
||||
/// Allocates space for a given number of values, but doesn't initialize it.
|
||||
///
|
||||
/// ## Safety
|
||||
///
|
||||
/// After calling this method, the arena considers the elements initialized. If you fail to
|
||||
/// initialize them (which includes because of panicking during the initialization), the arena
|
||||
/// will run destructors on the uninitialized memory. Therefore, you must initialize them.
|
||||
///
|
||||
/// Considering how easy it is to cause undefined behaviour using this, you're advised to
|
||||
/// prefer the other (safe) methods, like [`alloc_extend`][Arena::alloc_extend].
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::mem::{self, MaybeUninit};
|
||||
/// use std::ptr;
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// // Transmute from MaybeUninit slice to slice of initialized T.
|
||||
/// // It is a separate function to preserve the lifetime of the reference.
|
||||
/// unsafe fn transmute_uninit<A>(r: &mut [MaybeUninit<A>]) -> &mut [A] {
|
||||
/// mem::transmute(r)
|
||||
/// }
|
||||
///
|
||||
/// let arena: Arena<bool> = Arena::new();
|
||||
/// let slice: &mut [bool];
|
||||
/// unsafe {
|
||||
/// let uninitialized = arena.alloc_uninitialized(10);
|
||||
/// for elem in uninitialized.iter_mut() {
|
||||
/// ptr::write(elem.as_mut_ptr(), true);
|
||||
/// }
|
||||
/// slice = transmute_uninit(uninitialized);
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ## Alternative allocation pattern
|
||||
///
|
||||
/// To avoid the problem of dropping assumed to be initialized elements on panic, it is also
|
||||
/// possible to combine the [`reserve_extend`][Arena::reserve_extend] with
|
||||
/// [`uninitialized_array`][Arena::uninitialized_array], initialize the elements and confirm
|
||||
/// them by this method. In such case, when there's a panic during initialization, the already
|
||||
/// initialized elements would leak but it wouldn't cause UB.
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::mem::{self, MaybeUninit};
|
||||
/// use std::ptr;
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// unsafe fn transmute_uninit<A>(r: &mut [MaybeUninit<A>]) -> &mut [A] {
|
||||
/// mem::transmute(r)
|
||||
/// }
|
||||
///
|
||||
/// const COUNT: usize = 2;
|
||||
///
|
||||
/// let arena: Arena<String> = Arena::new();
|
||||
///
|
||||
/// arena.reserve_extend(COUNT);
|
||||
/// let slice: &mut [String];
|
||||
/// unsafe {
|
||||
/// // Perform initialization before we claim the memory.
|
||||
/// let uninitialized = arena.uninitialized_array();
|
||||
/// assert!((*uninitialized).len() >= COUNT); // Ensured by the reserve_extend
|
||||
/// for elem in &mut (*uninitialized)[..COUNT] {
|
||||
/// ptr::write(elem.as_mut_ptr(), "Hello".to_owned());
|
||||
/// }
|
||||
/// let addr = (*uninitialized).as_ptr() as usize;
|
||||
///
|
||||
/// // The alloc_uninitialized returns the same memory, but "confirms" its allocation.
|
||||
/// slice = transmute_uninit(arena.alloc_uninitialized(COUNT));
|
||||
/// assert_eq!(addr, slice.as_ptr() as usize);
|
||||
/// assert_eq!(slice, &["Hello".to_owned(), "Hello".to_owned()]);
|
||||
/// }
|
||||
/// ```
|
||||
pub unsafe fn alloc_uninitialized(&self, num: usize) -> &mut [MaybeUninit<T>] {
|
||||
let mut chunks = self.chunks.borrow_mut();
|
||||
|
||||
debug_assert!(
|
||||
chunks.current.capacity() >= chunks.current.len(),
|
||||
"capacity is always greater than or equal to len, so we don't need to worry about underflow"
|
||||
);
|
||||
if num > chunks.current.capacity() - chunks.current.len() {
|
||||
chunks.reserve(num);
|
||||
}
|
||||
|
||||
// At this point, the current chunk must have free capacity.
|
||||
let next_item_index = chunks.current.len();
|
||||
chunks.current.set_len(next_item_index + num);
|
||||
|
||||
// Go through pointers, to make sure we never create a reference to uninitialized T.
|
||||
let start = chunks.current.as_mut_ptr().offset(next_item_index as isize);
|
||||
let start_uninit = start as *mut MaybeUninit<T>;
|
||||
slice::from_raw_parts_mut(start_uninit, num)
|
||||
}
|
||||
|
||||
/// Makes sure there's enough continuous space for at least `num` elements.
|
||||
///
|
||||
/// This may save some work if called before [`alloc_extend`][Arena::alloc_extend]. It also
|
||||
/// allows somewhat safer use pattern of [`alloc_uninitialized`][Arena::alloc_uninitialized].
|
||||
/// On the other hand this might waste up to `n - 1` elements of space. In case new allocation
|
||||
/// is needed, the unused ones in current chunk are never used.
|
||||
pub fn reserve_extend(&self, num: usize) {
|
||||
let mut chunks = self.chunks.borrow_mut();
|
||||
|
||||
debug_assert!(
|
||||
chunks.current.capacity() >= chunks.current.len(),
|
||||
"capacity is always greater than or equal to len, so we don't need to worry about underflow"
|
||||
);
|
||||
if num > chunks.current.capacity() - chunks.current.len() {
|
||||
chunks.reserve(num);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns unused space.
|
||||
///
|
||||
/// *This unused space is still not considered "allocated".* Therefore, it
|
||||
/// won't be dropped unless there are further calls to `alloc`,
|
||||
/// [`alloc_uninitialized`][Arena::alloc_uninitialized], or
|
||||
/// [`alloc_extend`][Arena::alloc_extend] which is why the method is safe.
|
||||
///
|
||||
/// It returns a raw pointer to avoid creating multiple mutable references to the same place.
|
||||
/// It is up to the caller not to dereference it after any of the `alloc_` methods are called.
|
||||
pub fn uninitialized_array(&self) -> *mut [MaybeUninit<T>] {
|
||||
let mut chunks = self.chunks.borrow_mut();
|
||||
let len = chunks.current.capacity() - chunks.current.len();
|
||||
let next_item_index = chunks.current.len();
|
||||
|
||||
unsafe {
|
||||
// Go through pointers, to make sure we never create a reference to uninitialized T.
|
||||
let start = chunks.current.as_mut_ptr().offset(next_item_index as isize);
|
||||
let start_uninit = start as *mut MaybeUninit<T>;
|
||||
slice::from_raw_parts_mut(start_uninit, len) as *mut _
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert this `Arena` into a `Vec<T>`.
|
||||
///
|
||||
/// Items in the resulting `Vec<T>` appear in the order that they were
|
||||
/// allocated in.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena = Arena::new();
|
||||
///
|
||||
/// arena.alloc("a");
|
||||
/// arena.alloc("b");
|
||||
/// arena.alloc("c");
|
||||
///
|
||||
/// let easy_as_123 = arena.into_vec();
|
||||
///
|
||||
/// assert_eq!(easy_as_123, vec!["a", "b", "c"]);
|
||||
/// ```
|
||||
pub fn into_vec(self) -> Vec<T> {
|
||||
let mut chunks = self.chunks.into_inner();
|
||||
// keep order of allocation in the resulting Vec
|
||||
let n = chunks
|
||||
.rest
|
||||
.iter()
|
||||
.fold(chunks.current.len(), |a, v| a + v.len());
|
||||
let mut result = Vec::with_capacity(n);
|
||||
for mut vec in chunks.rest {
|
||||
result.append(&mut vec);
|
||||
}
|
||||
result.append(&mut chunks.current);
|
||||
result
|
||||
}
|
||||
|
||||
/// Returns an iterator that allows modifying each value.
|
||||
///
|
||||
/// Items are yielded in the order that they were allocated.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
/// use std::cell::Cell;
|
||||
///
|
||||
/// #[derive(Debug, PartialEq, Eq)]
|
||||
/// struct Point { x: Cell<i32>, y: i32 };
|
||||
///
|
||||
/// let mut arena = Arena::new();
|
||||
///
|
||||
/// arena.alloc(Point { x: Cell::new(0), y: 0 });
|
||||
/// arena.alloc(Point { x: Cell::new(1), y: 1 });
|
||||
///
|
||||
/// for point in arena.iter() {
|
||||
/// point.x.set(point.x.get() + 10);
|
||||
/// }
|
||||
///
|
||||
/// let points = arena.into_vec();
|
||||
///
|
||||
/// assert_eq!(points, vec![Point { x: Cell::new(10), y: 0 }, Point { x: Cell::new(11), y: 1 }]);
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
/// ## Immutable Iteration
|
||||
///
|
||||
/// Note that there is no corresponding `iter` method. Access to the arena's contents
|
||||
/// requries mutable access to the arena itself.
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
/// use std::cell::Cell;
|
||||
///
|
||||
/// let mut arena = Arena::new();
|
||||
/// let x = arena.alloc(Cell::new(1));
|
||||
///
|
||||
/// for i in arena.iter() {
|
||||
/// println!("i: {}", i.get());
|
||||
/// }
|
||||
///
|
||||
/// x.set(x.get() * 2);
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn iter(&self) -> Iter<T> {
|
||||
let chunks = self.chunks.borrow();
|
||||
let position = if !chunks.rest.is_empty() {
|
||||
let index = 0;
|
||||
let inner_iter = chunks.rest[index].iter();
|
||||
// Extend the lifetime of the individual elements to that of the arena.
|
||||
// This is OK because we borrow the arena mutably to prevent new allocations
|
||||
// and we take care here to never move items inside the arena while the
|
||||
// iterator is alive.
|
||||
let inner_iter = unsafe { mem::transmute(inner_iter) };
|
||||
IterState::ChunkListRest { index, inner_iter }
|
||||
} else {
|
||||
// Extend the lifetime of the individual elements to that of the arena.
|
||||
let iter = unsafe { mem::transmute(chunks.current.iter()) };
|
||||
IterState::ChunkListCurrent { iter }
|
||||
};
|
||||
Iter {
|
||||
chunks,
|
||||
state: position,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Arena<u8> {
|
||||
/// Allocates a string slice and returns a mutable reference to it.
|
||||
///
|
||||
/// This is on `Arena<u8>`, because string slices use byte slices (`[u8]`) as their backing
|
||||
/// storage.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use typed_arena_nomut::Arena;
|
||||
///
|
||||
/// let arena: Arena<u8> = Arena::new();
|
||||
/// let hello = arena.alloc_str("Hello world");
|
||||
/// assert_eq!("Hello world", hello);
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn alloc_str(&self, s: &str) -> & str {
|
||||
let buffer = self.alloc_extend(s.bytes());
|
||||
// Can't fail the utf8 validation, it already came in as utf8
|
||||
unsafe { str::from_utf8_unchecked(buffer) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for Arena<T> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ChunkList<T> {
|
||||
#[inline(never)]
|
||||
#[cold]
|
||||
fn reserve(&mut self, additional: usize) {
|
||||
let double_cap = self
|
||||
.current
|
||||
.capacity()
|
||||
.checked_mul(2)
|
||||
.expect("capacity overflow");
|
||||
let required_cap = additional
|
||||
.checked_next_power_of_two()
|
||||
.expect("capacity overflow");
|
||||
let new_capacity = cmp::max(double_cap, required_cap);
|
||||
let chunk = mem::replace(&mut self.current, Vec::with_capacity(new_capacity));
|
||||
self.rest.push(chunk);
|
||||
}
|
||||
}
|
||||
|
||||
enum IterState<'a, T> {
|
||||
ChunkListRest {
|
||||
index: usize,
|
||||
inner_iter: slice::Iter<'a, T>,
|
||||
},
|
||||
ChunkListCurrent {
|
||||
iter: slice::Iter<'a, T>,
|
||||
},
|
||||
}
|
||||
|
||||
/// Mutable arena iterator.
|
||||
///
|
||||
/// This struct is created by the [`iter_mut`](struct.Arena.html#method.iter_mut) method on [Arenas](struct.Arena.html).
|
||||
pub struct Iter<'a, T: 'a> {
|
||||
chunks: Ref<'a, ChunkList<T>>,
|
||||
state: IterState<'a, T>,
|
||||
}
|
||||
|
||||
impl<'a, T> Iterator for Iter<'a, T> {
|
||||
type Item = &'a T;
|
||||
fn next(&mut self) -> Option<&'a T> {
|
||||
loop {
|
||||
self.state = match self.state {
|
||||
IterState::ChunkListRest {
|
||||
mut index,
|
||||
ref mut inner_iter,
|
||||
} => {
|
||||
match inner_iter.next() {
|
||||
Some(item) => return Some(item),
|
||||
None => {
|
||||
index += 1;
|
||||
if index < self.chunks.rest.len() {
|
||||
let inner_iter = self.chunks.rest[index].iter();
|
||||
// Extend the lifetime of the individual elements to that of the arena.
|
||||
let inner_iter = unsafe { mem::transmute(inner_iter) };
|
||||
IterState::ChunkListRest { index, inner_iter }
|
||||
} else {
|
||||
let iter = self.chunks.current.iter();
|
||||
// Extend the lifetime of the individual elements to that of the arena.
|
||||
let iter = unsafe { mem::transmute(iter) };
|
||||
IterState::ChunkListCurrent { iter }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
IterState::ChunkListCurrent { ref mut iter } => return iter.next(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
let current_len = self.chunks.current.len();
|
||||
let current_cap = self.chunks.current.capacity();
|
||||
if self.chunks.rest.is_empty() {
|
||||
(current_len, Some(current_len))
|
||||
} else {
|
||||
let rest_len = self.chunks.rest.len();
|
||||
let last_chunk_len = self
|
||||
.chunks
|
||||
.rest
|
||||
.last()
|
||||
.map(|chunk| chunk.len())
|
||||
.unwrap_or(0);
|
||||
|
||||
let min = current_len + last_chunk_len;
|
||||
let max = min + (rest_len * current_cap / rest_len);
|
||||
|
||||
(min, Some(max))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,373 +0,0 @@
|
|||
use super::*;
|
||||
use std::cell::Cell;
|
||||
use std::mem;
|
||||
use std::panic::{self, AssertUnwindSafe};
|
||||
use std::ptr;
|
||||
|
||||
struct DropTracker<'a>(&'a Cell<u32>);
|
||||
impl<'a> Drop for DropTracker<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.0.set(self.0.get() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
struct Node<'a, 'b: 'a>(Option<&'a Node<'a, 'b>>, u32, DropTracker<'b>);
|
||||
|
||||
#[test]
|
||||
fn arena_as_intended() {
|
||||
let drop_counter = Cell::new(0);
|
||||
{
|
||||
let arena = Arena::with_capacity(2);
|
||||
|
||||
let mut node: &Node = arena.alloc(Node(None, 1, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 0);
|
||||
|
||||
node = arena.alloc(Node(Some(node), 2, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 0);
|
||||
|
||||
node = arena.alloc(Node(Some(node), 3, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 1);
|
||||
|
||||
node = arena.alloc(Node(Some(node), 4, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 1);
|
||||
|
||||
assert_eq!(node.1, 4);
|
||||
assert_eq!(node.0.unwrap().1, 3);
|
||||
assert_eq!(node.0.unwrap().0.unwrap().1, 2);
|
||||
assert_eq!(node.0.unwrap().0.unwrap().0.unwrap().1, 1);
|
||||
assert!(node.0.unwrap().0.unwrap().0.unwrap().0.is_none());
|
||||
|
||||
assert_eq!(arena.len(), 4);
|
||||
|
||||
mem::drop(node);
|
||||
assert_eq!(drop_counter.get(), 0);
|
||||
|
||||
let mut node: &Node = arena.alloc(Node(None, 5, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 1);
|
||||
|
||||
node = arena.alloc(Node(Some(node), 6, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 1);
|
||||
|
||||
node = arena.alloc(Node(Some(node), 7, DropTracker(&drop_counter)));
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 2);
|
||||
|
||||
assert_eq!(drop_counter.get(), 0);
|
||||
|
||||
assert_eq!(node.1, 7);
|
||||
assert_eq!(node.0.unwrap().1, 6);
|
||||
assert_eq!(node.0.unwrap().0.unwrap().1, 5);
|
||||
assert!(node.0.unwrap().0.unwrap().0.is_none());
|
||||
|
||||
assert_eq!(drop_counter.get(), 0);
|
||||
}
|
||||
assert_eq!(drop_counter.get(), 7);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ensure_into_vec_maintains_order_of_allocation() {
|
||||
let arena = Arena::with_capacity(1); // force multiple inner vecs
|
||||
for &s in &["t", "e", "s", "t"] {
|
||||
arena.alloc(String::from(s));
|
||||
}
|
||||
let vec = arena.into_vec();
|
||||
assert_eq!(vec, vec!["t", "e", "s", "t"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zero_cap() {
|
||||
let arena = Arena::with_capacity(0);
|
||||
let a = arena.alloc(1);
|
||||
let b = arena.alloc(2);
|
||||
assert_eq!(*a, 1);
|
||||
assert_eq!(*b, 2);
|
||||
assert_eq!(arena.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_alloc_extend() {
|
||||
let arena = Arena::with_capacity(2);
|
||||
for i in 0..15 {
|
||||
let slice = arena.alloc_extend(0..i);
|
||||
for (j, &elem) in slice.iter().enumerate() {
|
||||
assert_eq!(j, elem);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_alloc_uninitialized() {
|
||||
const LIMIT: usize = 15;
|
||||
let drop_counter = Cell::new(0);
|
||||
unsafe {
|
||||
let arena: Arena<Node> = Arena::with_capacity(4);
|
||||
for i in 0..LIMIT {
|
||||
let slice = arena.alloc_uninitialized(i);
|
||||
for (j, elem) in slice.iter_mut().enumerate() {
|
||||
ptr::write(elem.as_mut_ptr(), Node(None, j as u32, DropTracker(&drop_counter)));
|
||||
}
|
||||
assert_eq!(drop_counter.get(), 0);
|
||||
}
|
||||
}
|
||||
assert_eq!(drop_counter.get(), (0..LIMIT).fold(0, |a, e| a + e) as u32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_alloc_extend_with_drop_counter() {
|
||||
let drop_counter = Cell::new(0);
|
||||
{
|
||||
let arena = Arena::with_capacity(2);
|
||||
let iter = (0..100).map(|j| Node(None, j as u32, DropTracker(&drop_counter)));
|
||||
let older_ref = Some(&arena.alloc_extend(iter)[0]);
|
||||
assert_eq!(drop_counter.get(), 0);
|
||||
let iter = (0..100).map(|j| Node(older_ref, j as u32, DropTracker(&drop_counter)));
|
||||
arena.alloc_extend(iter);
|
||||
assert_eq!(drop_counter.get(), 0);
|
||||
}
|
||||
assert_eq!(drop_counter.get(), 200);
|
||||
}
|
||||
|
||||
/// Test with bools.
|
||||
///
|
||||
/// Bools, unlike integers, have invalid bit patterns. Therefore, ever having an uninitialized bool
|
||||
/// is insta-UB. Make sure miri doesn't find any such thing.
|
||||
#[test]
|
||||
fn test_alloc_uninitialized_bools() {
|
||||
const LEN: usize = 20;
|
||||
unsafe {
|
||||
let arena: Arena<bool> = Arena::with_capacity(2);
|
||||
let slice = arena.alloc_uninitialized(LEN);
|
||||
for elem in slice.iter_mut() {
|
||||
ptr::write(elem.as_mut_ptr(), true);
|
||||
}
|
||||
// Now it is fully initialized, we can safely transmute the slice.
|
||||
let slice: &mut [bool] = mem::transmute(slice);
|
||||
assert_eq!(&[true; LEN], slice);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check nothing bad happens by panicking during initialization of borrowed slice.
|
||||
#[test]
|
||||
fn alloc_uninitialized_with_panic() {
|
||||
struct Dropper(bool);
|
||||
|
||||
impl Drop for Dropper {
|
||||
fn drop(&mut self) {
|
||||
// Just make sure we touch the value, to make sure miri would bite if it was
|
||||
// unitialized
|
||||
if self.0 {
|
||||
panic!();
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut reached_first_init = false;
|
||||
panic::catch_unwind(AssertUnwindSafe(|| unsafe {
|
||||
let arena: Arena<Dropper> = Arena::new();
|
||||
arena.reserve_extend(2);
|
||||
let uninitialized = arena.uninitialized_array();
|
||||
assert!((*uninitialized).len() >= 2);
|
||||
ptr::write((*uninitialized)[0].as_mut_ptr(), Dropper(false));
|
||||
reached_first_init = true;
|
||||
panic!("To drop the arena");
|
||||
// If it didn't panic, we would continue by initializing the second one and confirming by
|
||||
// .alloc_uninitialized();
|
||||
})).unwrap_err();
|
||||
assert!(reached_first_init);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_uninitialized_array() {
|
||||
let arena = Arena::with_capacity(2);
|
||||
let uninit = arena.uninitialized_array();
|
||||
arena.alloc_extend(0..2);
|
||||
unsafe {
|
||||
for (&a, b) in (&*uninit).iter().zip(0..2) {
|
||||
assert_eq!(a.assume_init(), b);
|
||||
}
|
||||
assert!((&*arena.uninitialized_array()).as_ptr() != (&*uninit).as_ptr());
|
||||
arena.alloc(0);
|
||||
let uninit = arena.uninitialized_array();
|
||||
assert_eq!((&*uninit).len(), 3);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dont_trust_the_iterator_size() {
|
||||
use std::iter::repeat;
|
||||
|
||||
struct WrongSizeIter<I>(I);
|
||||
impl<I> Iterator for WrongSizeIter<I>
|
||||
where
|
||||
I: Iterator,
|
||||
{
|
||||
type Item = I::Item;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
(0, Some(0))
|
||||
}
|
||||
}
|
||||
|
||||
impl<I> ExactSizeIterator for WrongSizeIter<I> where I: Iterator {}
|
||||
|
||||
let arena = Arena::with_capacity(2);
|
||||
arena.alloc(0);
|
||||
let slice = arena.alloc_extend(WrongSizeIter(repeat(1).take(1_000)));
|
||||
// Allocation of 1000 elements should have created a new chunk
|
||||
assert_eq!(arena.chunks.borrow().rest.len(), 1);
|
||||
assert_eq!(slice.len(), 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn arena_is_send() {
|
||||
fn assert_is_send<T: Send>(_: T) {}
|
||||
|
||||
// If `T` is `Send`, ...
|
||||
assert_is_send(42_u32);
|
||||
|
||||
// Then `Arena<T>` is also `Send`.
|
||||
let arena: Arena<u32> = Arena::new();
|
||||
assert_is_send(arena);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn iter_mut_low_capacity() {
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NonCopy(usize);
|
||||
|
||||
const MAX: usize = 1_000;
|
||||
const CAP: usize = 16;
|
||||
|
||||
let arena = Arena::with_capacity(CAP);
|
||||
for i in 1..MAX {
|
||||
arena.alloc(NonCopy(i));
|
||||
}
|
||||
|
||||
assert!(
|
||||
arena.chunks.borrow().rest.len() > 1,
|
||||
"expected multiple chunks"
|
||||
);
|
||||
|
||||
let mut iter = arena.iter();
|
||||
for i in 1..MAX {
|
||||
assert_eq!(Some(&NonCopy(i)), iter.next());
|
||||
}
|
||||
|
||||
assert_eq!(None, iter.next());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn iter_mut_high_capacity() {
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NonCopy(usize);
|
||||
|
||||
const MAX: usize = 1_000;
|
||||
const CAP: usize = 8192;
|
||||
|
||||
let arena = Arena::with_capacity(CAP);
|
||||
for i in 1..MAX {
|
||||
arena.alloc(NonCopy(i));
|
||||
}
|
||||
|
||||
assert!(
|
||||
arena.chunks.borrow().rest.is_empty(),
|
||||
"expected single chunk"
|
||||
);
|
||||
|
||||
let mut iter = arena.iter();
|
||||
for i in 1..MAX {
|
||||
assert_eq!(Some(&NonCopy(i)), iter.next());
|
||||
}
|
||||
|
||||
assert_eq!(None, iter.next());
|
||||
}
|
||||
|
||||
fn assert_size_hint<T>(arena_len: usize, iter: Iter<'_, T>) {
|
||||
let (min, max) = iter.size_hint();
|
||||
|
||||
assert!(max.is_some());
|
||||
let max = max.unwrap();
|
||||
|
||||
// Check that the actual arena length lies between the estimated min and max
|
||||
assert!(min <= arena_len);
|
||||
assert!(max >= arena_len);
|
||||
|
||||
// Check that the min and max estimates are within a factor of 3
|
||||
assert!(min >= arena_len / 3);
|
||||
assert!(max <= arena_len * 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn size_hint() {
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NonCopy(usize);
|
||||
|
||||
const MAX: usize = 32;
|
||||
const CAP: usize = 0;
|
||||
|
||||
for cap in CAP..(CAP + 16/* check some non-power-of-two capacities */) {
|
||||
let arena = Arena::with_capacity(cap);
|
||||
for i in 1..MAX {
|
||||
arena.alloc(NonCopy(i));
|
||||
let iter = arena.iter();
|
||||
assert_size_hint(i, iter);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg_attr(miri, ignore)]
|
||||
fn size_hint_low_initial_capacities() {
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NonCopy(usize);
|
||||
|
||||
const MAX: usize = 25_000;
|
||||
const CAP: usize = 0;
|
||||
|
||||
for cap in CAP..(CAP + 128/* check some non-power-of-two capacities */) {
|
||||
let arena = Arena::with_capacity(cap);
|
||||
for i in 1..MAX {
|
||||
arena.alloc(NonCopy(i));
|
||||
let iter = arena.iter();
|
||||
assert_size_hint(i, iter);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg_attr(miri, ignore)]
|
||||
fn size_hint_high_initial_capacities() {
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NonCopy(usize);
|
||||
|
||||
const MAX: usize = 25_000;
|
||||
const CAP: usize = 8164;
|
||||
|
||||
for cap in CAP..(CAP + 128/* check some non-power-of-two capacities */) {
|
||||
let arena = Arena::with_capacity(cap);
|
||||
for i in 1..MAX {
|
||||
arena.alloc(NonCopy(i));
|
||||
let iter = arena.iter();
|
||||
assert_size_hint(i, iter);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg_attr(miri, ignore)]
|
||||
fn size_hint_many_items() {
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NonCopy(usize);
|
||||
|
||||
const MAX: usize = 5_000_000;
|
||||
const CAP: usize = 16;
|
||||
|
||||
let arena = Arena::with_capacity(CAP);
|
||||
for i in 1..MAX {
|
||||
arena.alloc(NonCopy(i));
|
||||
let iter = arena.iter();
|
||||
assert_size_hint(i, iter);
|
||||
}
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
{"files":{".github/workflows/coverage.yml":"90aaa068c16cb778b24badaff78baf2a313637780a723be09596abde0f4c827a",".github/workflows/rust.yml":"905954be896d052ced621eedb9d5b9d35795490f27071ac1147e75ac3b3711ec","CHANGES.md":"5f54e553a1c4ef21c5be6109b25df9d1d63c4547627723fe044c73dbddf0db2f","Cargo.toml":"c4f220ebc481f7b1db1909f32c5e95a94f665b40943713f084547d9df2f8c29c","LICENSE":"ae48df11a335dc1a615f4f938b69cba73bcf4485c4f97af49b38efb0f216353b","README.md":"45f9e20ee7a50ca4b4b55918976b3218667d63ebc3075952f8b0ea1d6a6d22f8","examples/draw.rs":"52fee9e2f2c11e1c891b30cb460be2a0ec65974f38dc0c08fd48391caf1e4247","examples/obj-output.rs":"6fc549022aa715eee74ea1cafb89ca33189e9dbe914ea6b2c46160049bda68f3","examples/simple.rs":"99fb566414cbd4a0eb69a2774c9780d7cd17e5cdaa14837b280fba319c053f22","notes":"48e636c646d697e213b3a79e31063e11b6ffc7493592d31f3929b1db495870b8","src/aacoverage.rs":"1f9ebe8db75bd80b6b347e3734b3c5bdb35c6fa984e142271bfcc0c286e0cb45","src/aarasterizer.rs":"d02939c0de5ad1c134543e0e91e89f3d86f6ff718c52a6f40df4db5fb1c4a714","src/bezier.rs":"f089ab04e30077ce4e0fe59dfa602948b989aa53d51ad207fbc30c1edd24086b","src/c_bindings.rs":"e3eadbdb83a0ef3d7a364b4607d1e0469cf97b823d69b4eee578ffec59980315","src/fix.rs":"7ccf63db5bab4ab0135d92691f7c2272a27866b9792dd55ec98b2d1c1b7c0358","src/geometry_sink.rs":"9025569f77f475a1e47fd470e8f53dcdf88ef57e3a5b8a51268fff892da8b1a7","src/helpers.rs":"220294dac335943518f249c4a27ad803f8226ed62cd780f517e95be6343a1f2f","src/hwrasterizer.rs":"55d7771b0f2537bb0ba2cce4006786582759bca4958cb8008822aa4947ac5404","src/hwvertexbuffer.rs":"51f884dda5f91e30a70fb6486673967b216dee295521539d1a4806fcdbf4cf94","src/lib.rs":"bc496e7d4e6827198997a5c5a099000405ac590daece785ca94b4a31e2351c55","src/matrix.rs":"1ac44bc5d073f96ab64b1b5c6077fd0d47fe61db8243bd9a55fc91d8eae1dd92","src/notes":"d50d49e0b5660bc6350d8055f25f26700c937558de0af690e1fc4f50ed7e05c9","src/nullable_ref.rs":"789fe0e59b7d4a925faecbf2362be93643ea8382b4424ca0e60866f9bf83c3cd","src/real.rs":"47809b1ae8c
bc9c6e25165a2bce508bd8d27deb78e3f283460c6490114dbe703","src/tri_rasterize.rs":"30821a3465cea3c5ac578590013b530c03ea3010225f580d6cf609e39910c412","src/types.rs":"43a20f23a798858c6be64c1faf42ee9e392cbab323f851653993fcb0d78cdb5e"},"package":null}
|
|
@ -1,23 +0,0 @@
|
|||
name: Coverage
|
||||
|
||||
on: [pull_request, push]
|
||||
|
||||
jobs:
|
||||
coverage:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Install Rust
|
||||
run: rustup toolchain install stable --component llvm-tools-preview
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Generate code coverage
|
||||
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
|
||||
files: lcov.info
|
||||
fail_ci_if_error: true
|
|
@ -1,39 +0,0 @@
|
|||
name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Build
|
||||
run: cargo build --verbose
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
|
||||
aarch64:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Install toolchain
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
override: true
|
||||
target: aarch64-unknown-linux-gnu
|
||||
|
||||
- name: Install cross
|
||||
run: cargo install cross
|
||||
|
||||
- name: Run tests with Neon
|
||||
run: cross test --target aarch64-unknown-linux-gnu
|
|
@ -1,22 +0,0 @@
|
|||
Changes for Safety
|
||||
------------------
|
||||
|
||||
`CEdgeStore` is replaced by `typed_arena_nomut::Arena<CEdge>`.
|
||||
|
||||
`CEdgeStore` is an arena with built-in stack storage for the first allocation
|
||||
of the arena. It exposes the allocated buffers to support very fast allocation,
|
||||
and supports fast enumeration by returning pointers to each allocation.
|
||||
|
||||
`CCoverageBuffer` also now uses a `typed_arena_nomut::Arena<CEdge>` but uses it
|
||||
to allocate `CCoverageIntervalBuffer`'s. We currently lack support for
|
||||
the builtin stack storage. Storing these in an Arena is not ideal, we'd rather
|
||||
just heap allocate them individually.
|
||||
|
||||
|
||||
Changes for performance
|
||||
-----------------------
|
||||
|
||||
Switched from using triangle strips to triangle lists. This lets
|
||||
us use a single triangle to draw each line segement which reduces
|
||||
the amount of geometry per line segment from 6 vertices to 3.
|
||||
Direct2D also made this switch in later versions.
|
|
@ -1,21 +0,0 @@
|
|||
[package]
|
||||
name = "wpf-gpu-raster"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
typed-arena-nomut = "0.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
usvg = "0.4"
|
||||
euclid = "0.22.6"
|
||||
png = "0.17.2"
|
||||
|
||||
[features]
|
||||
default = ["c_bindings"]
|
||||
c_bindings = []
|
|
@ -1,23 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) .NET Foundation and Contributors
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -1,20 +0,0 @@
|
|||
This is a port of the WPF hardware rasterizer code to Rust. That
|
||||
rasterizer is predecessor to the Direct2D rasterizer. Direct2D still
|
||||
uses a similar technique when run on hardware that does not support
|
||||
Target Independent Rasterization.
|
||||
|
||||
Design
|
||||
======
|
||||
|
||||
Bezier flattening is done using an approach that uses forward differencing
|
||||
of the error metric to compute a flattened version that would match a traditional
|
||||
adaptive recursive flattening.
|
||||
|
||||
The general algorithm used for rasterization is a vertical sweep of
|
||||
the shape that maintains an active edge list. The sweep is done
|
||||
at a sub-scanline resolution and results in either:
|
||||
1. Sub-scanlines being combined in the coverage buffer and output
|
||||
as "complex scans". These are emitted as lines constructed out
|
||||
of triangle strips.
|
||||
2. Simple trapezoids being recognized in the active edge list
|
||||
and output using a faster simple trapezoid path.
|
|
@ -1,354 +0,0 @@
|
|||
/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2012 VMware, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
*/
|
||||
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
use euclid::{default::Transform2D, point2};
|
||||
use wpf_gpu_raster::{PathBuilder};
|
||||
|
||||
|
||||
use std::ops::Index;
|
||||
|
||||
|
||||
const WIDTH: u32 = 800;
|
||||
const HEIGHT: u32 = 800;
|
||||
|
||||
|
||||
fn over(src: u32, dst: u32) -> u32 {
|
||||
let a = src >> 24;
|
||||
let a = 255 - a;
|
||||
let mask = 0xff00ff;
|
||||
let t = (dst & mask) * a + 0x800080;
|
||||
let mut rb = (t + ((t >> 8) & mask)) >> 8;
|
||||
rb &= mask;
|
||||
|
||||
rb += src & mask;
|
||||
|
||||
// saturate
|
||||
rb |= 0x1000100 - ((rb >> 8) & mask);
|
||||
rb &= mask;
|
||||
|
||||
let t = ((dst >> 8) & mask) * a + 0x800080;
|
||||
let mut ag = (t + ((t >> 8) & mask)) >> 8;
|
||||
ag &= mask;
|
||||
ag += (src >> 8) & mask;
|
||||
|
||||
// saturate
|
||||
ag |= 0x1000100 - ((ag >> 8) & mask);
|
||||
ag &= mask;
|
||||
|
||||
(ag << 8) + rb
|
||||
}
|
||||
|
||||
pub fn alpha_mul(x: u32, a: u32) -> u32 {
|
||||
let mask = 0xFF00FF;
|
||||
|
||||
let src_rb = ((x & mask) * a) >> 8;
|
||||
let src_ag = ((x >> 8) & mask) * a;
|
||||
|
||||
(src_rb & mask) | (src_ag & !mask)
|
||||
}
|
||||
|
||||
fn write_image(data: &[u32], path: &str) {
|
||||
use std::path::Path;
|
||||
use std::fs::File;
|
||||
use std::io::BufWriter;
|
||||
|
||||
let mut png_data: Vec<u8> = vec![0; (WIDTH * HEIGHT * 3) as usize];
|
||||
let mut i = 0;
|
||||
for pixel in data {
|
||||
png_data[i] = ((pixel >> 16) & 0xff) as u8;
|
||||
png_data[i + 1] = ((pixel >> 8) & 0xff) as u8;
|
||||
png_data[i + 2] = ((pixel >> 0) & 0xff) as u8;
|
||||
i += 3;
|
||||
}
|
||||
|
||||
|
||||
let path = Path::new(path);
|
||||
let file = File::create(path).unwrap();
|
||||
let w = &mut BufWriter::new(file);
|
||||
|
||||
let mut encoder = png::Encoder::new(w, WIDTH, HEIGHT); // Width is 2 pixels and height is 1.
|
||||
encoder.set_color(png::ColorType::Rgb);
|
||||
encoder.set_depth(png::BitDepth::Eight);
|
||||
let mut writer = encoder.write_header().unwrap();
|
||||
|
||||
writer.write_image_data(&png_data).unwrap(); // Save
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Vertex {
|
||||
x: f32,
|
||||
y: f32,
|
||||
coverage: f32
|
||||
}
|
||||
#[derive(Debug)]
|
||||
struct Triangle {
|
||||
v: [Vertex; 3],
|
||||
}
|
||||
|
||||
impl Index<usize> for Triangle {
|
||||
type Output = Vertex;
|
||||
|
||||
fn index(&self, index: usize) -> &Self::Output {
|
||||
&self.v[index]
|
||||
}
|
||||
}
|
||||
|
||||
// D3D11 mandates 8 bit subpixel precision:
|
||||
// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
|
||||
const FIXED_SHIFT: i32 = 8;
|
||||
const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
|
||||
|
||||
/* Proper rounding of float to integer */
|
||||
fn iround(mut v: f32) -> i64 {
|
||||
if v > 0.0 {
|
||||
v += 0.5;
|
||||
}
|
||||
if v < 0.0 {
|
||||
v -= 0.5;
|
||||
}
|
||||
return v as i64
|
||||
}
|
||||
|
||||
/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
|
||||
fn rast_triangle(buffer: &mut [u32], stride: usize, tri: &Triangle, color: u32) {
|
||||
let center_offset = -0.5;
|
||||
|
||||
let mut coverage1 = tri[0].coverage;
|
||||
let mut coverage2 = tri[1].coverage;
|
||||
let mut coverage3 = tri[2].coverage;
|
||||
|
||||
/* fixed point coordinates */
|
||||
let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
|
||||
let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
|
||||
let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
|
||||
|
||||
let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
|
||||
let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
|
||||
let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
|
||||
|
||||
|
||||
/* Force correct vertex order */
|
||||
let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
|
||||
if cross > 0 {
|
||||
std::mem::swap(&mut x1, &mut x3);
|
||||
std::mem::swap(&mut y1, &mut y3);
|
||||
// I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
|
||||
std::mem::swap(&mut coverage2, &mut coverage3);
|
||||
} else {
|
||||
std::mem::swap(&mut coverage1, &mut coverage3);
|
||||
}
|
||||
|
||||
/* Deltas */
|
||||
let dx12 = x1 - x2;
|
||||
let dx23 = x2 - x3;
|
||||
let dx31 = x3 - x1;
|
||||
|
||||
let dy12 = y1 - y2;
|
||||
let dy23 = y2 - y3;
|
||||
let dy31 = y3 - y1;
|
||||
|
||||
/* Fixed-point deltas */
|
||||
let fdx12 = dx12 << FIXED_SHIFT;
|
||||
let fdx23 = dx23 << FIXED_SHIFT;
|
||||
let fdx31 = dx31 << FIXED_SHIFT;
|
||||
|
||||
let fdy12 = dy12 << FIXED_SHIFT;
|
||||
let fdy23 = dy23 << FIXED_SHIFT;
|
||||
let fdy31 = dy31 << FIXED_SHIFT;
|
||||
|
||||
/* Bounding rectangle */
|
||||
let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
|
||||
let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
|
||||
|
||||
let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
|
||||
let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
|
||||
|
||||
minx = minx.max(0);
|
||||
maxx = maxx.min(WIDTH as i64 - 1);
|
||||
|
||||
miny = miny.max(0);
|
||||
maxy = maxy.min(HEIGHT as i64 - 1);
|
||||
|
||||
/* Half-edge constants */
|
||||
let mut c1 = dy12 * x1 - dx12 * y1;
|
||||
let mut c2 = dy23 * x2 - dx23 * y2;
|
||||
let mut c3 = dy31 * x3 - dx31 * y3;
|
||||
|
||||
/* Correct for top-left filling convention */
|
||||
if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
|
||||
if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
|
||||
if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
|
||||
|
||||
let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
|
||||
let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
|
||||
let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
|
||||
|
||||
/* Perform rasterization */
|
||||
let mut buffer = &mut buffer[miny as usize * stride..];
|
||||
for _y in miny..=maxy {
|
||||
let mut cx1 = cy1;
|
||||
let mut cx2 = cy2;
|
||||
let mut cx3 = cy3;
|
||||
|
||||
for x in minx..=maxx {
|
||||
if cx1 > 0 && cx2 > 0 && cx3 > 0 {
|
||||
// cross is equal to 2*area of the triangle.
|
||||
// we can normalize cx by 2*area to get barycentric coords.
|
||||
let area = cross.abs() as f32;
|
||||
let bary = (cx1 as f32 / area, cx2 as f32/ area, cx3 as f32 / area);
|
||||
|
||||
let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
|
||||
|
||||
let color = alpha_mul(color, (coverages * 256. + 0.5) as u32);
|
||||
buffer[x as usize] = over(color, buffer[x as usize]);
|
||||
}
|
||||
|
||||
cx1 -= fdy12;
|
||||
cx2 -= fdy23;
|
||||
cx3 -= fdy31;
|
||||
}
|
||||
|
||||
cy1 += fdx12;
|
||||
cy2 += fdx23;
|
||||
cy3 += fdx31;
|
||||
|
||||
buffer = &mut buffer[stride..];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn main() {
|
||||
let opt = usvg::Options::default();
|
||||
|
||||
let rtree = usvg::Tree::from_file("tiger.svg", &opt).unwrap();
|
||||
|
||||
let mut image = vec![0; (WIDTH * HEIGHT) as usize];
|
||||
for _ in 0..1 {
|
||||
let mut total_vertex_count = 0;
|
||||
let mut total_time = std::time::Duration::default();
|
||||
for node in rtree.root().descendants() {
|
||||
use usvg::NodeExt;
|
||||
let t = node.transform();
|
||||
let transform = Transform2D::new(
|
||||
t.a as f32, t.b as f32,
|
||||
t.c as f32, t.d as f32,
|
||||
t.e as f32, t.f as f32,
|
||||
);
|
||||
|
||||
|
||||
let s = 1.;
|
||||
if let usvg::NodeKind::Path(ref usvg_path) = *node.borrow() {
|
||||
let color = match usvg_path.fill {
|
||||
Some(ref fill) => {
|
||||
match fill.paint {
|
||||
usvg::Paint::Color(c) => 0xff000000 | (c.red as u32) << 16 | (c.green as u32) << 8 | c.blue as u32,
|
||||
_ => 0xff00ff00,
|
||||
}
|
||||
}
|
||||
None => {
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let mut builder = PathBuilder::new();
|
||||
//dbg!(&usvg_path.segments);
|
||||
for segment in &usvg_path.segments {
|
||||
match *segment {
|
||||
usvg::PathSegment::MoveTo { x, y } => {
|
||||
let p = transform.transform_point(point2(x as f32, y as f32)) * s;
|
||||
builder.move_to(p.x, p.y);
|
||||
}
|
||||
usvg::PathSegment::LineTo { x, y } => {
|
||||
let p = transform.transform_point(point2(x as f32, y as f32)) * s;
|
||||
builder.line_to(p.x, p.y);
|
||||
}
|
||||
usvg::PathSegment::CurveTo { x1, y1, x2, y2, x, y, } => {
|
||||
let c1 = transform.transform_point(point2(x1 as f32, y1 as f32)) * s;
|
||||
let c2 = transform.transform_point(point2(x2 as f32, y2 as f32)) * s;
|
||||
let p = transform.transform_point(point2(x as f32, y as f32)) * s;
|
||||
builder.curve_to(
|
||||
c1.x, c1.y,
|
||||
c2.x, c2.y,
|
||||
p.x, p.y,
|
||||
);
|
||||
}
|
||||
usvg::PathSegment::ClosePath => {
|
||||
builder.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
let start = std::time::Instant::now();
|
||||
let result = builder.rasterize_to_tri_list(0, 0, WIDTH as i32, HEIGHT as i32);
|
||||
let end = std::time::Instant::now();
|
||||
total_time += end - start;
|
||||
|
||||
println!("vertices {}", result.len());
|
||||
total_vertex_count += result.len();
|
||||
if result.len() == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
for n in (0..result.len()).step_by(3) {
|
||||
let vertices = {
|
||||
[&result[n], &result[n+1], &result[n+2]]
|
||||
};
|
||||
|
||||
let src = color;
|
||||
let tri = Triangle { v: [
|
||||
Vertex { x: vertices[0].x, y: vertices[0].y, coverage: vertices[0].coverage},
|
||||
Vertex { x: vertices[1].x, y: vertices[1].y, coverage: vertices[1].coverage},
|
||||
Vertex { x: vertices[2].x, y: vertices[2].y, coverage: vertices[2].coverage}
|
||||
]
|
||||
};
|
||||
rast_triangle(&mut image, WIDTH as usize, &tri, src);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
println!("total vertex count {}, took {}ms", total_vertex_count, total_time.as_secs_f32()*1000.);
|
||||
}
|
||||
|
||||
|
||||
write_image(&image, "out.png");
|
||||
use std::{hash::{Hash, Hasher}, collections::hash_map::DefaultHasher};
|
||||
use crate::*;
|
||||
fn calculate_hash<T: Hash>(t: &T) -> u64 {
|
||||
let mut s = DefaultHasher::new();
|
||||
t.hash(&mut s);
|
||||
s.finish()
|
||||
}
|
||||
|
||||
assert_eq!(calculate_hash(&image),
|
||||
if cfg!(debug_assertions) { 0x5973c52a1c0232f3 } else { 0xf15821a5bebc5ecf});
|
||||
|
||||
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
// Output an .obj file of the generated mesh. Viewable at https://3dviewer.net/
|
||||
|
||||
fn output_obj_file(data: &[OutputVertex]) {
|
||||
for v in data {
|
||||
let color = v.coverage;
|
||||
println!("v {} {} {} {} {} {}", v.x, v.y, 0., color, color, color);
|
||||
}
|
||||
|
||||
// output a standard triangle strip face list
|
||||
for n in (1..data.len()-1).step_by(3) {
|
||||
println!("f {} {} {}", n, n+1, n+2);
|
||||
}
|
||||
}
|
||||
|
||||
use wpf_gpu_raster::{PathBuilder, OutputVertex};
|
||||
fn main() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.0);
|
||||
p.line_to(30., 10.);
|
||||
p.line_to(50., 20.);
|
||||
p.line_to(30., 30.);
|
||||
p.line_to(10., 30.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
output_obj_file(&result)
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
use wpf_gpu_raster::PathBuilder;
|
||||
fn main() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(10., 30.);
|
||||
p.line_to(30., 30.);
|
||||
p.line_to(30., 10.);
|
||||
p.close();
|
||||
let _result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
//dbg!(result);
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
bezier flattening
|
||||
-----------------
|
||||
if we make sure we flatten beziers to integer y values we can avoid having to hit
|
||||
the slow complex coverage path
|
||||
|
||||
We can probably do this by using a skia style flattener.
|
||||
Normally we compute a series of line segments using partial differencing.
|
||||
I think we can adjust the line towards an integer y value by having small partial differences that we can move by.
|
|
@ -1,629 +0,0 @@
|
|||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
|
||||
use std::cell::Cell;
|
||||
|
||||
use typed_arena_nomut::Arena;
|
||||
|
||||
//
|
||||
// Description:
|
||||
// Coverage buffer implementation
|
||||
#[cfg(debug_assertions)]
|
||||
use crate::aarasterizer::AssertActiveList;
|
||||
use crate::aarasterizer::CEdge;
|
||||
use crate::nullable_ref::Ref;
|
||||
use crate::types::*;
|
||||
//struct CEdge;
|
||||
//struct CInactiveEdge;
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
//
|
||||
// TrapezoidalAA only supports 8x8 mode, so the shifts/masks are all
|
||||
// constants. Also, since we must be symmetrical, x and y shifts are
|
||||
// merged into one shift unlike the implementation in aarasterizer.
|
||||
//
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
pub const c_nShift: INT = 3;
|
||||
pub const c_nShiftSize: INT = 8;
|
||||
pub const c_nShiftSizeSquared: INT = c_nShiftSize * c_nShiftSize;
|
||||
pub const c_nHalfShiftSize: INT = 4;
|
||||
pub const c_nShiftMask: INT = 7;
|
||||
//pub const c_rShiftSize: f32 = 8.0;
|
||||
//pub const c_rHalfShiftSize: f32 = 4.0;
|
||||
pub const c_rInvShiftSize: f32 = 1.0/8.0;
|
||||
pub const c_antiAliasMode: MilAntiAliasMode = MilAntiAliasMode::EightByEight;
|
||||
|
||||
//
|
||||
// Interval coverage descriptor for our antialiased filler
|
||||
//
|
||||
|
||||
pub struct CCoverageInterval<'a>
|
||||
{
|
||||
pub m_pNext: Cell<Ref<'a, CCoverageInterval<'a>>>, // m_pNext interval (look for sentinel, not NULL)
|
||||
pub m_nPixelX: Cell<INT>, // Interval's left edge (m_pNext->X is the right edge)
|
||||
pub m_nCoverage: Cell<INT>, // Pixel coverage for interval
|
||||
}
|
||||
|
||||
impl<'a> Default for CCoverageInterval<'a> {
|
||||
fn default() -> Self {
|
||||
Self { m_pNext: Cell::new(unsafe { Ref::null() } ), m_nPixelX: Default::default(), m_nCoverage: Default::default() }
|
||||
}
|
||||
}
|
||||
|
||||
// Define our on-stack storage use. The 'free' versions are nicely tuned
|
||||
// to avoid allocations in most common scenarios, while at the same time
|
||||
// not chewing up toooo much stack space.
|
||||
//
|
||||
// We make the debug versions small so that we hit the 'grow' cases more
|
||||
// frequently, for better testing:
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
// Must be at least 6 now: 4 for the "minus4" logic in hwrasterizer.*, and then
|
||||
// 1 each for the head and tail sentinels (since their allocation doesn't use Grow).
|
||||
const INTERVAL_BUFFER_NUMBER: usize = 8;
|
||||
#[cfg(not(debug_assertions))]
|
||||
const INTERVAL_BUFFER_NUMBER: usize = 32;
|
||||
|
||||
|
||||
//
|
||||
// Allocator structure for the antialiased fill interval data
|
||||
//
|
||||
|
||||
struct CCoverageIntervalBuffer<'a>
|
||||
{
|
||||
m_pNext: Cell<Option<& 'a CCoverageIntervalBuffer<'a>>>,
|
||||
m_interval: [CCoverageInterval<'a>; INTERVAL_BUFFER_NUMBER],
|
||||
}
|
||||
|
||||
impl<'a> Default for CCoverageIntervalBuffer<'a> {
|
||||
fn default() -> Self {
|
||||
Self { m_pNext: Cell::new(None), m_interval: Default::default() }
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
//
|
||||
// Class: CCoverageBuffer
|
||||
//
|
||||
// Description:
|
||||
// Coverage buffer implementation that maintains coverage information
|
||||
// for one scanline.
|
||||
//
|
||||
// This implementation will maintain a linked list of intervals consisting
|
||||
// of x value in pixel space and a coverage value that applies for all pixels
|
||||
// between pInterval->X and pInterval->Next->X.
|
||||
//
|
||||
// For example, if we add the following interval (assuming 8x8 anti-aliasing)
|
||||
// to the coverage buffer:
|
||||
// _____ _____ _____ _____
|
||||
// | | | | |
|
||||
// | ------------------- |
|
||||
// |_____|_____|_____|_____|
|
||||
// (0,0) (1,0) (2,0) (3,0) (4,0)
|
||||
//
|
||||
// Then we will get the following coverage buffer:
|
||||
//
|
||||
// m_nPixelX: INT_MIN | 0 | 1 | 3 | 4 | INT_MAX
|
||||
// m_nCoverage: 0 | 4 | 8 | 4 | 0 | 0xdeadbeef
|
||||
// m_pNext: -------->|---->|---->|---->|---->| NULL
|
||||
//
|
||||
//------------------------------------------------------------------------------
|
||||
pub struct CCoverageBuffer<'a>
|
||||
{
|
||||
/*
|
||||
public:
|
||||
//
|
||||
// Init/Destroy methods
|
||||
//
|
||||
|
||||
VOID Initialize();
|
||||
VOID Destroy();
|
||||
|
||||
//
|
||||
// Setup the buffer so that it can accept another scanline
|
||||
//
|
||||
|
||||
VOID Reset();
|
||||
|
||||
//
|
||||
// Add a subpixel interval to the coverage buffer
|
||||
//
|
||||
|
||||
HRESULT FillEdgesAlternating(
|
||||
__in_ecount(1) const CEdge *pEdgeActiveList,
|
||||
INT nSubpixelYCurrent
|
||||
);
|
||||
|
||||
HRESULT FillEdgesWinding(
|
||||
__in_ecount(1) const CEdge *pEdgeActiveList,
|
||||
INT nSubpixelYCurrent
|
||||
);
|
||||
|
||||
HRESULT AddInterval(INT nSubpixelXLeft, INT nSubpixelXRight);
|
||||
|
||||
private:
|
||||
|
||||
HRESULT Grow(
|
||||
__deref_out_ecount(1) CCoverageInterval **ppIntervalNew,
|
||||
__deref_out_ecount(1) CCoverageInterval **ppIntervalEndMinus4
|
||||
);
|
||||
|
||||
public:*/
|
||||
pub m_pIntervalStart: Cell<Ref<'a, CCoverageInterval<'a>>>, // Points to list head entry
|
||||
|
||||
//private:
|
||||
m_pIntervalNew: Cell<Ref<'a, CCoverageInterval<'a>>>,
|
||||
interval_new_index: Cell<usize>,
|
||||
|
||||
// The Minus4 in the below variable refers to the position at which
|
||||
// we need to Grow the buffer. The buffer is grown once before an
|
||||
// AddInterval, so the Grow has to ensure that there are enough
|
||||
// intervals for the AddInterval worst case which is the following:
|
||||
//
|
||||
// 1 2 3 4
|
||||
// *_____*_____ _____*_____*
|
||||
// | | | | |
|
||||
// | ---|-----------|--- |
|
||||
// |_____|_____|_____|_____|
|
||||
//
|
||||
// Note that the *'s above mark potentional insert points in the list,
|
||||
// so we need to ensure that at least 4 intervals can be allocated.
|
||||
//
|
||||
|
||||
m_pIntervalEndMinus4: Cell<Ref<'a, CCoverageInterval<'a>>>,
|
||||
|
||||
m_pIntervalBufferBuiltin: CCoverageIntervalBuffer<'a>,
|
||||
m_pIntervalBufferCurrent: Cell<Ref<'a, CCoverageIntervalBuffer<'a>>>,
|
||||
|
||||
arena: Arena<CCoverageIntervalBuffer<'a>>
|
||||
|
||||
// Disable instrumentation checks within all methods of this class
|
||||
//SET_MILINSTRUMENTATION_FLAGS(MILINSTRUMENTATIONFLAGS_DONOTHING);
|
||||
}
|
||||
|
||||
impl<'a> Default for CCoverageBuffer<'a> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
m_pIntervalStart: Cell::new(unsafe { Ref::null() }),
|
||||
m_pIntervalNew: Cell::new(unsafe { Ref::null() }),
|
||||
m_pIntervalEndMinus4: Cell::new(unsafe { Ref::null() }),
|
||||
m_pIntervalBufferBuiltin: Default::default(),
|
||||
m_pIntervalBufferCurrent: unsafe { Cell::new(Ref::null()) },
|
||||
arena: Arena::new(),
|
||||
interval_new_index: Cell::new(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// Inlines
|
||||
//
|
||||
impl<'a> CCoverageBuffer<'a> {
    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::AddInterval
    //
    // Synopsis:  Add a subpixel resolution interval to the coverage buffer
    //
    // The buffer is a sorted, singly-linked list of CCoverageInterval nodes
    // keyed by pixel x; each node's m_nCoverage is the accumulated subpixel
    // coverage for [m_nPixelX, next.m_nPixelX).  This walks to the insertion
    // point and splices in up to 4 new nodes (left edge, left+1, right edge,
    // right+1), which is why the caller-side Grow() guarantee is "at least
    // 4 free slots" (see the worst-case diagram above the struct fields).
    //
    //-------------------------------------------------------------------------
    pub fn AddInterval(&'a self, nSubpixelXLeft: INT, nSubpixelXRight: INT) -> HRESULT
    {
        let hr: HRESULT = S_OK;
        let mut nPixelXNext: INT;
        let nPixelXLeft: INT;
        let nPixelXRight: INT;
        let nCoverageLeft: INT;  // coverage from right edge of pixel for interval start
        let nCoverageRight: INT; // coverage from left edge of pixel for interval end

        // Local copies of the list cursors; written back before every return.
        let mut pInterval = self.m_pIntervalStart.get();
        let mut pIntervalNew = self.m_pIntervalNew.get();
        let mut interval_new_index = self.interval_new_index.get();
        let mut pIntervalEndMinus4 = self.m_pIntervalEndMinus4.get();

        // Make sure we have enough room to add two intervals if
        // necessary:

        if (pIntervalNew >= pIntervalEndMinus4)
        {
            IFC!(self.Grow(&mut pIntervalNew, &mut pIntervalEndMinus4, &mut interval_new_index));
        }

        // Convert interval to pixel space so that we can insert it
        // into the coverage buffer

        debug_assert!(nSubpixelXLeft < nSubpixelXRight);
        nPixelXLeft = nSubpixelXLeft >> c_nShift;
        nPixelXRight = nSubpixelXRight >> c_nShift;

        // Skip any intervals less than 'nPixelLeft':

        loop {
            nPixelXNext = pInterval.m_pNext.get().m_nPixelX.get();
            if !(nPixelXNext < nPixelXLeft) { break }

            pInterval = pInterval.m_pNext.get();
        }

        // Insert a new interval if necessary:

        if (nPixelXNext != nPixelXLeft)
        {
            // Splice a node for nPixelXLeft between pInterval and its successor,
            // inheriting the coverage already accumulated at pInterval.
            pIntervalNew.m_nPixelX.set(nPixelXLeft);
            pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());

            pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
            pInterval.m_pNext.set(pIntervalNew);

            pInterval = pIntervalNew;

            // Advance the "next free node" cursor into the current buffer.
            interval_new_index += 1;
            pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])

        }
        else
        {
            // A node for nPixelXLeft already exists; just step onto it.
            pInterval = (*pInterval).m_pNext.get();
        }

        //
        // Compute coverage for left segment as shown by the *'s below
        //
        //   |_____|_____|_____|_
        //   |     |     |     |
        //   |  ***----------  |
        //   |_____|_____|_____|
        //

        nCoverageLeft = c_nShiftSize - (nSubpixelXLeft & c_nShiftMask);

        // If nCoverageLeft == 0, then the value of nPixelXLeft is wrong
        // and should have been equal to nPixelXLeft+1.
        debug_assert!(nCoverageLeft > 0);

        // If we have partial coverage, then ensure that we have a position
        // for the end of the pixel

        if ((nCoverageLeft < c_nShiftSize || (nPixelXLeft == nPixelXRight))
            && nPixelXLeft + 1 != pInterval.m_pNext.get().m_nPixelX.get())
        {
            pIntervalNew.m_nPixelX.set(nPixelXLeft + 1);
            pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());

            pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
            pInterval.m_pNext.set(pIntervalNew);

            interval_new_index += 1;
            pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
        }

        //
        // If the interval only includes one pixel, then the coverage is
        // nSubpixelXRight - nSubpixelXLeft
        //

        if (nPixelXLeft == nPixelXRight)
        {
            pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + nSubpixelXRight - nSubpixelXLeft);
            debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
            //goto Cleanup;

            //Cleanup:
            // Update the coverage buffer new interval (early-out replacement for
            // the original C++ goto Cleanup).
            self.interval_new_index.set(interval_new_index);
            self.m_pIntervalNew.set(pIntervalNew);
            return hr;
        }

        // Update coverage of current interval
        pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + nCoverageLeft);
        debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);

        // Increase the coverage for any intervals between 'nPixelXLeft'
        // and 'nPixelXRight':

        loop {
            (nPixelXNext = pInterval.m_pNext.get().m_nPixelX.get());

            if !(nPixelXNext < nPixelXRight) {
                break;
            }
            // Fully covered interior pixels each gain a full pixel of coverage.
            pInterval = pInterval.m_pNext.get();
            pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + c_nShiftSize);
            debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
        }

        // Insert another new interval if necessary:

        if (nPixelXNext != nPixelXRight)
        {
            // The new right-edge node undoes the full-pixel increment for the
            // span starting at nPixelXRight (hence "- c_nShiftSize").
            pIntervalNew.m_nPixelX.set(nPixelXRight);
            pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get() - c_nShiftSize);

            pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
            pInterval.m_pNext.set(pIntervalNew);

            pInterval = pIntervalNew;

            interval_new_index += 1;
            pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
        }
        else
        {
            pInterval = pInterval.m_pNext.get();
        }

        //
        // Compute coverage for right segment as shown by the *'s below
        //
        //   |_____|_____|_____|_
        //   |     |     |     |
        //   |  ---------****  |
        //   |_____|_____|_____|
        //

        nCoverageRight = nSubpixelXRight & c_nShiftMask;
        if (nCoverageRight > 0)
        {
            if (nPixelXRight + 1 != (*(*pInterval).m_pNext.get()).m_nPixelX.get())
            {
                pIntervalNew.m_nPixelX.set(nPixelXRight + 1);
                pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());

                pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
                pInterval.m_pNext.set(pIntervalNew);

                interval_new_index += 1;
                pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
            }

            pInterval.m_nCoverage.set((*pInterval).m_nCoverage.get() + nCoverageRight);
            debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
        }

        //Cleanup:
        // Update the coverage buffer new interval
        self.interval_new_index.set(interval_new_index);
        self.m_pIntervalNew.set(pIntervalNew);

        return hr;
    }


    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::FillEdgesAlternating
    //
    // Synopsis:
    //     Given the active edge list for the current scan, do an alternate-mode
    //     (even-odd) antialiased fill: edges are consumed in pairs, each pair
    //     bounding one covered interval.
    //
    //-------------------------------------------------------------------------
    pub fn FillEdgesAlternating(&'a self,
        pEdgeActiveList: Ref<CEdge>,
        nSubpixelYCurrent: INT
    ) -> HRESULT
    {

        let hr: HRESULT = S_OK;
        let mut pEdgeStart: Ref<CEdge> = (*pEdgeActiveList).Next.get();
        let mut pEdgeEnd: Ref<CEdge>;
        let mut nSubpixelXLeft: INT;
        let mut nSubpixelXRight: INT;

        ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);

        // INT::MAX is the sentinel X of the list tail.
        while (pEdgeStart.X.get() != INT::MAX)
        {
            pEdgeEnd = pEdgeStart.Next.get();

            // We skip empty pairs:
            (nSubpixelXLeft = pEdgeStart.X.get());
            if (nSubpixelXLeft != pEdgeEnd.X.get())
            {
                // We now know we have a non-empty interval.  Skip any
                // empty interior pairs:
                // (block-expression condition emulates the C++ comma-operator
                // while loop; nSubpixelXRight is refreshed on every test)

                while ({(nSubpixelXRight = pEdgeEnd.X.get()); pEdgeEnd.X == pEdgeEnd.Next.get().X})
                {
                    pEdgeEnd = pEdgeEnd.Next.get().Next.get();
                }

                debug_assert!((nSubpixelXLeft < nSubpixelXRight) && (nSubpixelXRight < INT::MAX));

                IFC!(self.AddInterval(nSubpixelXLeft, nSubpixelXRight));
            }

            // Prepare for the next iteration:
            pEdgeStart = pEdgeEnd.Next.get();
        }

        //Cleanup:
        return hr

    }

    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::FillEdgesWinding
    //
    // Synopsis:
    //     Given the active edge list for the current scan, do a winding-mode
    //     (nonzero) antialiased fill: an interval stays open while the running
    //     sum of edge winding directions is nonzero.
    //
    //-------------------------------------------------------------------------
    pub fn FillEdgesWinding(&'a self,
        pEdgeActiveList: Ref<CEdge>,
        nSubpixelYCurrent: INT
    ) -> HRESULT
    {

        let hr: HRESULT = S_OK;
        let mut pEdgeStart: Ref<CEdge> = pEdgeActiveList.Next.get();
        let mut pEdgeEnd: Ref<CEdge>;
        let mut nSubpixelXLeft: INT;
        let mut nSubpixelXRight: INT;
        let mut nWindingValue: INT;

        ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);

        while (pEdgeStart.X.get() != INT::MAX)
        {
            pEdgeEnd = pEdgeStart.Next.get();

            // Advance pEdgeEnd until the winding count returns to zero.
            nWindingValue = pEdgeStart.WindingDirection;
            while ({nWindingValue += pEdgeEnd.WindingDirection; nWindingValue != 0})
            {
                pEdgeEnd = pEdgeEnd.Next.get();
            }

            debug_assert!(pEdgeEnd.X.get() != INT::MAX);

            // We skip empty pairs:

            if ({nSubpixelXLeft = pEdgeStart.X.get(); nSubpixelXLeft != pEdgeEnd.X.get()})
            {
                // We now know we have a non-empty interval.  Skip any
                // empty interior pairs:

                while ({nSubpixelXRight = pEdgeEnd.X.get(); nSubpixelXRight == pEdgeEnd.Next.get().X.get()})
                {
                    pEdgeStart = pEdgeEnd.Next.get();
                    pEdgeEnd = pEdgeStart.Next.get();

                    nWindingValue = pEdgeStart.WindingDirection;
                    while ({nWindingValue += pEdgeEnd.WindingDirection; nWindingValue != 0})
                    {
                        pEdgeEnd = pEdgeEnd.Next.get();
                    }
                }

                debug_assert!((nSubpixelXLeft < nSubpixelXRight) && (nSubpixelXRight < INT::MAX));

                IFC!(self.AddInterval(nSubpixelXLeft, nSubpixelXRight));
            }

            // Prepare for the next iteration:

            pEdgeStart = pEdgeEnd.Next.get();
        }

        //Cleanup:
        return hr;//RRETURN(hr);
    }

    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::Initialize
    //
    // Synopsis:  Set the coverage buffer to a valid initial state: a two-node
    //            sentinel list (head at INT::MIN, tail at INT::MAX) inside the
    //            built-in buffer, with cursors ready at slot 2.
    //
    //-------------------------------------------------------------------------
    pub fn Initialize(&'a self)
    {
        // Head sentinel: zero coverage starting at -infinity.
        self.m_pIntervalBufferBuiltin.m_interval[0].m_nPixelX.set(INT::MIN);
        self.m_pIntervalBufferBuiltin.m_interval[0].m_nCoverage.set(0);
        self.m_pIntervalBufferBuiltin.m_interval[0].m_pNext.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));

        // Tail sentinel: poison coverage value, never read as real coverage.
        self.m_pIntervalBufferBuiltin.m_interval[1].m_nPixelX.set(INT::MAX);
        self.m_pIntervalBufferBuiltin.m_interval[1].m_nCoverage.set(0xdeadbeef);
        self.m_pIntervalBufferBuiltin.m_interval[1].m_pNext.set(unsafe { Ref::null() });

        self.m_pIntervalBufferBuiltin.m_pNext.set(None);
        self.m_pIntervalBufferCurrent.set(Ref::new(&self.m_pIntervalBufferBuiltin));

        self.m_pIntervalStart.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[0]));
        self.m_pIntervalNew.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[2]));
        self.interval_new_index.set(2);
        self.m_pIntervalEndMinus4.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[INTERVAL_BUFFER_NUMBER - 4]));
    }

    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::Destroy
    //
    // Synopsis:  Free all allocated buffers
    //
    //-------------------------------------------------------------------------
    pub fn Destroy(&mut self)
    {
        // Free the linked-list of allocations (skipping 'm_pIntervalBufferBuiltin',
        // which is built into the class):
        // NOTE(port): intentionally empty in the Rust port — the extra buffers
        // are owned by `self.arena` and are released when the arena is dropped.

    }


    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::Reset
    //
    // Synopsis:  Reset the coverage buffer
    //
    //-------------------------------------------------------------------------
    pub fn Reset(&'a self)
    {
        // Reset our coverage structure.  Point the head back to the tail,
        // and reset where the next new entry will be placed:
        // (Grow-allocated buffers stay chained via m_pNext for reuse.)

        self.m_pIntervalBufferBuiltin.m_interval[0].m_pNext.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));

        self.m_pIntervalBufferCurrent.set(Ref::new(&self.m_pIntervalBufferBuiltin));
        self.m_pIntervalNew.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[2]));
        self.interval_new_index.set(2);
        self.m_pIntervalEndMinus4.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[INTERVAL_BUFFER_NUMBER - 4]));
    }

    //-------------------------------------------------------------------------
    //
    // Function:  CCoverageBuffer::Grow
    //
    // Synopsis:
    //     Grow our interval buffer: reuse the next already-chained buffer if
    //     one exists (after a Reset), otherwise arena-allocate a fresh one and
    //     link it in.  Updates the caller's local cursors to the new buffer.
    //
    //-------------------------------------------------------------------------
    fn Grow(&'a self,
        ppIntervalNew: &mut Ref<'a, CCoverageInterval<'a>>,
        ppIntervalEndMinus4: &mut Ref<'a, CCoverageInterval<'a>>,
        interval_new_index: &mut usize
    ) -> HRESULT
    {
        let hr: HRESULT = S_OK;
        let pIntervalBufferNew = (*self.m_pIntervalBufferCurrent.get()).m_pNext.get();

        let pIntervalBufferNew = pIntervalBufferNew.unwrap_or_else(||
        {
            // No spare buffer chained on — allocate one from the arena, which
            // keeps it alive for the lifetime of the coverage buffer.
            let pIntervalBufferNew = self.arena.alloc(Default::default());

            (*pIntervalBufferNew).m_pNext.set(None);
            (*self.m_pIntervalBufferCurrent.get()).m_pNext.set(Some(pIntervalBufferNew));
            pIntervalBufferNew
        });

        self.m_pIntervalBufferCurrent.set(Ref::new(pIntervalBufferNew));

        // Slots 0 and 1 of every buffer are reserved (mirrors Initialize()),
        // so the allocation cursor starts at slot 2.
        self.m_pIntervalNew.set(Ref::new(&(*pIntervalBufferNew).m_interval[2]));
        self.interval_new_index.set(2);
        self.m_pIntervalEndMinus4.set(Ref::new(&(*pIntervalBufferNew).m_interval[INTERVAL_BUFFER_NUMBER - 4]));

        *ppIntervalNew = self.m_pIntervalNew.get();
        *ppIntervalEndMinus4 = self.m_pIntervalEndMinus4.get();
        *interval_new_index = 2;

        return hr;
    }

}
|
||||
/*
|
||||
impl<'a> Drop for CCoverageBuffer<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.Destroy();
|
||||
}
|
||||
}*/
|
The file diff is not shown because it is too large
Load diff
|
@ -1,990 +0,0 @@
|
|||
// Licensed to the .NET Foundation under one or more agreements.
|
||||
// The .NET Foundation licenses this file to you under the MIT license.
|
||||
// See the LICENSE file in the project root for more information.
|
||||
|
||||
//+-----------------------------------------------------------------------------
|
||||
//
|
||||
// class Bezier32
|
||||
//
|
||||
// Bezier cracker.
|
||||
//
|
||||
// A hybrid cubic Bezier curve flattener based on KirkO's error factor.
|
||||
// Generates line segments fast without using the stack. Used to flatten a
|
||||
// path.
|
||||
//
|
||||
// For an understanding of the methods used, see:
|
||||
//
|
||||
// Kirk Olynyk, "..."
|
||||
// Goossen and Olynyk, "System and Method of Hybrid Forward
|
||||
// Differencing to Render Bezier Splines"
|
||||
// Lien, Shantz and Vaughan Pratt, "Adaptive Forward Differencing for
|
||||
// Rendering Curves and Surfaces", Computer Graphics, July 1987
|
||||
// Chang and Shantz, "Rendering Trimmed NURBS with Adaptive Forward
|
||||
// Differencing", Computer Graphics, August 1988
|
||||
// Foley and Van Dam, "Fundamentals of Interactive Computer Graphics"
|
||||
//
|
||||
// Public Interface:
|
||||
// bInit(pptfx) - pptfx points to 4 control points of
|
||||
// Bezier. Current point is set to the first
|
||||
// point after the start-point.
|
||||
// Bezier32(pptfx) - Constructor with initialization.
|
||||
// vGetCurrent(pptfx) - Returns current polyline point.
|
||||
// bCurrentIsEndPoint() - TRUE if current point is end-point.
|
||||
// vNext() - Moves to next polyline point.
|
||||
//
|
||||
|
||||
|
||||
#![allow(unused_parens)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
//+-----------------------------------------------------------------------------
|
||||
//
|
||||
|
||||
//
|
||||
// $TAG ENGR
|
||||
|
||||
// $Module: win_mil_graphics_geometry
|
||||
// $Keywords:
|
||||
//
|
||||
// $Description:
|
||||
// Class for flattening a bezier.
|
||||
//
|
||||
// $ENDTAG
|
||||
//
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Fixed-point parameters for the 32-bit hybrid forward differencing (HFD)
// bezier flattener.  Input coordinates arrive in 28.4 fixed point.

// First conversion from original 28.4 to 18.14 format
const HFD32_INITIAL_SHIFT: i32 = 10;

// Second conversion to 15.17 format
const HFD32_ADDITIONAL_SHIFT: i32 = 3;


// BEZIER_FLATTEN_GDI_COMPATIBLE:
//
// Don't turn on this switch without testing carefully. It's more for
// documentation's sake - to show the values that GDI used - for an error
// tolerance of 2/3.

// It turns out that 2/3 produces very noticable artifacts on antialiased lines,
// so we want to use 1/4 instead.
/*
#ifdef BEZIER_FLATTEN_GDI_COMPATIBLE

// Flatten to an error of 2/3.  During initial phase, use 18.14 format.

#define TEST_MAGNITUDE_INITIAL    (6 * 0x00002aa0L)

// Error of 2/3.  During normal phase, use 15.17 format.

#define TEST_MAGNITUDE_NORMAL     (TEST_MAGNITUDE_INITIAL << 3)

#else
*/
use crate::types::*;
/*
// Flatten to an error of 1/4.  During initial phase, use 18.14 format.

const TEST_MAGNITUDE_INITIAL: i32 = (6 * 0x00001000);

// Error of 1/4.  During normal phase, use 15.17 format.

const TEST_MAGNITUDE_NORMAL: i32 = (TEST_MAGNITUDE_INITIAL << 3);
*/

// I have modified the constants for HFD32 as part of fixing accuracy errors
// (Bug 816015).  Something similar could be done for the 64 bit hfd, but it ain't
// broke so I'd rather not fix it.

// The shift to the steady state 15.17 format
const HFD32_SHIFT: LONG = HFD32_INITIAL_SHIFT + HFD32_ADDITIONAL_SHIFT;

// Added to output numbers before rounding back to original representation
const HFD32_ROUND: LONG = 1 << (HFD32_SHIFT - 1);

// The error is tested on max(|e2|, |e3|), which represent 6 times the actual error.
// The flattening tolerance is hard coded to 1/4 in the original geometry space,
// which translates to 4 in 28.4 format.  So 6 times that is:

const HFD32_TOLERANCE: LONGLONG = 24;

// During the initial phase, while working in 18.14 format
const HFD32_INITIAL_TEST_MAGNITUDE: LONGLONG = HFD32_TOLERANCE << HFD32_INITIAL_SHIFT;

// During the steady state, while working in 15.17 format
const HFD32_TEST_MAGNITUDE: LONGLONG = HFD32_INITIAL_TEST_MAGNITUDE << HFD32_ADDITIONAL_SHIFT;

// We will stop halving the segment with basis e1, e2, e3, e4 when max(|e2|, |e3|)
// is less than HFD32_TOLERANCE.  The operation e2 = (e2 + e3) >> 3 in vHalveStepSize() may
// eat up 3 bits of accuracy. HfdBasis32 starts off with a pad of HFD32_SHIFT zeros, so
// we can stay exact up to HFD32_SHIFT/3 subdivisions.  Since every subdivision is guaranteed
// to shift max(|e2|, |e3|) at least by 2, we will subdivide no more than n times if the
// initial max(|e2|, |e3|) is less than than HFD32_TOLERANCE << 2n.  But if the initial
// max(|e2|, |e3|) is greater than HFD32_TOLERANCE >> (HFD32_SHIFT / 3) then we may not be
// able to flatten with the 32 bit hfd, so we need to resort to the 64 bit hfd.

const HFD32_MAX_ERROR: INT = (HFD32_TOLERANCE as i32) << ((2 * HFD32_INITIAL_SHIFT) / 3);

// The maximum size of coefficients that can be handled by HfdBasis32.
// Used as a bitmask: any coordinate bit overlapping this mask means the
// curve does not fit in the 10-bit space the 32-bit cracker requires.
const HFD32_MAX_SIZE: LONGLONG = 0xffffc000;
|
||||
|
||||
// Michka 9/12/03: I found this number in the the body of the code witout any explanation.
|
||||
// My analysis suggests that we could get away with larger numbers, but if I'm wrong we
|
||||
// could be in big trouble, so let us stay conservative.
|
||||
//
|
||||
// In bInit() we subtract Min(Bezier coeffients) from the original coefficients, so after
|
||||
// that 0 <= coefficients <= Bound, and the test will be Bound < HFD32_MAX_SIZE. When
|
||||
// switching to the HFD basis in bInit():
|
||||
// * e0 is the first Bezier coeffient, so abs(e0) <= Bound.
|
||||
// * e1 is a difference of non-negative coefficients so abs(e1) <= Bound.
|
||||
// * e2 and e3 can be written as 12*(p - (q + r)/2) where p,q and r are coefficients.
|
||||
// 0 <=(q + r)/2 <= Bound, so abs(p - (q + r)/2) <= 2*Bound, hence
|
||||
// abs(e2), abs(e3) <= 12*Bound.
|
||||
//
|
||||
// During vLazyHalveStepSize we add e2 + e3, resulting in absolute value <= 24*Bound.
|
||||
// Initially HfdBasis32 shifts the numbers by HFD32_INITIAL_SHIFT, so we need to handle
|
||||
// 24*bounds*(2^HFD32_SHIFT), and that needs to be less than 2^31. So the bounds need to
|
||||
// be less than 2^(31-HFD32_INITIAL_SHIFT)/24).
|
||||
//
|
||||
// For speed, the algorithm uses & rather than < for comparison. To facilitate that we
|
||||
// replace 24 by 32=2^5, and then the binary representation of the number is of the form
|
||||
// 0...010...0 with HFD32_SHIFT+5 trailing zeros. By subtracting that from 2^32 = 0xffffffff+1
|
||||
// we get a number that is 1..110...0 with the same number of trailing zeros, and that can be
|
||||
// used with an & for comparison. So the number should be:
|
||||
//
|
||||
// 0xffffffffL - (1L << (31 - HFD32_INITIAL_SHIFT - 5)) + 1 = (1L << 16) + 1 = 0xffff0000
|
||||
//
|
||||
// For the current values of HFD32_INITIAL_SHIFT=10 and HFD32_ADDITIONAL_SHIFT=3, the steady
|
||||
// state doesn't pose additional requirements, as shown below.
|
||||
//
|
||||
// For some reason the current code uses 0xfffc0000 = (1L << 14) + 1.
|
||||
//
|
||||
// Here is why the steady state doesn't pose additional requirements:
|
||||
//
|
||||
// In vSteadyState we multiply e0 and e1 by 8, so the requirement is Bounds*2^13 < 2^31,
|
||||
// or Bounds < 2^18, less stringent than the above.
|
||||
//
|
||||
// In vLazyHalveStepSize we cut the error down by subdivision, making abs(e2) and abs(e3)
|
||||
// less than HFD32_TEST_MAGNITUDE = 24*2^13, well below 2^31.
|
||||
//
|
||||
// During all the steady-state operations - vTakeStep, vHalveStepSize and vDoubleStepSize,
|
||||
// e0 is on the curve and e1 is a difference of 2 points on the curve, so
|
||||
// abs(e0), abs(e1) < Bounds * 2^13, which requires Bound < 2^(31-13) = 2^18. e2 and e3
|
||||
// are errors, kept below 6*HFD32_TEST_MAGNITUDE = 216*2^13. Details:
|
||||
//
|
||||
// In vTakeStep e2 = 2e2 - e3 keeps abs(e2) < 3*HFD32_TEST_MAGNITUDE = 72*2^13,
|
||||
// well below 2^31
|
||||
//
|
||||
// In vHalveStepSize we add e2 + e3 when their absolute is < 3*HFD32_TEST_MAGNITUDE (because
|
||||
// this comes after a step), so that keeps the result below 6*HFD32_TEST_MAGNITUDE = 216*2^13.
|
||||
//
|
||||
// In vDoubleStepSize we know that abs(e2), abs(e3) < HFD32_TEST_MAGNITUDE/4, otherwise we
|
||||
// would not have doubled the step.
|
||||
|
||||
// One coordinate (x or y) of a cubic bezier expressed in the hybrid
// forward-differencing basis, in 32-bit fixed point (18.14 initially,
// 15.17 in steady state — see vSteadyState).
#[derive(Default)]
struct HfdBasis32
{
    e0: LONG, // current point value on the curve
    e1: LONG, // first difference: step to add on vTakeStep
    e2: LONG, // higher-order error term; max(|e2|,|e3|) bounds 6x the flattening error
    e3: LONG, // higher-order error term
}
|
||||
|
||||
impl HfdBasis32 {
    // Error the parent (pre-doubling) step would have, divided by 4; used to
    // decide when vDoubleStepSize is safe.
    fn lParentErrorDividedBy4(&self) -> LONG {
        self.e3.abs().max((self.e2 + self.e2 - self.e3).abs())
    }

    // Current error bound: max(|e2|, |e3|) (represents 6x the true error,
    // per the HFD32_TOLERANCE comment above).
    fn lError(&self) -> LONG
    {
        self.e2.abs().max(self.e3.abs())
    }

    // Round the current point back from 15.17 fixed point to the caller's
    // 28.4 representation.
    fn fxValue(&self) -> INT
    {
        return((self.e0 + HFD32_ROUND) >> HFD32_SHIFT);
    }

    // Change from the bezier control-point basis (p1..p4, 28.4) to the HFD
    // basis in 18.14.  Returns false if the initial error is too large for
    // the 32-bit cracker (caller falls back to the 64-bit version).
    fn bInit(&mut self, p1: INT, p2: INT, p3: INT, p4: INT) -> bool
    {
        // Change basis and convert from 28.4 to 18.14 format:

        self.e0 = (p1                     ) << HFD32_INITIAL_SHIFT;
        self.e1 = (p4 - p1                ) << HFD32_INITIAL_SHIFT;

        // e2 = 6(p2 - 2p3 + p4), e3 = 6(p1 - 2p2 + p3); shifted only after
        // the error check so the comparison is done on unshifted magnitudes.
        self.e2 = 6 * (p2 - p3 - p3 + p4);
        self.e3 = 6 * (p1 - p2 - p2 + p3);

        if (self.lError() >= HFD32_MAX_ERROR)
        {
            // Large error, will require too many subdivision for this 32 bit hfd
            return false;
        }

        self.e2 <<= HFD32_INITIAL_SHIFT;
        self.e3 <<= HFD32_INITIAL_SHIFT;

        return true;
    }

    // Halve the step during the *initial* phase.  'Lazy' because e2/e3 keep
    // an implicit scale of 2^cShift rather than being shifted immediately.
    fn vLazyHalveStepSize(&mut self, cShift: LONG)
    {
        self.e2 = self.ExactShiftRight(self.e2 + self.e3, 1);
        self.e1 = self.ExactShiftRight(self.e1 - self.ExactShiftRight(self.e2, cShift), 1);
    }

    // Leave the initial phase: convert to 15.17 and fold the accumulated
    // lazy shift into e2/e3.
    fn vSteadyState(&mut self, cShift: LONG)
    {
        // We now convert from 18.14 fixed format to 15.17:

        self.e0 <<= HFD32_ADDITIONAL_SHIFT;
        self.e1 <<= HFD32_ADDITIONAL_SHIFT;

        let mut lShift = cShift - HFD32_ADDITIONAL_SHIFT;

        if (lShift < 0)
        {
            lShift = -lShift;
            self.e2 <<= lShift;
            self.e3 <<= lShift;
        }
        else
        {
            self.e2 >>= lShift;
            self.e3 >>= lShift;
        }
    }

    // Steady-state subdivision: guaranteed to reduce max(|e2|,|e3|) by at
    // least a factor of 4 (see the comment above cFlatten's assert).
    fn vHalveStepSize(&mut self)
    {
        self.e2 = self.ExactShiftRight(self.e2 + self.e3, 3);
        self.e1 = self.ExactShiftRight(self.e1 - self.e2, 1);
        self.e3 = self.ExactShiftRight(self.e3, 2);
    }

    // Inverse of vHalveStepSize; only called when lParentErrorDividedBy4
    // says the doubled step stays within tolerance.
    fn vDoubleStepSize(&mut self)
    {
        self.e1 += self.e1 + self.e2;
        self.e3 <<= 2;
        self.e2 = (self.e2 << 3) - self.e3;
    }

    // Advance one step along the curve, updating the difference terms.
    fn vTakeStep(&mut self)
    {
        self.e0 += self.e1;
        let lTemp = self.e2;
        self.e1 += lTemp;
        self.e2 += lTemp - self.e3;
        self.e3 = lTemp;
    }

    fn ExactShiftRight(&self, num: i32, shift: i32) -> i32
    {
        // Performs a shift to the right while asserting that we're not
        // losing significant bits

        assert!(num == (num >> shift) << shift);
        return num >> shift;
    }
}
|
||||
|
||||
fn vBoundBox(
|
||||
aptfx: &[POINT; 4]) -> RECT
|
||||
{
|
||||
let mut left = aptfx[0].x;
|
||||
let mut right = aptfx[0].x;
|
||||
let mut top = aptfx[0].y;
|
||||
let mut bottom = aptfx[0].y;
|
||||
|
||||
for i in 1..4
|
||||
{
|
||||
left = left.min(aptfx[i].x);
|
||||
top = top.min(aptfx[i].y);
|
||||
right = right.max(aptfx[i].x);
|
||||
bottom = bottom.max(aptfx[i].y);
|
||||
}
|
||||
|
||||
// We make the bounds one pixel loose for the nominal width
|
||||
// stroke case, which increases the bounds by half a pixel
|
||||
// in every dimension:
|
||||
|
||||
RECT { left: left - 16, top: top - 16, right: right + 16, bottom: bottom + 16}
|
||||
}
|
||||
|
||||
|
||||
|
||||
fn bIntersect(
|
||||
a: &RECT,
|
||||
b: &RECT) -> bool
|
||||
{
|
||||
return((a.left < b.right) &&
|
||||
(a.top < b.bottom) &&
|
||||
(a.right > b.left) &&
|
||||
(a.bottom > b.top));
|
||||
}
|
||||
|
||||
// 32-bit fixed-point bezier flattener (the fast path; falls back to the
// 64-bit version when bInit returns false).  Tracks one HFD basis per axis.
#[derive(Default)]
pub struct Bezier32
{
    cSteps: LONG,       // remaining steps before the end of the curve
    x: HfdBasis32,      // x coordinate in HFD form (offset by rcfxBound.left)
    y: HfdBasis32,      // y coordinate in HFD form (offset by rcfxBound.top)
    rcfxBound: RECT     // loose 28.4 bound box from vBoundBox
}
|
||||
impl Bezier32 {

    // Prepares the flattener for one cubic bezier.
    //
    // Returns false when the curve cannot be handled by the 32-bit cracker
    // (coordinates exceed the 10-bit working space, or the initial error is
    // too large) — the caller must then use the 64-bit version.
    fn bInit(&mut self,
        aptfxBez: &[POINT; 4],
            // Pointer to 4 control points
        prcfxClip: Option<&RECT>) -> bool
            // Bound box of visible region (optional)
    {
        let mut aptfx;
        let mut cShift = 0;    // Keeps track of 'lazy' shifts

        self.cSteps = 1;       // Number of steps to do before reach end of curve

        self.rcfxBound = vBoundBox(aptfxBez);

        aptfx = aptfxBez.clone();

        {
            let mut fxOr;
            let mut fxOffset;

            // find out if the coordinates minus the bounding box
            // exceed 10 bits
            // (fxOr accumulates the set bits of every translated coordinate)
            fxOffset = self.rcfxBound.left;
            fxOr = {aptfx[0].x -= fxOffset; aptfx[0].x};
            fxOr |= {aptfx[1].x -= fxOffset; aptfx[1].x};
            fxOr |= {aptfx[2].x -= fxOffset; aptfx[2].x};
            fxOr |= {aptfx[3].x -= fxOffset; aptfx[3].x};

            fxOffset = self.rcfxBound.top;
            fxOr |= {aptfx[0].y -= fxOffset; aptfx[0].y};
            fxOr |= {aptfx[1].y -= fxOffset; aptfx[1].y};
            fxOr |= {aptfx[2].y -= fxOffset; aptfx[2].y};
            fxOr |= {aptfx[3].y -= fxOffset; aptfx[3].y};

            // This 32 bit cracker can only handle points in a 10 bit space:

            if ((fxOr as i64 & HFD32_MAX_SIZE) != 0) {
                return false;
            }
        }

        if (!self.x.bInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x))
        {
            return false;
        }
        if (!self.y.bInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y))
        {
            return false;
        }


        // Subdivide until within tolerance — but only if the curve is (or may
        // be) visible; a clipped-out curve keeps cSteps == 1.
        if (match prcfxClip { None => true, Some(clip) => bIntersect(&self.rcfxBound, clip)})
        {

            loop {
                let lTestMagnitude = (HFD32_INITIAL_TEST_MAGNITUDE << cShift) as LONG;

                if (self.x.lError() <= lTestMagnitude && self.y.lError() <= lTestMagnitude) {
                    break;
                }

                cShift += 2;
                self.x.vLazyHalveStepSize(cShift);
                self.y.vLazyHalveStepSize(cShift);
                self.cSteps <<= 1;
            }
        }

        self.x.vSteadyState(cShift);
        self.y.vSteadyState(cShift);

        // Note that this handles the case where the initial error for
        // the Bezier is already less than HFD32_TEST_MAGNITUDE:

        self.x.vTakeStep();
        self.y.vTakeStep();
        self.cSteps-=1;

        return true;
    }


    // Writes up to pptfx.len() polyline points approximating the curve.
    // Returns the number of points produced; *pbMore is set to true when the
    // curve is not yet finished and cFlatten must be called again.
    fn cFlatten(&mut self,
        mut pptfx: &mut [POINT],
        pbMore: &mut bool) -> i32
    {
        let mut cptfx = pptfx.len();
        assert!(cptfx > 0);

        let cptfxOriginal = cptfx;

        // `while { body; cond } {}` emulates the original C++ do/while loop.
        while {
            // Return current point:
            // (translate back out of the bound-box-relative space set up in bInit)

            pptfx[0].x = self.x.fxValue() + self.rcfxBound.left;
            pptfx[0].y = self.y.fxValue() + self.rcfxBound.top;
            pptfx = &mut pptfx[1..];

            // If cSteps == 0, that was the end point in the curve!

            if (self.cSteps == 0)
            {
                *pbMore = false;

                // '+1' because we haven't decremented 'cptfx' yet:

                return(cptfxOriginal - cptfx + 1) as i32;
            }

            // Okay, we have to step:

            if (self.x.lError().max(self.y.lError()) > HFD32_TEST_MAGNITUDE as LONG)
            {
                self.x.vHalveStepSize();
                self.y.vHalveStepSize();
                self.cSteps <<= 1;
            }

            // We are here after vTakeStep.  Before that the error max(|e2|,|e3|) was less
            // than HFD32_TEST_MAGNITUDE.  vTakeStep changed e2 to 2e2-e3.  Since
            // |2e2-e3| < max(|e2|,|e3|) << 2 and vHalveStepSize is guaranteed to reduce
            // max(|e2|,|e3|) by >> 2, no more than one subdivision should be required to
            // bring the new max(|e2|,|e3|) back to within HFD32_TEST_MAGNITUDE, so:
            assert!(self.x.lError().max(self.y.lError()) <= HFD32_TEST_MAGNITUDE as LONG);

            // Double the step while cSteps is even and the parent step's error
            // would still be within tolerance on both axes:
            while (!(self.cSteps & 1 != 0) &&
                   self.x.lParentErrorDividedBy4() <= (HFD32_TEST_MAGNITUDE as LONG >> 2) &&
                   self.y.lParentErrorDividedBy4() <= (HFD32_TEST_MAGNITUDE as LONG >> 2))
            {
                self.x.vDoubleStepSize();
                self.y.vDoubleStepSize();
                self.cSteps >>= 1;
            }

            self.cSteps -=1 ;
            self.x.vTakeStep();
            self.y.vTakeStep();
            cptfx -= 1;
            cptfx != 0
        } {}

        // Output buffer filled but the curve continues.
        *pbMore = true;
        return cptfxOriginal as i32;
    }
}
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Bezier64
|
||||
//
|
||||
// All math is done using 64 bit fixed numbers in a 36.28 format.
|
||||
//
|
||||
// All drawing is done in a 31 bit space, then a 31 bit window offset
|
||||
// is applied. In the initial transform where we change to the HFD
|
||||
// basis, e2 and e3 require the most bits precision: e2 = 6(p2 - 2p3 + p4).
|
||||
// This requires an additional 4 bits precision -- hence we require 36 bits
|
||||
// for the integer part, and the remaining 28 bits is given to the fraction.
|
||||
//
|
||||
// In rendering a Bezier, every 'subdivide' requires an extra 3 bits of
|
||||
// fractional precision. In order to be reversible, we can allow no
|
||||
// error to creep in. Since a INT coordinate is 32 bits, and we
|
||||
// require an additional 4 bits as mentioned above, that leaves us
|
||||
// 28 bits fractional precision -- meaning we can do a maximum of
|
||||
// 9 subdivides. Now, the maximum absolute error of a Bezier curve in 27
|
||||
// bit integer space is 2^29 - 1. But 9 subdivides reduces the error by a
|
||||
// guaranteed factor of 2^18, meaning we can subdivide down only to an error
|
||||
// of 2^11 before we overflow, when in fact we want to reduce error to less
|
||||
// than 1.
|
||||
//
|
||||
// So what we do is HFD until we hit an error less than 2^11, reverse our
|
||||
// basis transform to get the four control points of this smaller curve
|
||||
// (rounding in the process to 32 bits), then invoke another copy of HFD
|
||||
// on the reduced Bezier curve. We again have enough precision, but since
|
||||
// its starting error is less than 2^11, we can reduce error to 2^-7 before
|
||||
// overflowing! We'll start a low HFD after every step of the high HFD.
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
/// HFD difference basis for one coordinate of the 64-bit Bezier flattener.
/// All four terms are 36.28 fixed-point values (see the Bezier64 block
/// comment above): e0 is the current position on the curve, e1 the step
/// to the next position, and e2/e3 the error terms whose magnitude bounds
/// the flatness of the current step size.
#[derive(Default)]
struct HfdBasis64
{
    e0: LONGLONG,
    e1: LONGLONG,
    e2: LONGLONG,
    e3: LONGLONG,
}

impl HfdBasis64 {
    /// Error bound this basis would have after one vDoubleStepSize():
    /// max(|4*e3|, |8*e2 - 4*e3|), i.e. max(|e2'|, |e3'|) at the parent
    /// subdivision level.
    fn vParentError(&self) -> LONGLONG
    {
        (self.e3 << 2).abs().max(((self.e2 << 3) - (self.e3 << 2)).abs())
    }

    /// Current flatness error: max(|e2|, |e3|).
    fn vError(&self) -> LONGLONG
    {
        self.e2.abs().max(self.e3.abs())
    }

    /// Rounds the current position e0 from 36.28 fixed point back to an
    /// integer coordinate.
    fn fxValue(&self) -> INT
    {
        // Convert from 36.28 and round:

        let mut eq = self.e0;
        eq += (1 << (BEZIER64_FRACTION - 1));
        eq >>= BEZIER64_FRACTION;
        return eq as LONG as INT;
    }

    /// Transforms one coordinate of the four control points into the HFD
    /// basis, then widens to 36.28 fixed point with e2 and e3 scaled by 6.
    fn vInit(&mut self, p1: INT, p2: INT, p3: INT, p4: INT)
    {
        let mut eqTmp;
        let eqP2 = p2 as LONGLONG;
        let eqP3 = p3 as LONGLONG;

        // e0 = p1
        // e1 = p4 - p1
        // e2 = 6(p2 - 2p3 + p4)
        // e3 = 6(p1 - 2p2 + p3)

        // Change basis:

        self.e0 = p1 as LONGLONG;                                             // e0 = p1
        self.e1 = p4 as LONGLONG;
        self.e2 = eqP2; self.e2 -= eqP3; self.e2 -= eqP3; self.e2 += self.e1; // e2 = p2 - 2*p3 + p4
        self.e3 = self.e0; self.e3 -= eqP2; self.e3 -= eqP2; self.e3 += eqP3; // e3 = p1 - 2*p2 + p3
        self.e1 -= self.e0;                                                   // e1 = p4 - p1

        // Convert to 36.28 format and multiply e2 and e3 by six:

        self.e0 <<= BEZIER64_FRACTION;
        self.e1 <<= BEZIER64_FRACTION;
        eqTmp = self.e2; self.e2 += eqTmp; self.e2 += eqTmp; self.e2 <<= (BEZIER64_FRACTION + 1);
        eqTmp = self.e3; self.e3 += eqTmp; self.e3 += eqTmp; self.e3 <<= (BEZIER64_FRACTION + 1);
    }

    /// Inverse of vInit: recovers one coordinate of the four control points
    /// of the current sub-curve, writing into `afx` through the `field`
    /// accessor (selects the x or y member of each POINT).
    fn vUntransform<F: Fn(&mut POINT) -> &mut LONG>(&self,
        afx: &mut [POINT; 4], field: F)
    {
        // Declare some temps to hold our operations, since we can't modify e0..e3.

        let mut eqP0;
        let mut eqP1;
        let mut eqP2;
        let mut eqP3;

        // p0 = e0
        // p1 = e0 + (6e1 - e2 - 2e3)/18
        // p2 = e0 + (12e1 - 2e2 - e3)/18
        // p3 = e0 + e1

        eqP0 = self.e0;

        // NOTE PERF: Convert this to a multiply by 6: [andrewgo]

        eqP2 = self.e1;
        eqP2 += self.e1;
        eqP2 += self.e1;
        eqP1 = eqP2;
        eqP1 += eqP2;    // 6e1
        eqP1 -= self.e2; // 6e1 - e2
        eqP2 = eqP1;
        eqP2 += eqP1;    // 12e1 - 2e2
        eqP2 -= self.e3; // 12e1 - 2e2 - e3
        eqP1 -= self.e3;
        eqP1 -= self.e3; // 6e1 - e2 - 2e3

        // NOTE: May just want to approximate these divides! [andrewgo]
        // Or can do a 64 bit divide by 32 bit to get 32 bits right here.

        eqP1 /= 18;
        eqP2 /= 18;
        eqP1 += self.e0;
        eqP2 += self.e0;

        eqP3 = self.e0;
        eqP3 += self.e1;

        // Convert from 36.28 format with rounding:

        eqP0 += (1 << (BEZIER64_FRACTION - 1)); eqP0 >>= BEZIER64_FRACTION; *field(&mut afx[0]) = eqP0 as LONG;
        eqP1 += (1 << (BEZIER64_FRACTION - 1)); eqP1 >>= BEZIER64_FRACTION; *field(&mut afx[1]) = eqP1 as LONG;
        eqP2 += (1 << (BEZIER64_FRACTION - 1)); eqP2 >>= BEZIER64_FRACTION; *field(&mut afx[2]) = eqP2 as LONG;
        eqP3 += (1 << (BEZIER64_FRACTION - 1)); eqP3 >>= BEZIER64_FRACTION; *field(&mut afx[3]) = eqP3 as LONG;
    }

    /// Subdivides: halves the step size, quartering the error terms.
    fn vHalveStepSize(&mut self)
    {
        // e2 = (e2 + e3) >> 3
        // e1 = (e1 - e2) >> 1
        // e3 >>= 2

        self.e2 += self.e3; self.e2 >>= 3;
        self.e1 -= self.e2; self.e1 >>= 1;
        self.e3 >>= 2;
    }

    /// Exact inverse of vHalveStepSize: doubles the step size.
    fn vDoubleStepSize(&mut self)
    {
        // e1 = 2e1 + e2
        // e3 = 4e3;
        // e2 = 8e2 - e3

        self.e1 <<= 1; self.e1 += self.e2;
        self.e3 <<= 2;
        self.e2 <<= 3; self.e2 -= self.e3;
    }

    /// Advances one step along the curve at the current step size.
    fn vTakeStep(&mut self)
    {
        self.e0 += self.e1;
        let eqTmp = self.e2;
        self.e1 += self.e2;
        self.e2 += eqTmp; self.e2 -= self.e3;
        self.e3 = eqTmp;
    }
}
|
||||
|
||||
// Number of fractional bits in the 36.28 fixed-point format used by
// HfdBasis64/Bezier64.
const BEZIER64_FRACTION: LONG = 28;

// The following is our 2^11 target error encoded as a 36.28 number
// (don't forget the additional 4 bits of fractional precision!) and
// the 6 times error multiplier:

const geqErrorHigh: LONGLONG = (6 * (1 << 15) >> (32 - BEZIER64_FRACTION)) << 32;

/*#ifdef BEZIER_FLATTEN_GDI_COMPATIBLE

// The following is the default 2/3 error encoded as a 36.28 number,
// multiplied by 6, and leaving 4 bits for fraction:

const LONGLONG geqErrorLow = (LONGLONG)(4) << 32;

#else*/

// The following is the default 1/4 error encoded as a 36.28 number,
// multiplied by 6, and leaving 4 bits for fraction:

use crate::types::POINT;

const geqErrorLow: LONGLONG = (3) << 31;

//#endif
|
||||
/// 64-bit Bezier flattener state (see the block comment above): a
/// high-precision HFD over the whole curve, plus a low-precision HFD
/// restarted on each sub-curve produced by a high-level step.
#[derive(Default)]
pub struct Bezier64
{
    xLow: HfdBasis64,       // low (sub-curve) HFD, one basis per coordinate
    yLow: HfdBasis64,
    xHigh: HfdBasis64,      // high HFD over the whole curve
    yHigh: HfdBasis64,

    eqErrorLow: LONGLONG,   // target error for the low HFD (32.32 format)
    rcfxClip: Option<RECT>, // bound box of the visible area, if supplied

    cStepsHigh: LONG,       // steps remaining at each level; the curve is
    cStepsLow: LONG         //   finished when both counts reach zero
}
|
||||
|
||||
impl Bezier64 {

    /// Initializes the high-precision HFD over the whole curve and halves
    /// its step size until the per-step error is within geqErrorHigh.
    /// The low HFD is lazily (re)started inside cFlatten.
    fn vInit(&mut self,
             aptfx: &[POINT; 4],
                 // Pointer to 4 control points
             prcfxVis: Option<&RECT>,
                 // Pointer to bound box of visible area (may be NULL)
             eqError: LONGLONG)
                 // Fractional maximum error (32.32 format)
    {
        self.cStepsHigh = 1;
        self.cStepsLow = 0;

        self.xHigh.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
        self.yHigh.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);

        // Initialize error:

        self.eqErrorLow = eqError;

        self.rcfxClip = prcfxVis.cloned();

        while (((self.xHigh.vError()) > geqErrorHigh) ||
               ((self.yHigh.vError()) > geqErrorHigh))
        {
            self.cStepsHigh <<= 1;
            self.xHigh.vHalveStepSize();
            self.yHigh.vHalveStepSize();
        }
    }

    /// Fills `pptfx` with up to `pptfx.len()` points of the flattened
    /// curve.  Sets `*pbMore` to false when the end of the curve was
    /// reached.  Returns the number of points written (never zero).
    /// The `while { body; cond } {}` construct emulates C's do/while.
    fn cFlatten(
        &mut self,
        mut pptfx: &mut [POINT],
        pbMore: &mut bool) -> INT
    {
        let mut aptfx: [POINT; 4] = Default::default();
        let mut cptfx = pptfx.len();
        let mut rcfxBound: RECT;
        let cptfxOriginal = cptfx;

        assert!(cptfx > 0);

        while {
            if (self.cStepsLow == 0)
            {
                // Optimization that if the bound box of the control points doesn't
                // intersect with the bound box of the visible area, render entire
                // curve as a single line:

                self.xHigh.vUntransform(&mut aptfx, |p| &mut p.x);
                self.yHigh.vUntransform(&mut aptfx, |p| &mut p.y);

                self.xLow.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
                self.yLow.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
                self.cStepsLow = 1;

                // Only subdivide the low HFD when the sub-curve is (possibly)
                // visible; an invisible sub-curve is emitted as a single line.
                if (match &self.rcfxClip { None => true, Some(clip) => {rcfxBound = vBoundBox(&aptfx); bIntersect(&rcfxBound, &clip)}})
                {
                    while (((self.xLow.vError()) > self.eqErrorLow) ||
                           ((self.yLow.vError()) > self.eqErrorLow))
                    {
                        self.cStepsLow <<= 1;
                        self.xLow.vHalveStepSize();
                        self.yLow.vHalveStepSize();
                    }
                }

                // This 'if' handles the case where the initial error for the Bezier
                // is already less than the target error:

                if ({self.cStepsHigh -= 1; self.cStepsHigh} != 0)
                {
                    self.xHigh.vTakeStep();
                    self.yHigh.vTakeStep();

                    if (((self.xHigh.vError()) > geqErrorHigh) ||
                        ((self.yHigh.vError()) > geqErrorHigh))
                    {
                        self.cStepsHigh <<= 1;
                        self.xHigh.vHalveStepSize();
                        self.yHigh.vHalveStepSize();
                    }

                    // Re-merge steps while on an even step whose parent error
                    // is acceptable:
                    while (!(self.cStepsHigh & 1 != 0) &&
                           ((self.xHigh.vParentError()) <= geqErrorHigh) &&
                           ((self.yHigh.vParentError()) <= geqErrorHigh))
                    {
                        self.xHigh.vDoubleStepSize();
                        self.yHigh.vDoubleStepSize();
                        self.cStepsHigh >>= 1;
                    }
                }
            }

            self.xLow.vTakeStep();
            self.yLow.vTakeStep();

            pptfx[0].x = self.xLow.fxValue();
            pptfx[0].y = self.yLow.fxValue();
            pptfx = &mut pptfx[1..];

            self.cStepsLow-=1;
            if (self.cStepsLow == 0 && self.cStepsHigh == 0)
            {
                *pbMore = false;

                // '+1' because we haven't decremented 'cptfx' yet:

                return(cptfxOriginal - cptfx + 1) as INT;
            }

            if ((self.xLow.vError() > self.eqErrorLow) ||
                (self.yLow.vError() > self.eqErrorLow))
            {
                self.cStepsLow <<= 1;
                self.xLow.vHalveStepSize();
                self.yLow.vHalveStepSize();
            }

            while (!(self.cStepsLow & 1 != 0) &&
                   ((self.xLow.vParentError()) <= self.eqErrorLow) &&
                   ((self.yLow.vParentError()) <= self.eqErrorLow))
            {
                self.xLow.vDoubleStepSize();
                self.yLow.vDoubleStepSize();
                self.cStepsLow >>= 1;
            }
            cptfx -= 1;
            cptfx != 0
        } {};

        // Output buffer exhausted before the end of the curve:
        *pbMore = true;
        return(cptfxOriginal) as INT;
    }
}
|
||||
|
||||
//+-----------------------------------------------------------------------------
//
//  class CMILBezier
//
//  Bezier cracker.  Flattens any Bezier in our 28.4 device space down to a
//  smallest 'error' of 2^-7 = 0.0078.  Will use fast 32 bit cracker for small
//  curves and slower 64 bit cracker for big curves.
//
//  Public Interface:
//      vInit(aptfx, prcfxClip, peqError)
//          - pptfx points to 4 control points of Bezier.  The first point
//            retrieved by bNext() is the the first point in the approximation
//            after the start-point.
//
//          - prcfxClip is an optional pointer to the bound box of the visible
//            region.  This is used to optimize clipping of Bezier curves that
//            won't be seen.  Note that this value should account for the pen's
//            width!
//
//          - optional maximum error in 32.32 format, corresponding to Kirko's
//            error factor.
//
//      bNext(pptfx)
//          - pptfx points to where next point in approximation will be
//            returned.  Returns FALSE if the point is the end-point of the
//            curve.
//
pub (crate) enum CMILBezier
{
    Bezier64(Bezier64),
    Bezier32(Bezier32)
}

impl CMILBezier {
    /// Chooses the 32-bit flattener when `Bezier32::bInit` accepts the
    /// curve, otherwise falls back to the 64-bit flattener.
    // All coordinates must be in 28.4 format:
    pub fn new(aptfxBez: &[POINT; 4], prcfxClip: Option<&RECT>) -> Self {
        let mut bez32 = Bezier32::default();
        let bBez32 = bez32.bInit(aptfxBez, prcfxClip);
        if bBez32 {
            CMILBezier::Bezier32(bez32)
        } else {
            let mut bez64 = Bezier64::default();
            bez64.vInit(aptfxBez, prcfxClip, geqErrorLow);
            CMILBezier::Bezier64(bez64)
        }
    }

    /// Dispatches to the active flattener's cFlatten.
    // Returns the number of points filled in. This will never be zero.
    //
    // The last point returned may not be exactly the last control
    // point. The workaround is for calling code to add an extra
    // point if this is the case.
    pub fn Flatten( &mut self,
                    pptfx: &mut [POINT],
                    pbMore: &mut bool) -> INT {
        match self {
            CMILBezier::Bezier32(bez) => bez.cFlatten(pptfx, pbMore),
            CMILBezier::Bezier64(bez) => bez.cFlatten(pptfx, pbMore)
        }
    }
}
|
||||
|
||||
// Baseline flattening of a typical curve: fixed point count, curve fits
// in one buffer.
#[test]
fn flatten() {
    let curve: [POINT; 4] = [
        POINT{x: 1715, y: 6506},
        POINT{x: 1692, y: 6506},
        POINT{x: 1227, y: 5148},
        POINT{x: 647, y: 5211}];
    let mut bez = CMILBezier::new(&curve, None);
    let mut result: [POINT; 32] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut result, &mut more);
    assert_eq!(count, 21);
    assert_eq!(more, false);
}

#[test]
fn split_flatten32() {
    // make sure that flattening a curve into two small buffers matches
    // doing it into a large buffer
    let curve: [POINT; 4] = [
        POINT{x: 1795, y: 8445},
        POINT{x: 1795, y: 8445},
        POINT{x: 1908, y: 8683},
        POINT{x: 2043, y: 8705}];

    let mut bez = CMILBezier::new(&curve, None);
    let mut result: [POINT; 8] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut result[..5], &mut more);
    assert_eq!(count, 5);
    assert_eq!(more, true);
    let count = bez.Flatten(&mut result[5..], &mut more);
    assert_eq!(count, 3);
    assert_eq!(more, false);

    let mut bez = CMILBezier::new(&curve, None);
    let mut full_result: [POINT; 8] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut full_result, &mut more);
    assert_eq!(count, 8);
    assert_eq!(more, false);
    assert!(result == full_result);
}

// Small curve handled by the 32-bit flattener.
#[test]
fn flatten32() {
    let curve: [POINT; 4] = [
        POINT{x: 100, y: 100},
        POINT{x: 110, y: 100},
        POINT{x: 110, y: 110},
        POINT{x: 110, y: 100}];
    let mut bez = CMILBezier::new(&curve, None);
    let mut result: [POINT; 32] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut result, &mut more);
    assert_eq!(count, 3);
    assert_eq!(more, false);
}

// Exercises the vDoubleStepSize path of the 32-bit flattener.
#[test]
fn flatten32_double_step_size() {
    let curve: [POINT; 4] = [
        POINT{x: 1761, y: 8152},
        POINT{x: 1761, y: 8152},
        POINT{x: 1750, y: 8355},
        POINT{x: 1795, y: 8445}];
    let mut bez = CMILBezier::new(&curve, None);
    let mut result: [POINT; 32] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut result, &mut more);
    assert_eq!(count, 7);
    assert_eq!(more, false);
}

// A curve big enough to require the 64-bit flattener with a large
// initial step count; output exceeds the 32-point buffer.
#[test]
fn bezier64_init_high_num_steps() {
    let curve: [POINT; 4] = [
        POINT{x: 33, y: -1},
        POINT{x: -1, y: -1},
        POINT{x: -1, y: -16385},
        POINT{x: -226, y: 10}];
    let mut bez = CMILBezier::new(&curve, None);
    let mut result: [POINT; 32] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut result, &mut more);
    assert_eq!(count, 32);
    assert_eq!(more, true);
}

// 64-bit flattener on a curve with a very large initial error.
#[test]
fn bezier64_high_error() {
    let curve: [POINT; 4] = [
        POINT{x: -1, y: -1},
        POINT{x: -4097, y: -1},
        POINT{x: 65471, y: -256},
        POINT{x: -1, y: 0}];
    let mut bez = CMILBezier::new(&curve, None);
    let mut result: [POINT; 32] = Default::default();
    let mut more: bool = false;
    let count = bez.Flatten(&mut result, &mut more);
    assert_eq!(count, 32);
    assert_eq!(more, true);
}
|
|
@ -1,133 +0,0 @@
|
|||
use crate::{PathBuilder, OutputPath, OutputVertex, FillMode, rasterize_to_tri_list};
|
||||
use crate::types::{BYTE, POINT};
|
||||
|
||||
/// Allocates a new PathBuilder on the heap and returns an owning raw
/// pointer.  The caller must release it with `wgr_builder_release`.
#[no_mangle]
pub extern "C" fn wgr_new_builder() -> *mut PathBuilder {
    let pb = PathBuilder::new();
    Box::into_raw(Box::new(pb))
}

/// C wrapper for `PathBuilder::move_to`.
#[no_mangle]
pub extern "C" fn wgr_builder_move_to(pb: &mut PathBuilder, x: f32, y: f32) {
    pb.move_to(x, y);
}

/// C wrapper for `PathBuilder::line_to`.
#[no_mangle]
pub extern "C" fn wgr_builder_line_to(pb: &mut PathBuilder, x: f32, y: f32) {
    pb.line_to(x, y);
}

/// C wrapper for `PathBuilder::curve_to` (cubic Bezier segment).
#[no_mangle]
pub extern "C" fn wgr_builder_curve_to(pb: &mut PathBuilder, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
    pb.curve_to(c1x, c1y, c2x, c2y, x, y);
}

/// C wrapper for `PathBuilder::quad_to` (quadratic Bezier segment).
#[no_mangle]
pub extern "C" fn wgr_builder_quad_to(pb: &mut PathBuilder, cx: f32, cy: f32, x: f32, y: f32) {
    pb.quad_to(cx, cy, x, y);
}

/// C wrapper for `PathBuilder::close` (closes the current subpath).
#[no_mangle]
pub extern "C" fn wgr_builder_close(pb: &mut PathBuilder) {
    pb.close();
}

/// C wrapper for `PathBuilder::set_fill_mode`.
#[no_mangle]
pub extern "C" fn wgr_builder_set_fill_mode(pb: &mut PathBuilder, fill_mode: FillMode) {
    pb.set_fill_mode(fill_mode)
}
|
||||
|
||||
#[repr(C)]
|
||||
pub struct Path {
|
||||
fill_mode: FillMode,
|
||||
points: *const POINT,
|
||||
num_points: usize,
|
||||
types: *const BYTE,
|
||||
num_types: usize,
|
||||
}
|
||||
|
||||
impl From<OutputPath> for Path {
|
||||
fn from(output_path: OutputPath) -> Self {
|
||||
let path = Self {
|
||||
fill_mode: output_path.fill_mode,
|
||||
points: output_path.points.as_ptr(),
|
||||
num_points: output_path.points.len(),
|
||||
types: output_path.types.as_ptr(),
|
||||
num_types: output_path.types.len(),
|
||||
};
|
||||
std::mem::forget(output_path);
|
||||
path
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<OutputPath> for Path {
|
||||
fn into(self) -> OutputPath {
|
||||
OutputPath {
|
||||
fill_mode: self.fill_mode,
|
||||
points: unsafe {
|
||||
if self.points == std::ptr::null() {
|
||||
Default::default()
|
||||
} else {
|
||||
Box::from_raw(std::slice::from_raw_parts_mut(self.points as *mut POINT, self.num_points))
|
||||
}
|
||||
},
|
||||
types: unsafe {
|
||||
if self.types == std::ptr::null() {
|
||||
Default::default()
|
||||
} else {
|
||||
Box::from_raw(std::slice::from_raw_parts_mut(self.types as *mut BYTE, self.num_types))
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Extracts the accumulated path from the builder as a C-compatible
/// `Path` (empty path when the builder holds nothing valid).  The
/// returned Path owns its buffers; release with `wgr_path_release`.
#[no_mangle]
pub extern "C" fn wgr_builder_get_path(pb: &mut PathBuilder) -> Path {
    Path::from(pb.get_path().unwrap_or_default())
}

/// C-compatible pointer/length pair over a triangle-list vertex array.
/// Owns its buffer; release with `wgr_vertex_buffer_release`.
#[repr(C)]
pub struct VertexBuffer {
    data: *const OutputVertex,
    len: usize
}

/// Rasterizes `path` to an antialiased triangle list clipped to the
/// given rectangle.  The result buffer is leaked into the returned
/// VertexBuffer and must be freed with `wgr_vertex_buffer_release`.
#[no_mangle]
pub extern "C" fn wgr_path_rasterize_to_tri_list(
    path: &Path,
    clip_x: i32,
    clip_y: i32,
    clip_width: i32,
    clip_height: i32,
    need_inside: bool,
    need_outside: bool,
) -> VertexBuffer {
    let result = rasterize_to_tri_list(
        path.fill_mode,
        // SAFETY: callers must keep `types`/`points` valid for
        // `num_types`/`num_points` elements for the duration of the call.
        unsafe { std::slice::from_raw_parts(path.types, path.num_types) },
        unsafe { std::slice::from_raw_parts(path.points, path.num_points) },
        clip_x, clip_y, clip_width, clip_height,
        need_inside, need_outside,
    );
    let vb = VertexBuffer { data: result.as_ptr(), len: result.len() };
    // Leak the boxed slice; ownership continues through `vb`.
    std::mem::forget(result);
    vb
}

/// Frees the buffers owned by a `Path` obtained from
/// `wgr_builder_get_path`.  Must be called exactly once per Path.
#[no_mangle]
pub extern "C" fn wgr_path_release(path: Path) {
    let output_path: OutputPath = path.into();
    drop(output_path);
}

/// Frees the buffer owned by a `VertexBuffer` obtained from
/// `wgr_path_rasterize_to_tri_list`.  Must be called exactly once.
#[no_mangle]
pub extern "C" fn wgr_vertex_buffer_release(vb: VertexBuffer)
{
    // SAFETY: `vb` must be the untouched pointer/length pair produced by
    // wgr_path_rasterize_to_tri_list, which leaked a boxed slice.
    unsafe { drop(Box::from_raw(std::slice::from_raw_parts_mut(vb.data as *mut OutputVertex, vb.len))) }
}

/// Frees a builder allocated by `wgr_new_builder`.
///
/// # Safety
/// `pb` must be a pointer returned by `wgr_new_builder` that has not
/// already been released.
#[no_mangle]
pub unsafe extern "C" fn wgr_builder_release(pb: *mut PathBuilder) {
    drop(Box::from_raw(pb));
}
|
|
@ -1,9 +0,0 @@
|
|||
use crate::types::*;
type FIX4 = INT; // 28.4 fixed point value

// constants for working with 28.4 fixed point values
macro_rules! FIX4_SHIFT { () => { 4 } }                           // shift between integer and 28.4
macro_rules! FIX4_PRECISION { () => { 4 } }                       // number of fractional bits
macro_rules! FIX4_ONE { () => { (1 << FIX4_PRECISION!()) } }      // 1.0 in 28.4
macro_rules! FIX4_HALF { () => { (1 << (FIX4_PRECISION!()-1)) } } // 0.5 in 28.4
macro_rules! FIX4_MASK { () => { (FIX4_ONE!() - 1) } }            // mask selecting the fractional part
|
|
@ -1,92 +0,0 @@
|
|||
use crate::aacoverage::CCoverageInterval;
use crate::nullable_ref::Ref;
use crate::types::*;

/// Receiver for geometry produced by the rasterizer.  Only the
/// trapezoidal-AA portion of the original C++ interface is ported; the
/// rest is kept below as commented-out reference.
pub trait IGeometrySink
{
    //
    // Aliased geometry output
    //
/*
    virtual HRESULT AddVertex(
        __in_ecount(1) const MilPoint2F &ptPosition,
            // In: Vertex coordinates
        __out_ecount(1) WORD *pidxOut
            // Out: Index of vertex
        ) PURE;

    virtual HRESULT AddIndexedVertices(
        UINT cVertices,
            // In: number of vertices
        __in_bcount(cVertices*uVertexStride) const void *pVertexBuffer,
            // In: vertex buffer containing the vertices
        UINT uVertexStride,
            // In: size of each vertex
        MilVertexFormat mvfFormat,
            // In: format of each vertex
        UINT cIndices,
            // In: Number of indices
        __in_ecount(cIndices) const UINT *puIndexBuffer
            // In: index buffer
        ) PURE;

    virtual void SetTransformMapping(
        __in_ecount(1) const MILMatrix3x2 &mat2DTransform
        ) PURE;

    virtual HRESULT AddTriangle(
        DWORD idx1,
            // In: Index of triangle's first vertex
        DWORD idx2,
            // In: Index of triangle's second vertex
        DWORD idx3
            // In: Index of triangle's third vertex
        ) PURE;

    //
    // Trapezoidal AA geometry output
    //
*/
    /// Emits one scanline's worth of coverage intervals.
    fn AddComplexScan(&mut self,
        nPixelY: INT,
            // In: y coordinate in pixel space
        pIntervalSpanStart: Ref<CCoverageInterval>
            // In: coverage segments
        ) -> HRESULT;

    /// Emits one trapezoid of the antialiased fill.
    fn AddTrapezoid(
        &mut self,
        rYMin: f32,
            // In: y coordinate of top of trapezoid
        rXLeftYMin: f32,
            // In: x coordinate for top left
        rXRightYMin: f32,
            // In: x coordinate for top right
        rYMax: f32,
            // In: y coordinate of bottom of trapezoid
        rXLeftYMax: f32,
            // In: x coordinate for bottom left
        rXRightYMax: f32,
            // In: x coordinate for bottom right
        rXDeltaLeft: f32,
            // In: trapezoid expand radius
        rXDeltaRight: f32
            // In: trapezoid expand radius
        ) -> HRESULT;

    /// Reports whether the sink has received any geometry yet.
    fn IsEmpty(&self) -> bool;
/*
    virtual HRESULT AddParallelogram(
        __in_ecount(4) const MilPoint2F *rgPosition
        ) PURE;

    //
    // Query sink status
    //

    // Some geometry generators don't actually know if they have output
    // any triangles, so they need to get this information from the geometry sink.

    virtual BOOL IsEmpty() PURE;
*/
}
|
|
@ -1,55 +0,0 @@
|
|||
/// Widening 32x32 -> 64 bit multiply (port of the Win32 helper of the
/// same name); cannot overflow.
pub fn Int32x32To64(a: i32, b: i32) -> i64 { a as i64 * b as i64 }

// Debug-tag check; tags are compiled out in this port, so always false.
macro_rules! IsTagEnabled {
    ($e: expr) => {
        false
    }
}

// Debug-tag trace; reduced to a dbg! of the message in this port.
macro_rules! TraceTag {
    (($e: expr, $s: expr)) => {
        dbg!($s)
    }
}

// "If Failed, Crash": the ported code treats any non-S_OK result as a bug.
macro_rules! IFC {
    ($e: expr) => {
        assert_eq!($e, S_OK);
    }
}

// "If Failed, Return": propagates a non-S_OK HRESULT to the caller.
macro_rules! IFR {
    ($e: expr) => {
        let hresult = $e;
        if (hresult != S_OK) { return hresult }
    }
}

// Static-analysis hint from the original code base; a no-op here.
macro_rules! __analysis_assume {
    ($e: expr) => {
    }
}

// "If Failed, Out Of Memory": asserts an allocation result is non-null.
macro_rules! IFCOOM {
    ($e: expr) => {
        assert_ne!($e, NULL());
    }
}

// Return an HRESULT, permitting one specific non-S_OK value ($s1);
// anything else must be S_OK.
macro_rules! RRETURN1 {
    ($e: expr, $s1: expr) => {
        if $e == $s1 {
        } else {
            assert_eq!($e, S_OK);
        }
        return $e;
    }
}

// Return an HRESULT that must be S_OK.
macro_rules! RRETURN {
    ($e: expr) => {
        assert_eq!($e, S_OK);
        return $e;
    }
}
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -1,669 +0,0 @@
|
|||
/*!
|
||||
Converts a 2D path into a set of vertices of a triangle strip mesh that represents the antialiased fill of that path.
|
||||
|
||||
```rust
|
||||
use wpf_gpu_raster::PathBuilder;
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(40., 40.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
```
|
||||
|
||||
*/
|
||||
#![allow(unused_parens)]
|
||||
#![allow(overflowing_literals)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(unused_macros)]
|
||||
|
||||
#[macro_use]
|
||||
mod fix;
|
||||
#[macro_use]
|
||||
mod helpers;
|
||||
#[macro_use]
|
||||
mod real;
|
||||
mod bezier;
|
||||
#[macro_use]
|
||||
mod aarasterizer;
|
||||
mod hwrasterizer;
|
||||
mod aacoverage;
|
||||
mod hwvertexbuffer;
|
||||
|
||||
mod types;
|
||||
mod geometry_sink;
|
||||
mod matrix;
|
||||
|
||||
mod nullable_ref;
|
||||
|
||||
#[cfg(feature = "c_bindings")]
|
||||
pub mod c_bindings;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tri_rasterize;
|
||||
|
||||
use std::{rc::Rc, cell::RefCell};
|
||||
|
||||
use aarasterizer::CheckValidRange28_4;
|
||||
use hwrasterizer::CHwRasterizer;
|
||||
use hwvertexbuffer::CHwVertexBufferBuilder;
|
||||
use matrix::CMatrix;
|
||||
use real::CFloatFPU;
|
||||
use types::{CoordinateSpace, CD3DDeviceLevel1, MilFillMode, PathPointTypeStart, MilPoint2F, PathPointTypeLine, MilVertexFormat, MilVertexFormatAttribute, DynArray, BYTE, PathPointTypeBezier, PathPointTypeCloseSubpath, CMILSurfaceRect, POINT};
|
||||
|
||||
/// One vertex of the output triangle list: device-space position plus
/// antialiasing coverage in [0, 1].
#[repr(C)]
#[derive(Debug, Default)]
pub struct OutputVertex {
    pub x: f32,
    pub y: f32,
    pub coverage: f32
}

/// Fill rule used when rasterizing a path.
#[repr(C)]
#[derive(Copy, Clone)]
pub enum FillMode {
    EvenOdd = 0,
    Winding = 1,
}

impl Default for FillMode {
    fn default() -> Self {
        FillMode::EvenOdd
    }
}

/// A finished path: fill rule plus the 28.4 fixed-point point array and
/// the per-segment PathPointType array, as produced by PathBuilder::get_path.
#[derive(Clone, Default)]
pub struct OutputPath {
    fill_mode: FillMode,
    points: Box<[POINT]>,
    types: Box<[BYTE]>,
}

// Hash on the exact bit patterns of the floats so that equal-bits
// vertices hash equally (f32 itself is not Hash).
impl std::hash::Hash for OutputVertex {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.x.to_bits().hash(state);
        self.y.to_bits().hash(state);
        self.coverage.to_bits().hash(state);
    }
}

/// Incrementally builds a path from move/line/curve commands and hands
/// it to the rasterizer (see the impl below).
pub struct PathBuilder {
    points: DynArray<POINT>,            // accumulated 28.4 fixed-point points
    types: DynArray<BYTE>,              // PathPointType byte per segment entry
    initial_point: Option<MilPoint2F>,  // start point of the current subpath
    current_point: Option<MilPoint2F>,  // last point added
    in_shape: bool,                     // whether a subpath is currently open
    fill_mode: FillMode,
    outside_bounds: Option<CMILSurfaceRect>, // see set_outside_bounds
    need_inside: bool,
    valid_range: bool,                  // false once any point leaves 28.4 range
}
|
||||
|
||||
impl PathBuilder {
    /// Creates an empty builder: even-odd fill, no outside bounds, and
    /// all points so far (none) in valid range.
    pub fn new() -> Self {
        Self {
            points: Vec::new(),
            types: Vec::new(),
            initial_point: None,
            current_point: None,
            in_shape: false,
            fill_mode: FillMode::EvenOdd,
            outside_bounds: None,
            need_inside: true,
            valid_range: true,
        }
    }
    /// Records a point, converting from float pixel space to the
    /// rasterizer's 28.4 fixed-point space, and accumulates the
    /// valid-range flag.
    fn add_point(&mut self, x: f32, y: f32) {
        self.current_point = Some(MilPoint2F{X: x, Y: y});
        // Transform from pixel corner at 0.0 to pixel center at 0.0. Scale into 28.4 range.
        // Validate that the point before rounding is within expected bounds for the rasterizer.
        let (x, y) = ((x - 0.5) * 16.0, (y - 0.5) * 16.0);
        self.valid_range = self.valid_range && CheckValidRange28_4(x, y);
        self.points.push(POINT {
            x: CFloatFPU::Round(x),
            y: CFloatFPU::Round(y),
        });
    }
    /// Adds a line segment.  Opens a subpath at the stored initial point
    /// if none is open; before any move_to, the first call only records
    /// the point as the initial point.
    pub fn line_to(&mut self, x: f32, y: f32) {
        if let Some(initial_point) = self.initial_point {
            if !self.in_shape {
                self.types.push(PathPointTypeStart);
                self.add_point(initial_point.X, initial_point.Y);
                self.in_shape = true;
            }
            self.types.push(PathPointTypeLine);
            self.add_point(x, y);
        } else {
            self.initial_point = Some(MilPoint2F{X: x, Y: y})
        }
    }
    /// Starts a new subpath at (x, y) without emitting any geometry yet.
    pub fn move_to(&mut self, x: f32, y: f32) {
        self.in_shape = false;
        self.initial_point = Some(MilPoint2F{X: x, Y: y});
        self.current_point = self.initial_point;
    }
    /// Adds a cubic Bezier segment with control points (c1, c2) ending
    /// at (x, y).  With no initial point set, the first control point
    /// doubles as the subpath start.
    pub fn curve_to(&mut self, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
        let initial_point = match self.initial_point {
            Some(initial_point) => initial_point,
            None => MilPoint2F{X:c1x, Y:c1y}
        };
        if !self.in_shape {
            self.types.push(PathPointTypeStart);
            self.add_point(initial_point.X, initial_point.Y);
            self.initial_point = Some(initial_point);
            self.in_shape = true;
        }
        self.types.push(PathPointTypeBezier);
        self.add_point(c1x, c1y);
        self.add_point(c2x, c2y);
        self.add_point(x, y);
    }
    /// Adds a quadratic Bezier segment by degree-elevating it to the
    /// equivalent cubic.
    pub fn quad_to(&mut self, cx: f32, cy: f32, x: f32, y: f32) {
        // For now we just implement quad_to on top of curve_to.
        // Long term we probably want to support quad curves
        // directly.
        let c0 = match self.current_point {
            Some(current_point) => current_point,
            None => MilPoint2F{X:cx, Y:cy}
        };

        let c1x = c0.X + (2./3.) * (cx - c0.X);
        let c1y = c0.Y + (2./3.) * (cy - c0.Y);

        let c2x = x + (2./3.) * (cx - x);
        let c2y = y + (2./3.) * (cy - y);

        self.curve_to(c1x, c1y, c2x, c2y, x, y);
    }
    /// Closes the current subpath by flagging its last segment entry.
    pub fn close(&mut self) {
        if let Some(last) = self.types.last_mut() {
            *last |= PathPointTypeCloseSubpath;
        }
        self.in_shape = false;
        self.initial_point = None;
    }
    /// Sets the fill rule used by rasterize_to_tri_list / get_path.
    pub fn set_fill_mode(&mut self, fill_mode: FillMode) {
        self.fill_mode = fill_mode;
    }
    /// Enables rendering geometry for areas outside the shape but
    /// within the bounds. These areas will be created with
    /// zero alpha.
    ///
    /// This is useful for creating geometry for other blend modes.
    /// For example:
    /// - `IN(dest, geometry)` can be done with `outside_bounds` and `need_inside = false`
    /// - `IN(dest, geometry, alpha)` can be done with `outside_bounds` and `need_inside = true`
    ///
    /// Note: trapezoidal areas won't be clipped to outside_bounds
    pub fn set_outside_bounds(&mut self, outside_bounds: Option<(i32, i32, i32, i32)>, need_inside: bool) {
        self.outside_bounds = outside_bounds.map(|r| CMILSurfaceRect { left: r.0, top: r.1, right: r.2, bottom: r.3 });
        self.need_inside = need_inside;
    }

    /// Rasterizes the accumulated path to an antialiased triangle list,
    /// intersecting the clip rect with any outside bounds first.
    /// Returns an empty list if any point fell outside 28.4 range.
    ///
    /// Note: trapezoidal areas won't necessarily be clipped to the clip rect
    pub fn rasterize_to_tri_list(&self, clip_x: i32, clip_y: i32, clip_width: i32, clip_height: i32) -> Box<[OutputVertex]> {
        if !self.valid_range {
            // If any of the points are outside of valid 28.4 range, then just return an empty triangle list.
            return Box::new([]);
        }
        let (x, y, width, height, need_outside) = if let Some(CMILSurfaceRect { left, top, right, bottom }) = self.outside_bounds {
            let x0 = clip_x.max(left);
            let y0 = clip_y.max(top);
            let x1 = (clip_x + clip_width).min(right);
            let y1 = (clip_y + clip_height).min(bottom);
            (x0, y0, x1 - x0, y1 - y0, true)
        } else {
            (clip_x, clip_y, clip_width, clip_height, false)
        };
        rasterize_to_tri_list(self.fill_mode, &self.types, &self.points, x, y, width, height, self.need_inside, need_outside)
    }

    /// Moves the accumulated point/type arrays out of the builder as an
    /// OutputPath; returns None if the path is empty or out of range.
    /// The builder's arrays are left empty.
    pub fn get_path(&mut self) -> Option<OutputPath> {
        if self.valid_range && !self.points.is_empty() && !self.types.is_empty() {
            Some(OutputPath {
                fill_mode: self.fill_mode,
                points: std::mem::take(&mut self.points).into_boxed_slice(),
                types: std::mem::take(&mut self.types).into_boxed_slice(),
            })
        } else {
            None
        }
    }
}
|
||||
|
||||
// Converts a path that is specified as an array of edge types, each associated with a fixed number
|
||||
// of points that are serialized to the points array. Edge types are specified via PathPointType
|
||||
// masks, whereas points must be supplied in 28.4 signed fixed-point format. By default, users can
|
||||
// fill the inside of the path excluding the outside. It may alternatively be desirable to fill the
|
||||
// outside the path out to the clip boundary, optionally keeping the inside. PathBuilder may be
|
||||
// used instead as a simpler interface to this function that handles building the path arrays.
|
||||
pub fn rasterize_to_tri_list(
|
||||
fill_mode: FillMode,
|
||||
types: &[BYTE],
|
||||
points: &[POINT],
|
||||
clip_x: i32,
|
||||
clip_y: i32,
|
||||
clip_width: i32,
|
||||
clip_height: i32,
|
||||
need_inside: bool,
|
||||
need_outside: bool,
|
||||
) -> Box<[OutputVertex]> {
|
||||
let mut rasterizer = CHwRasterizer::new();
|
||||
let mut device = CD3DDeviceLevel1::new();
|
||||
|
||||
device.clipRect.X = clip_x;
|
||||
device.clipRect.Y = clip_y;
|
||||
device.clipRect.Width = clip_width;
|
||||
device.clipRect.Height = clip_height;
|
||||
let device = Rc::new(device);
|
||||
/*
|
||||
device.m_rcViewport = device.clipRect;
|
||||
*/
|
||||
let worldToDevice: CMatrix<CoordinateSpace::Shape, CoordinateSpace::Device> = CMatrix::Identity();
|
||||
|
||||
let mil_fill_mode = match fill_mode {
|
||||
FillMode::EvenOdd => MilFillMode::Alternate,
|
||||
FillMode::Winding => MilFillMode::Winding,
|
||||
};
|
||||
|
||||
rasterizer.Setup(device.clone(), mil_fill_mode, Some(&worldToDevice));
|
||||
|
||||
let mut m_mvfIn: MilVertexFormat = MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat;
|
||||
let m_mvfGenerated: MilVertexFormat = MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat;
|
||||
//let mvfaAALocation = MILVFAttrNone;
|
||||
const HWPIPELINE_ANTIALIAS_LOCATION: MilVertexFormatAttribute = MilVertexFormatAttribute::MILVFAttrDiffuse;
|
||||
let mvfaAALocation = HWPIPELINE_ANTIALIAS_LOCATION;
|
||||
struct CHwPipeline {
|
||||
m_pDevice: Rc<CD3DDeviceLevel1>
|
||||
}
|
||||
let pipeline = CHwPipeline { m_pDevice: device.clone() };
|
||||
let m_pHP = &pipeline;
|
||||
|
||||
rasterizer.GetPerVertexDataType(&mut m_mvfIn);
|
||||
let vertexBuilder= Rc::new(RefCell::new(CHwVertexBufferBuilder::Create(m_mvfIn, m_mvfIn | m_mvfGenerated,
|
||||
mvfaAALocation,
|
||||
m_pHP.m_pDevice.clone())));
|
||||
|
||||
let outside_bounds = if need_outside {
|
||||
Some(CMILSurfaceRect {
|
||||
left: clip_x,
|
||||
top: clip_y,
|
||||
right: clip_x + clip_width,
|
||||
bottom: clip_y + clip_height,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
vertexBuilder.borrow_mut().SetOutsideBounds(outside_bounds.as_ref(), need_inside);
|
||||
vertexBuilder.borrow_mut().BeginBuilding();
|
||||
|
||||
rasterizer.SendGeometry(vertexBuilder.clone(), points, types);
|
||||
vertexBuilder.borrow_mut().FlushTryGetVertexBuffer(None);
|
||||
device.output.replace(Vec::new()).into_boxed_slice()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{hash::{Hash, Hasher}, collections::hash_map::DefaultHasher};
|
||||
use crate::{*, tri_rasterize::rasterize_to_mask};
|
||||
fn calculate_hash<T: Hash>(t: &T) -> u64 {
|
||||
let mut s = DefaultHasher::new();
|
||||
t.hash(&mut s);
|
||||
s.finish()
|
||||
}
|
||||
#[test]
|
||||
fn basic() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(10., 30.);
|
||||
p.line_to(30., 30.);
|
||||
p.line_to(30., 10.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 18);
|
||||
//assert_eq!(dbg!(calculate_hash(&result)), 0x5851570566450135);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfbb7c3932059e240);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(40., 40.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
//assert_eq!(dbg!(calculate_hash(&result)), 0x81a9af7769f88e68);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6d1595533d40ef92);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rust() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(40., 40.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
//assert_eq!(dbg!(calculate_hash(&result)), 0x81a9af7769f88e68);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6d1595533d40ef92);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fill_mode() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(40., 40.);
|
||||
p.line_to(10., 40.);
|
||||
p.close();
|
||||
p.move_to(15., 15.);
|
||||
p.line_to(35., 15.);
|
||||
p.line_to(35., 35.);
|
||||
p.line_to(15., 35.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
//assert_eq!(dbg!(calculate_hash(&result)), 0xb34344234f2f75a8);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xc7bf999c56ccfc34);
|
||||
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(40., 40.);
|
||||
p.line_to(10., 40.);
|
||||
p.close();
|
||||
p.move_to(15., 15.);
|
||||
p.line_to(35., 15.);
|
||||
p.line_to(35., 35.);
|
||||
p.line_to(15., 35.);
|
||||
p.close();
|
||||
p.set_fill_mode(FillMode::Winding);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
//assert_eq!(dbg!(calculate_hash(&result)), 0xee4ecd8a738fc42c);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfafad659db9a2efd);
|
||||
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn range() {
|
||||
// test for a start point out of range
|
||||
let mut p = PathBuilder::new();
|
||||
p.curve_to(8.872974e16, 0., 0., 0., 0., 0.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 0);
|
||||
|
||||
// test for a subsequent point out of range
|
||||
let mut p = PathBuilder::new();
|
||||
p.curve_to(0., 0., 8.872974e16, 0., 0., 0.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_starts() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.line_to(10., 10.);
|
||||
p.move_to(0., 0.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn path_closing() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.curve_to(0., 0., 0., 0., 0., 32.0);
|
||||
p.close();
|
||||
p.curve_to(0., 0., 0., 0., 0., 32.0);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn curve() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.curve_to(40., 10., 40., 10., 40., 40.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0x8dbc4d23f9bba38d);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xa92aae8dba7b8cd4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn partial_coverage_last_line() {
|
||||
let mut p = PathBuilder::new();
|
||||
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(40., 39.6);
|
||||
p.line_to(10., 39.6);
|
||||
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 21);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0xf90cb6afaadfb559);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfa200c3bae144952);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn delta_upper_bound() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(-122.3 + 200.,84.285);
|
||||
p.curve_to(-122.3 + 200., 84.285, -122.2 + 200.,86.179, -123.03 + 200., 86.16);
|
||||
p.curve_to(-123.85 + 200., 86.141, -140.3 + 200., 38.066, -160.83 + 200., 40.309);
|
||||
p.curve_to(-160.83 + 200., 40.309, -143.05 + 200., 32.956, -122.3 + 200., 84.285);
|
||||
p.close();
|
||||
|
||||
let result = p.rasterize_to_tri_list(0, 0, 400, 400);
|
||||
assert_eq!(result.len(), 429);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0x52d52992e249587a);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x5e82d98fdb47a796);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn self_intersect() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(10., 40.);
|
||||
p.line_to(40., 40.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0xf10babef5c619d19);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x49ecc769e1d4ec01);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn grid() {
|
||||
let mut p = PathBuilder::new();
|
||||
|
||||
for i in 0..200 {
|
||||
let offset = i as f32 * 1.3;
|
||||
p.move_to(0. + offset, -8.);
|
||||
p.line_to(0.5 + offset, -8.);
|
||||
p.line_to(0.5 + offset, 40.);
|
||||
p.line_to(0. + offset, 40.);
|
||||
p.close();
|
||||
}
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 12000);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x5a7df39d9e9292f0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outside() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(10., 40.);
|
||||
p.line_to(40., 40.);
|
||||
p.close();
|
||||
p.set_outside_bounds(Some((0, 0, 50, 50)), false);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0x7c5750ee536ae4ee);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x59403ddbb7e1d09a);
|
||||
|
||||
// ensure that adjusting the outside bounds changes the results
|
||||
p.set_outside_bounds(Some((5, 5, 50, 50)), false);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0x55441457b28613e0);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x59403ddbb7e1d09a);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outside_inside() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(40., 10.);
|
||||
p.line_to(10., 40.);
|
||||
p.line_to(40., 40.);
|
||||
p.close();
|
||||
p.set_outside_bounds(Some((0, 0, 50, 50)), true);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0xaf76b42a5244d1ec);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x49ecc769e1d4ec01);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn outside_clipped() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.);
|
||||
p.line_to(10., 40.);
|
||||
p.line_to(90., 40.);
|
||||
p.line_to(40., 10.);
|
||||
p.close();
|
||||
p.set_outside_bounds(Some((0, 0, 50, 50)), false);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 50, 50);
|
||||
assert_eq!(dbg!(calculate_hash(&result)), 0x648a0b7b6aa3b4ed);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x3d2a08f5d0bac999);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clip_edge() {
|
||||
let mut p = PathBuilder::new();
|
||||
// tests the bigNumerator < 0 case of aarasterizer::ClipEdge
|
||||
p.curve_to(-24., -10., -300., 119., 0.0, 0.0);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
// The edge merging only happens between points inside the enumerate buffer. This means
|
||||
// that the vertex output can depend on the size of the enumerate buffer because there
|
||||
// the number of edges and positions of vertices will change depending on edge merging.
|
||||
if ENUMERATE_BUFFER_NUMBER!() == 32 {
|
||||
assert_eq!(result.len(), 111);
|
||||
} else {
|
||||
assert_eq!(result.len(), 171);
|
||||
}
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x50b887b09a4c16e);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn enum_buffer_num() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.curve_to(0.0, 0.0, 0.0, 12.0, 0.0, 44.919434);
|
||||
p.line_to(64.0, 36.0 );
|
||||
p.line_to(0.0, 80.0,);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 300);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x659cc742f16b42f2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fill_alternating_empty_interior_pairs() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.line_to( 0., 2. );
|
||||
p.curve_to(0.0, 0.0,1., 6., 0.0, 0.0);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 9);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x726606a662fe46a0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fill_winding_empty_interior_pairs() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.curve_to(45., 61., 0.09, 0., 0., 0.);
|
||||
p.curve_to(45., 61., 0.09, 0., 0., 0.);
|
||||
p.curve_to(0., 0., 0., 38., 0.09, 15.);
|
||||
p.set_fill_mode(FillMode::Winding);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 462);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x651ea4ade5543087);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_fill() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(0., 0.);
|
||||
p.line_to(10., 100.);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rasterize_line() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(1., 1.);
|
||||
p.line_to(2., 1.);
|
||||
p.line_to(2., 2.);
|
||||
p.line_to(1., 2.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
let mask = rasterize_to_mask(&result, 3, 3);
|
||||
assert_eq!(&mask[..], &[0, 0, 0,
|
||||
0, 255, 0,
|
||||
0, 0, 0][..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn triangle() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(1., 10.);
|
||||
p.line_to(100., 13.);
|
||||
p.line_to(1., 16.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x4757b0c5a19b02f0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_pixel() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(1.5, 1.5);
|
||||
p.line_to(2., 1.5);
|
||||
p.line_to(2., 2.);
|
||||
p.line_to(1.5, 2.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(result.len(), 3);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 4, 4)), 0x9f481fe5588e341c);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn traps_outside_bounds() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.0);
|
||||
p.line_to(30., 10.);
|
||||
p.line_to(50., 20.);
|
||||
p.line_to(30., 30.);
|
||||
p.line_to(10., 30.);
|
||||
p.close();
|
||||
// The generated trapezoids are not necessarily clipped to the outside bounds rect
|
||||
// and in this case the outside bounds geometry ends up drawing on top of the
|
||||
// edge geometry which could be considered a bug.
|
||||
p.set_outside_bounds(Some((0, 0, 50, 30)), true);
|
||||
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6514e3d79d641f09);
|
||||
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn quad_to() {
|
||||
let mut p = PathBuilder::new();
|
||||
p.move_to(10., 10.0);
|
||||
p.quad_to(30., 10., 30., 30.);
|
||||
p.quad_to(10., 30., 30., 30.);
|
||||
p.quad_to(60., 30., 60., 10.);
|
||||
p.close();
|
||||
let result = p.rasterize_to_tri_list(0, 0, 70, 40);
|
||||
assert_eq!(result.len(), 279);
|
||||
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 70, 40)), 0xbd2eec3cfe9bd30b);
|
||||
}
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
use std::marker::PhantomData;
|
||||
|
||||
use crate::types::CoordinateSpace;
|
||||
|
||||
pub type CMILMatrix = CMatrix<CoordinateSpace::Shape,CoordinateSpace::Device>;
|
||||
#[derive(Default, Clone)]
|
||||
pub struct CMatrix<InCoordSpace, OutCoordSpace> {
|
||||
_11: f32, _12: f32, _13: f32, _14: f32,
|
||||
_21: f32, _22: f32, _23: f32 , _24: f32,
|
||||
_31: f32, _32: f32, _33: f32, _34: f32,
|
||||
_41: f32, _42: f32, _43: f32, _44: f32,
|
||||
in_coord: PhantomData<InCoordSpace>,
|
||||
out_coord: PhantomData<OutCoordSpace>
|
||||
}
|
||||
|
||||
impl<InCoordSpace: Default, OutCoordSpace: Default> CMatrix<InCoordSpace, OutCoordSpace> {
|
||||
pub fn Identity() -> Self { let mut ret: Self = Default::default();
|
||||
ret._11 = 1.;
|
||||
ret._22 = 1.;
|
||||
ret._33 = 1.;
|
||||
ret._44 = 1.;
|
||||
ret
|
||||
}
|
||||
pub fn GetM11(&self) -> f32 { self._11 }
|
||||
pub fn GetM12(&self) -> f32 { self._12 }
|
||||
pub fn GetM21(&self) -> f32 { self._21 }
|
||||
pub fn GetM22(&self) -> f32 { self._22 }
|
||||
pub fn GetDx(&self) -> f32 { self._41 }
|
||||
pub fn GetDy(&self) -> f32 { self._42 }
|
||||
|
||||
pub fn SetM11(&mut self, r: f32) { self._11 = r}
|
||||
pub fn SetM12(&mut self, r: f32) { self._12 = r}
|
||||
pub fn SetM21(&mut self, r: f32) { self._21 = r}
|
||||
pub fn SetM22(&mut self, r: f32) { self._22 = r}
|
||||
pub fn SetDx(&mut self, dx: f32) { self._41 = dx }
|
||||
pub fn SetDy(&mut self, dy: f32) { self._42 = dy }
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
crossing goto
|
||||
./MultiSpaceRectF.inl:70:5: error: call to implicitly-deleted default constructor of 'union (anonymous union at ./MultiSpaceRectF.inl:138:5)'
|
||||
|
||||
|
||||
Rust conversion
|
||||
---------------
|
||||
CEdge is a singly linked list
|
||||
|
||||
Future
|
||||
------
|
||||
When flatening curves if we try to flatten at integer values
|
||||
we can avoid the ComplexSpan code path.
|
|
@ -1,53 +0,0 @@
|
|||
use std::{marker::PhantomData, ops::Deref};
|
||||
|
||||
pub struct Ref<'a, T> {
|
||||
ptr: *const T,
|
||||
_phantom: PhantomData<&'a T>
|
||||
}
|
||||
|
||||
impl<'a, T> Copy for Ref<'a, T> { }
|
||||
|
||||
impl<'a, T> Clone for Ref<'a, T> {
|
||||
fn clone(&self) -> Self {
|
||||
*self
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Ref<'a, T> {
|
||||
pub fn new(p: &'a T) -> Self {
|
||||
Ref { ptr: p as *const T, _phantom: PhantomData}
|
||||
}
|
||||
pub unsafe fn null() -> Self {
|
||||
Ref { ptr: std::ptr::null(), _phantom: PhantomData}
|
||||
}
|
||||
pub fn is_null(&self) -> bool {
|
||||
self.ptr.is_null()
|
||||
}
|
||||
pub fn get_ref(self) -> &'a T {
|
||||
unsafe { &*self.ptr }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> PartialEq for Ref<'a, T> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.ptr == other.ptr && self._phantom == other._phantom
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> PartialOrd for Ref<'a, T> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
match self.ptr.partial_cmp(&other.ptr) {
|
||||
Some(core::cmp::Ordering::Equal) => {}
|
||||
ord => return ord,
|
||||
}
|
||||
self._phantom.partial_cmp(&other._phantom)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Deref for Ref<'a, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
unsafe { &*self.ptr }
|
||||
}
|
||||
}
|
|
@ -1,160 +0,0 @@
|
|||
pub mod CFloatFPU {
|
||||
// Maximum allowed argument for SmallRound
|
||||
// const sc_uSmallMax: u32 = 0xFFFFF;
|
||||
|
||||
// Binary representation of static_cast<float>(sc_uSmallMax)
|
||||
const sc_uBinaryFloatSmallMax: u32 = 0x497ffff0;
|
||||
|
||||
fn LargeRound(x: f32) -> i32 {
|
||||
//XXX: the SSE2 version is probably slower than a naive SSE4 implementation that can use roundss
|
||||
#[cfg(target_feature = "sse2")]
|
||||
unsafe {
|
||||
use std::arch::x86_64::{__m128, _mm_set_ss, _mm_cvtss_si32, _mm_cvtsi32_ss, _mm_sub_ss, _mm_cmple_ss, _mm_store_ss, _mm_setzero_ps};
|
||||
|
||||
let given: __m128 = _mm_set_ss(x); // load given value
|
||||
let result = _mm_cvtss_si32(given);
|
||||
let rounded: __m128 = _mm_setzero_ps(); // convert it to integer (rounding mode doesn't matter)
|
||||
let rounded = _mm_cvtsi32_ss(rounded, result); // convert back to float
|
||||
let diff = _mm_sub_ss(rounded, given); // diff = (rounded - given)
|
||||
let negHalf = _mm_set_ss(-0.5); // load -0.5f
|
||||
let mask = _mm_cmple_ss(diff, negHalf); // get all-ones if (rounded - given) < -0.5f
|
||||
let mut correction: i32 = 0;
|
||||
_mm_store_ss((&mut correction) as *mut _ as *mut _, mask); // get comparison result as integer
|
||||
return result - correction; // correct the result of rounding
|
||||
}
|
||||
#[cfg(not(target_feature = "sse2"))]
|
||||
return (x + 0.5).floor() as i32;
|
||||
}
|
||||
|
||||
|
||||
//+------------------------------------------------------------------------
|
||||
//
|
||||
// Function: CFloatFPU::SmallRound
|
||||
//
|
||||
// Synopsis: Convert given floating point value to nearest integer.
|
||||
// Half-integers are rounded up.
|
||||
//
|
||||
// Important: this routine is fast but restricted:
|
||||
// given x should be within (-(0x100000-.5) < x < (0x100000-.5))
|
||||
//
|
||||
// Details: Implementation has abnormal looking that use to confuse
|
||||
// many people. However, it indeed works, being tested
|
||||
// thoroughly on x86 and ia64 platforms for literally
|
||||
// each possible argument values in the given range.
|
||||
//
|
||||
// More details:
|
||||
// Implementation is based on the knowledge of floating point
|
||||
// value representation. This 32-bits value consists of three parts:
|
||||
// v & 0x80000000 = sign
|
||||
// v & 0x7F800000 = exponent
|
||||
// v & 0x007FFFFF - mantissa
|
||||
//
|
||||
// Let N to be a floating point number within -0x400000 <= N <= 0x3FFFFF.
|
||||
// The sum (S = 0xC00000 + N) thus will satisfy Ox800000 <= S <= 0xFFFFFF.
|
||||
// All the numbers within this range (sometimes referred to as "binade")
|
||||
// have same position of most significant bit, i.e. 0x800000.
|
||||
// Therefore they are normalized equal way, thus
|
||||
// providing the weights on mantissa's bits to be the same
|
||||
// as integer numbers have. In other words, to get
|
||||
// integer value of floating point S, when Ox800000 <= S <= 0xFFFFFF,
|
||||
// we can just throw away the exponent and sign, and add assumed
|
||||
// most significant bit (that is always 1 and therefore is not stored
|
||||
// in floating point value):
|
||||
// (int)S = (<float S as int> & 0x7FFFFF | 0x800000);
|
||||
// To get given N in as integer, we need to subtract back
|
||||
// the value 0xC00000 that was added in order to obtain
|
||||
// proper normalization:
|
||||
// N = (<float S as int> & 0x7FFFFF | 0x800000) - 0xC00000.
|
||||
// or
|
||||
// N = (<float S as int> & 0x7FFFFF ) - 0x400000.
|
||||
//
|
||||
// Hopefully, the text above explains how
|
||||
// following routine works:
|
||||
// int SmallRound1(float x)
|
||||
// {
|
||||
// union
|
||||
// {
|
||||
// __int32 i;
|
||||
// float f;
|
||||
// } u;
|
||||
//
|
||||
// u.f = x + float(0x00C00000);
|
||||
// return ((u.i - (int)0x00400000) << 9) >> 9;
|
||||
// }
|
||||
// Unfortunatelly it is imperfect, due to the way how FPU
|
||||
// use to round intermediate calculation results.
|
||||
// By default, rounding mode is set to "nearest".
|
||||
// This means that when it calculates N+float(0x00C00000),
|
||||
// the 80-bit precise result will not fit in 32-bit float,
|
||||
// so some least significant bits will be thrown away.
|
||||
// Rounding to nearest means that S consisting of intS + fraction,
|
||||
// where 0 <= fraction < 1, will be converted to intS
|
||||
// when fraction < 0.5 and to intS+1 if fraction > 0.5.
|
||||
// What would happen with fraction exactly equal to 0.5?
|
||||
// Smart thing: S will go to intS if intS is even and
|
||||
// to intS+1 if intS is odd. In other words, half-integers
|
||||
// are rounded to nearest even number.
|
||||
// This FPU feature apparently is useful to minimize
|
||||
// average rounding error when somebody is, say,
|
||||
// digitally simulating electrons' behavior in plasma.
|
||||
// However for graphics this is not desired.
|
||||
//
|
||||
// We want to move half-integers up, therefore
|
||||
// define SmallRound(x) as {return SmallRound1(x*2+.5) >> 1;}.
|
||||
// This may require more comments.
|
||||
// Let given x = i+f, where i is integer and f is fraction, 0 <= f < 1.
|
||||
// Let's wee what is y = x*2+.5:
|
||||
// y = i*2 + (f*2 + .5) = i*2 + g, where g = f*2 + .5;
|
||||
// If "f" is in the range 0 <= f < .5 (so correct rounding result should be "i"),
|
||||
// then range for "g" is .5 <= g < 1.5. The very first value, .5 will force
|
||||
// SmallRound1 result to be "i*2", due to round-to-even rule; the remaining
|
||||
// will lead to "i*2+1". Consequent shift will throw away extra "1" and give
|
||||
// us desired "i".
|
||||
// When "f" in in the range .5 <= f < 1, then 1.5 <= g < 2.5.
|
||||
// All these values will round to 2, so SmallRound1 will return (2*i+2),
|
||||
// and the final shift will give desired 1+1.
|
||||
//
|
||||
// To get final routine looking we need to transform the combines
|
||||
// expression for u.f:
|
||||
// (x*2) + .5 + float(0x00C00000) ==
|
||||
// (x + (.25 + double(0x00600000)) )*2
|
||||
// Note that the ratio "2" means nothing for following operations,
|
||||
// since it affects only exponent bits that are ignored anyway.
|
||||
// So we can save some processor cycles avoiding this multiplication.
|
||||
//
|
||||
// And, the very final beautification:
|
||||
// to avoid subtracting 0x00400000 let's ignore this bit.
|
||||
// This mean that we effectively decrease available range by 1 bit,
|
||||
// but we're chasing for performance and found it acceptable.
|
||||
// So
|
||||
// return ((u.i - (int)0x00400000) << 9) >> 9;
|
||||
// is converted to
|
||||
// return ((u.i ) << 10) >> 10;
|
||||
// Eventually, will found that final shift by 10 bits may be combined
|
||||
// with shift by 1 in the definition {return SmallRound1(x*2+.5) >> 1;},
|
||||
// we'll just shift by 11 bits. That's it.
|
||||
//
|
||||
//-------------------------------------------------------------------------
|
||||
fn SmallRound(x: f32) -> i32
|
||||
{
|
||||
//AssertPrecisionAndRoundingMode();
|
||||
debug_assert!(-(0x100000 as f64 -0.5) < x as f64 && (x as f64) < (0x100000 as f64 -0.5));
|
||||
|
||||
|
||||
let fi = (x as f64 + (0x00600000 as f64 + 0.25)) as f32;
|
||||
let result = ((fi.to_bits() as i32) << 10) >> 11;
|
||||
|
||||
debug_assert!(x < (result as f32) + 0.5 && x >= (result as f32) - 0.5);
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn Round(x: f32) -> i32
|
||||
{
|
||||
// cut off sign
|
||||
let xAbs: u32 = x.to_bits() & 0x7FFFFFFF;
|
||||
|
||||
return if xAbs <= sc_uBinaryFloatSmallMax {SmallRound(x)} else {LargeRound(x)};
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! TOREAL { ($e: expr) => { $e as REAL } }
|
|
@ -1,190 +0,0 @@
|
|||
/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2012 VMware, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
*/
|
||||
|
||||
use std::ops::Index;
|
||||
use crate::OutputVertex;
|
||||
#[derive(Debug)]
|
||||
struct Vertex {
|
||||
x: f32,
|
||||
y: f32,
|
||||
coverage: f32
|
||||
}
|
||||
#[derive(Debug)]
|
||||
struct Triangle {
|
||||
v: [Vertex; 3],
|
||||
}
|
||||
|
||||
impl Index<usize> for Triangle {
|
||||
type Output = Vertex;
|
||||
|
||||
fn index(&self, index: usize) -> &Self::Output {
|
||||
&self.v[index]
|
||||
}
|
||||
}
|
||||
|
||||
// D3D11 mandates 8 bit subpixel precision:
|
||||
// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
|
||||
const FIXED_SHIFT: i32 = 8;
|
||||
const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
|
||||
|
||||
/* Proper rounding of float to integer */
|
||||
fn iround(mut v: f32) -> i64 {
|
||||
if v > 0.0 {
|
||||
v += 0.5;
|
||||
}
|
||||
if v < 0.0 {
|
||||
v -= 0.5;
|
||||
}
|
||||
return v as i64
|
||||
}
|
||||
|
||||
/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
// Rasterizes one triangle into an 8-bit coverage mask using fixed-point
// half-space (edge-function) tests, interpolating per-vertex coverage with
// barycentric coordinates. `buffer` is a row-major width*height mask;
// pixels outside the triangle are left untouched.
fn rast_triangle(buffer: &mut [u8], width: usize, height: usize, tri: &Triangle) {
    // Shift sample points to pixel centers before snapping to fixed point.
    let center_offset = -0.5;

    let mut coverage1 = tri[0].coverage;
    let mut coverage2 = tri[1].coverage;
    let mut coverage3 = tri[2].coverage;

    /* fixed point coordinates */
    let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
    let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
    let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));

    let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
    let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
    let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));

    /* Force correct vertex order */
    // `cross` is twice the signed area of the triangle; a positive value
    // means the winding is opposite to what the edge tests below expect,
    // so vertices 1 and 3 are swapped to flip it.
    let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
    if cross > 0 {
        std::mem::swap(&mut x1, &mut x3);
        std::mem::swap(&mut y1, &mut y3);
        // I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
        std::mem::swap(&mut coverage2, &mut coverage3);
    } else {
        std::mem::swap(&mut coverage1, &mut coverage3);
    }

    /* Deltas */
    let dx12 = x1 - x2;
    let dx23 = x2 - x3;
    let dx31 = x3 - x1;

    let dy12 = y1 - y2;
    let dy23 = y2 - y3;
    let dy31 = y3 - y1;

    /* Fixed-point deltas */
    // Per-whole-pixel steps for the edge functions (one pixel = FIXED_ONE).
    let fdx12 = dx12 << FIXED_SHIFT;
    let fdx23 = dx23 << FIXED_SHIFT;
    let fdx31 = dx31 << FIXED_SHIFT;

    let fdy12 = dy12 << FIXED_SHIFT;
    let fdy23 = dy23 << FIXED_SHIFT;
    let fdy31 = dy31 << FIXED_SHIFT;

    /* Bounding rectangle */
    // Integer pixel bounds of the triangle, clipped to the mask extents.
    let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
    let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;

    let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
    let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;

    minx = minx.max(0);
    maxx = maxx.min(width as i64 - 1);

    miny = miny.max(0);
    maxy = maxy.min(height as i64 - 1);

    /* Half-edge constants */
    let mut c1 = dy12 * x1 - dx12 * y1;
    let mut c2 = dy23 * x2 - dx23 * y2;
    let mut c3 = dy31 * x3 - dx31 * y3;

    /* Correct for top-left filling convention */
    // Biases pixels lying exactly on an edge so each boundary pixel is
    // claimed by exactly one of two triangles sharing that edge.
    if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
    if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
    if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }

    // Edge-function values at the top-left corner of the bounding box;
    // incrementally stepped through the loops below.
    let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
    let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
    let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
    //dbg!(minx, maxx, tri, cross);
    /* Perform rasterization */
    let mut buffer = &mut buffer[miny as usize * width..];
    for _y in miny..=maxy {
        let mut cx1 = cy1;
        let mut cx2 = cy2;
        let mut cx3 = cy3;

        for x in minx..=maxx {
            // Inside test: the pixel center is strictly inside all three
            // (bias-adjusted) half-planes.
            if cx1 > 0 && cx2 > 0 && cx3 > 0 {
                // cross is equal to 2*area of the triangle.
                // we can normalize cx by 2*area to get barycentric coords.
                let area = cross.abs() as f32;
                let bary = (cx1 as f32 / area, cx2 as f32 / area, cx3 as f32 / area);
                let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
                // Scale coverage [0,1] to a byte with round-to-nearest.
                let color = (coverages * 255. + 0.5) as u8;

                buffer[x as usize] = color;
            }

            // Step the edge functions one pixel to the right.
            cx1 -= fdy12;
            cx2 -= fdy23;
            cx3 -= fdy31;
        }

        // Step the edge functions one row down.
        cy1 += fdx12;
        cy2 += fdx23;
        cy3 += fdx31;

        // Advance the window to the next row of the mask.
        buffer = &mut buffer[width..];
    }
}
|
||||
|
||||
pub fn rasterize_to_mask(vertices: &[OutputVertex], width: u32, height: u32) -> Box<[u8]> {
|
||||
let mut mask = vec![0; (width * height) as usize];
|
||||
for n in (0..vertices.len()).step_by(3) {
|
||||
let tri =
|
||||
[&vertices[n], &vertices[n+1], &vertices[n+2]];
|
||||
|
||||
let tri = Triangle { v: [
|
||||
Vertex { x: tri[0].x, y: tri[0].y, coverage: tri[0].coverage},
|
||||
Vertex { x: tri[1].x, y: tri[1].y, coverage: tri[1].coverage},
|
||||
Vertex { x: tri[2].x, y: tri[2].y, coverage: tri[2].coverage}
|
||||
]
|
||||
};
|
||||
rast_triangle(&mut mask, width as usize, height as usize, &tri);
|
||||
}
|
||||
mask.into_boxed_slice()
|
||||
}
|
|
@ -1,201 +0,0 @@
|
|||
// Aliases mirroring the Win32 primitive type names so the ported C++
// code can keep its original spellings.
pub(crate) type LONG = i32;
pub(crate) type INT = i32;
pub(crate) type UINT = u32;
pub(crate) type ULONG = u32;
pub(crate) type DWORD = ULONG;
pub(crate) type WORD = u16;
pub(crate) type LONGLONG = i64;
pub(crate) type ULONGLONG = u64;
pub(crate) type BYTE = u8;
pub(crate) type FLOAT = f32;
pub(crate) type REAL = FLOAT;
// HRESULT is a signed 32-bit status code (severity bit in bit 31).
pub(crate) type HRESULT = LONG;
|
||||
|
||||
pub(crate) const S_OK: HRESULT = 0;
|
||||
pub(crate) const INTSAFE_E_ARITHMETIC_OVERFLOW: HRESULT = 0x80070216;
|
||||
pub(crate) const WGXERR_VALUEOVERFLOW: HRESULT = INTSAFE_E_ARITHMETIC_OVERFLOW;
|
||||
pub(crate) const WINCODEC_ERR_VALUEOVERFLOW: HRESULT = INTSAFE_E_ARITHMETIC_OVERFLOW;
|
||||
const fn MAKE_HRESULT(sev: LONG,fac: LONG,code: LONG) -> HRESULT {
|
||||
( (((sev)<<31) | ((fac)<<16) | ((code))) )
|
||||
}
|
||||
|
||||
const FACILITY_WGX: LONG = 0x898;
|
||||
|
||||
|
||||
const fn MAKE_WGXHR( sev: LONG, code: LONG) -> HRESULT {
|
||||
MAKE_HRESULT( sev, FACILITY_WGX, (code) )
|
||||
}
|
||||
|
||||
const fn MAKE_WGXHR_ERR( code: LONG ) -> HRESULT
|
||||
{
|
||||
MAKE_WGXHR( 1, code )
|
||||
}
|
||||
|
||||
// Non-failing (severity 0) WGX status codes.
pub const WGXHR_CLIPPEDTOEMPTY: HRESULT = MAKE_WGXHR(0, 1);
pub const WGXHR_EMPTYFILL: HRESULT = MAKE_WGXHR(0, 2);
pub const WGXHR_INTERNALTEMPORARYSUCCESS: HRESULT = MAKE_WGXHR(0, 3);
pub const WGXHR_RESETSHAREDHANDLEMANAGER: HRESULT = MAKE_WGXHR(0, 4);

// Failing (severity 1) WGX error codes.
pub const WGXERR_BADNUMBER: HRESULT = MAKE_WGXHR_ERR(0x00A);    // 4438
|
||||
|
||||
// Failure test for HRESULT values.
// NOTE(review): unlike the Win32 FAILED macro (which tests `hr < 0`),
// this treats ANY value other than S_OK as failure — including positive
// success codes such as WGXHR_CLIPPEDTOEMPTY. Confirm callers never pass
// non-S_OK success codes here before changing this.
pub fn FAILED(hr: HRESULT) -> bool {
    hr != S_OK
}
|
||||
/// Pointer types that have a canonical null value; lets the ported C++
/// code keep writing `NULL()` instead of choosing between
/// `ptr::null()` and `ptr::null_mut()` at each site.
pub trait NullPtr {
    /// Produces the null value of the implementing pointer type.
    fn make() -> Self;
}

impl<T> NullPtr for *mut T {
    fn make() -> Self {
        std::ptr::null_mut()
    }
}

impl<T> NullPtr for *const T {
    fn make() -> Self {
        std::ptr::null()
    }
}

/// Type-inferred null pointer, a stand-in for C++'s `NULL`.
pub fn NULL<T: NullPtr>() -> T {
    T::make()
}
|
||||
// Axis-aligned rectangle matching the Win32 RECT field layout.
#[derive(Default, Clone)]
pub struct RECT {
    pub left: LONG,
    pub top: LONG,
    pub right: LONG,
    pub bottom: LONG,
}
// Integer 2D point matching the Win32 POINT field layout.
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct POINT {
    pub x: LONG,
    pub y: LONG
}
|
||||
// 2D point with f32 coordinates (C++ MilPoint2F). Field names keep the
// original C++ capitalization.
#[derive(Clone, Copy)]
pub struct MilPoint2F
{
    pub X: FLOAT,
    pub Y: FLOAT,
}

// Integer rectangle expressed as origin plus size (C++ MilPointAndSizeL).
#[derive(Default, Clone)]
pub struct MilPointAndSizeL
{
    pub X: INT,
    pub Y: INT,
    pub Width: INT,
    pub Height: INT,
}
|
||||
|
||||
// Surface rectangles reuse the Win32 RECT shape.
pub type CMILSurfaceRect = RECT;

// Antialiasing modes understood by the rasterizer.
#[derive(PartialEq)]
pub enum MilAntiAliasMode {
    None = 0,
    EightByEight = 1,
}
// Fill rules for path rasterization.
#[derive(PartialEq, Clone, Copy)]
pub enum MilFillMode {
    Alternate = 0,
    Winding = 1,
}

// GDI+-style path point type flags; a point's low 3 bits give its kind
// and the high bit marks subpath closure.
pub const PathPointTypeStart: u8 = 0; // move, 1 point
pub const PathPointTypeLine: u8 = 1; // line, 1 point
pub const PathPointTypeBezier: u8 = 3; // default Bezier (= cubic Bezier), 3 points
pub const PathPointTypePathTypeMask: u8 = 0x07; // type mask (lowest 3 bits).
pub const PathPointTypeCloseSubpath: u8 = 0x80; // closed flag
|
||||
|
||||
use std::cell::RefCell;
|
||||
|
||||
use crate::{hwvertexbuffer::CHwVertexBuffer, OutputVertex};
|
||||
|
||||
|
||||
/// The C++ DynArray<T> container is represented by a plain Vec<T>.
pub type DynArray<T> = Vec<T>;

/// Extension methods giving Vec the DynArray API surface the ported
/// code expects.
pub trait DynArrayExts<T> {
    fn Reset(&mut self, shrink: bool);
    fn GetCount(&self) -> usize;
    fn SetCount(&mut self, count: usize);
    fn GetDataBuffer(&self) -> &[T];
}

impl<T> DynArrayExts<T> for DynArray<T> {
    /// Empties the array; when `shrink` is true, also releases capacity.
    fn Reset(&mut self, shrink: bool) {
        self.clear();
        if shrink {
            self.shrink_to_fit();
        }
    }

    /// Number of live elements.
    fn GetCount(&self) -> usize {
        self.len()
    }

    /// Shrinks the logical length to `count`. Growing is not supported:
    /// panics if `count` exceeds the current length.
    fn SetCount(&mut self, count: usize) {
        assert!(count <= self.len());
        self.truncate(count);
    }

    /// Borrows the elements as a slice.
    fn GetDataBuffer(&self) -> &[T] {
        self
    }
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct CD3DDeviceLevel1 {
|
||||
pub clipRect: MilPointAndSizeL,
|
||||
pub output: RefCell<Vec<OutputVertex>>
|
||||
}
|
||||
impl CD3DDeviceLevel1 {
|
||||
pub fn new() -> Self { Default::default() }
|
||||
pub fn GetClipRect(&self, rect: &mut MilPointAndSizeL) {
|
||||
*rect = self.clipRect.clone();
|
||||
}
|
||||
pub fn GetViewport(&self) -> MilPointAndSizeL { self.clipRect.clone() }
|
||||
pub fn GetVB_XYZDUV2(&self) -> Box<CHwVertexBuffer> {
|
||||
Box::new(Default::default())
|
||||
}
|
||||
|
||||
}
|
||||
// Placeholder for the C++ CHwPipelineBuilder type.
pub struct CHwPipelineBuilder;

// Zero-sized marker types distinguishing coordinate spaces at the type
// level (shape-local vs. device space) — presumably used as phantom
// type parameters; confirm against the transform code.
pub mod CoordinateSpace {
    #[derive(Default, Clone)]
    pub struct Shape;
    #[derive(Default, Clone)]
    pub struct Device;
}
|
||||
|
||||
// Minimal shape interface: only the fill rule is exposed here.
pub trait IShapeData {
    fn GetFillMode(&self) -> MilFillMode;
}

// Bitmask describing which attributes a vertex carries
// (see MilVertexFormatAttribute for the individual bits).
pub type MilVertexFormat = DWORD;

// Attribute bits for MilVertexFormat. Note the UVn values are
// cumulative masks (0x100, 0x300, 0x700, ...): MILVFAttrUVn covers
// texture-coordinate sets 1 through n.
pub enum MilVertexFormatAttribute {
    MILVFAttrNone = 0x0,
    MILVFAttrXY = 0x1,
    MILVFAttrZ = 0x2,
    MILVFAttrXYZ = 0x3,
    MILVFAttrNormal = 0x4,
    MILVFAttrDiffuse = 0x8,
    MILVFAttrSpecular = 0x10,
    MILVFAttrUV1 = 0x100,
    MILVFAttrUV2 = 0x300,
    MILVFAttrUV3 = 0x700,
    MILVFAttrUV4 = 0xf00,
    MILVFAttrUV5 = 0x1f00,
    MILVFAttrUV6 = 0x3f00,
    MILVFAttrUV7 = 0x7f00,
    MILVFAttrUV8 = 0xff00, // Vertex fields that are pre-generated
}
|
||||
|
||||
/// Placeholder for the C++ CHwPipeline type.
pub struct CHwPipeline;

/// Placeholder for the C++ CBufferDispenser type.
pub struct CBufferDispenser;

/// A 2D position with an `a` value (presumably alpha/coverage —
/// confirm against the vertex-builder code).
#[derive(Default)]
pub struct PointXYA {
    pub x: f32,
    pub y: f32,
    pub a: f32,
}
|
|
@ -99,8 +99,6 @@ localization-ffi = { path = "../../../../intl/l10n/rust/localization-ffi" }
|
|||
processtools = { path = "../../../components/processtools" }
|
||||
qcms = { path = "../../../../gfx/qcms", features = ["c_bindings", "neon"], default-features = false }
|
||||
|
||||
wpf-gpu-raster = { git = "https://github.com/FirefoxGraphics/wpf-gpu-raster", rev = "11fc561cd9d9c206474efbdda78f73660254b510" }
|
||||
|
||||
# Force url to stay at 2.1.0. See bug 1734538.
|
||||
url = "=2.1.0"
|
||||
|
||||
|
|
|
@ -71,7 +71,6 @@ extern crate neqo_glue;
|
|||
extern crate wgpu_bindings;
|
||||
|
||||
extern crate qcms;
|
||||
extern crate wpf_gpu_raster;
|
||||
|
||||
extern crate unic_langid;
|
||||
extern crate unic_langid_ffi;
|
||||
|
|
Загрузка…
Ссылка в новой задаче