Backed out 2 changesets (bug 1803604) for causing talos crashes on pdfpaint.

Backed out changeset 0be171a7d6ac (bug 1803604)
Backed out changeset 9c9e6c4e7864 (bug 1803604)
Cosmin Sabou 2022-12-03 02:59:04 +02:00
Parent 6b64e9513d
Commit bc234a7f64
20 changed files: 45 additions and 2388 deletions


@@ -107,11 +107,6 @@ git = "https://github.com/FirefoxGraphics/wpf-gpu-raster"
replace-with = "vendored-sources"
rev = "f0d95ce14af8a8de74f469dbad715c4064fca2e1"
[source."https://github.com/FirefoxGraphics/aa-stroke"]
git = "https://github.com/FirefoxGraphics/aa-stroke"
replace-with = "vendored-sources"
rev = "d5cb0fa467e66fdd2deab3211be2284ed1be5da7"
[source.crates-io]
replace-with = "vendored-sources"

Cargo.lock (generated), 9 changed lines

@@ -2,14 +2,6 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "aa-stroke"
version = "0.1.0"
source = "git+https://github.com/FirefoxGraphics/aa-stroke?rev=d5cb0fa467e66fdd2deab3211be2284ed1be5da7#d5cb0fa467e66fdd2deab3211be2284ed1be5da7"
dependencies = [
"euclid",
]
[[package]]
name = "adler"
version = "1.0.2"
@@ -2135,7 +2127,6 @@ dependencies = [
name = "gkrust-shared"
version = "0.1.0"
dependencies = [
"aa-stroke",
"app_services_logger",
"audio_thread_priority",
"audioipc-client",


@@ -9,7 +9,6 @@
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/StaticPrefs_gfx.h"
#include "mozilla/gfx/AAStroke.h"
#include "mozilla/gfx/Blur.h"
#include "mozilla/gfx/DrawTargetSkia.h"
#include "mozilla/gfx/Helpers.h"
@@ -2520,133 +2519,6 @@ static Maybe<WGR::VertexBuffer> GeneratePathVertexBuffer(
return Some(vb);
}
static inline AAStroke::LineJoin ToAAStrokeLineJoin(JoinStyle aJoin) {
switch (aJoin) {
case JoinStyle::BEVEL:
return AAStroke::LineJoin::Bevel;
case JoinStyle::ROUND:
return AAStroke::LineJoin::Round;
case JoinStyle::MITER:
case JoinStyle::MITER_OR_BEVEL:
return AAStroke::LineJoin::Miter;
}
return AAStroke::LineJoin::Miter;
}
static inline AAStroke::LineCap ToAAStrokeLineCap(CapStyle aCap) {
switch (aCap) {
case CapStyle::BUTT:
return AAStroke::LineCap::Butt;
case CapStyle::ROUND:
return AAStroke::LineCap::Round;
case CapStyle::SQUARE:
return AAStroke::LineCap::Square;
}
return AAStroke::LineCap::Butt;
}
static inline Point WGRPointToPoint(const WGR::Point& aPoint) {
return Point(IntPoint(aPoint.x, aPoint.y)) * (1.0f / 16.0f);
}
// Generates a vertex buffer for a stroked path using aa-stroke.
static Maybe<AAStroke::VertexBuffer> GenerateStrokeVertexBuffer(
const QuantizedPath& aPath, const StrokeOptions* aStrokeOptions,
float aScale) {
AAStroke::StrokeStyle style = {aStrokeOptions->mLineWidth * aScale,
ToAAStrokeLineCap(aStrokeOptions->mLineCap),
ToAAStrokeLineJoin(aStrokeOptions->mLineJoin),
aStrokeOptions->mMiterLimit * aScale};
if (style.width <= 0.0f || !IsFinite(style.width) ||
style.miter_limit <= 0.0f || !IsFinite(style.miter_limit)) {
return Nothing();
}
AAStroke::Stroker* s = AAStroke::aa_stroke_new(&style);
bool valid = true;
size_t curPoint = 0;
for (size_t curType = 0; valid && curType < aPath.mPath.num_types;) {
// Verify that we are at the start of a sub-path.
if ((aPath.mPath.types[curType] & WGR::PathPointTypePathTypeMask) !=
WGR::PathPointTypeStart) {
valid = false;
break;
}
// Find where the next sub-path starts so we can locate the end.
size_t endType = curType + 1;
for (; endType < aPath.mPath.num_types; endType++) {
if ((aPath.mPath.types[endType] & WGR::PathPointTypePathTypeMask) ==
WGR::PathPointTypeStart) {
break;
}
}
// Check if the path is closed. This is a flag modifying the last type.
bool closed =
(aPath.mPath.types[endType - 1] & WGR::PathPointTypeCloseSubpath) != 0;
for (; curType < endType; curType++) {
// If this is the last type and the sub-path is not closed, determine if
// this segment should be capped.
bool end = curType + 1 == endType && !closed;
switch (aPath.mPath.types[curType] & WGR::PathPointTypePathTypeMask) {
case WGR::PathPointTypeStart: {
if (curPoint + 1 > aPath.mPath.num_points) {
valid = false;
break;
}
Point p1 = WGRPointToPoint(aPath.mPath.points[curPoint]);
AAStroke::aa_stroke_move_to(s, p1.x, p1.y, closed);
if (end) {
AAStroke::aa_stroke_line_to(s, p1.x, p1.y, true);
}
curPoint++;
break;
}
case WGR::PathPointTypeLine: {
if (curPoint + 1 > aPath.mPath.num_points) {
valid = false;
break;
}
Point p1 = WGRPointToPoint(aPath.mPath.points[curPoint]);
AAStroke::aa_stroke_line_to(s, p1.x, p1.y, end);
curPoint++;
break;
}
case WGR::PathPointTypeBezier: {
if (curPoint + 3 > aPath.mPath.num_points) {
valid = false;
break;
}
Point p1 = WGRPointToPoint(aPath.mPath.points[curPoint]);
Point p2 = WGRPointToPoint(aPath.mPath.points[curPoint + 1]);
Point p3 = WGRPointToPoint(aPath.mPath.points[curPoint + 2]);
AAStroke::aa_stroke_curve_to(s, p1.x, p1.y, p2.x, p2.y, p3.x, p3.y,
end);
curPoint += 3;
break;
}
default:
MOZ_ASSERT(false, "Unknown WGR path point type");
valid = false;
break;
}
}
// Close the sub-path if necessary.
if (valid && closed) {
AAStroke::aa_stroke_close(s);
}
}
Maybe<AAStroke::VertexBuffer> result;
if (valid) {
AAStroke::VertexBuffer vb = AAStroke::aa_stroke_finish(s);
if (!vb.len) {
AAStroke::aa_stroke_vertex_buffer_release(vb);
} else {
result = Some(vb);
}
}
AAStroke::aa_stroke_release(s);
return result;
}
// Search the path cache for any entries stored in the path vertex buffer and
// remove them.
void PathCache::ClearVertexRanges() {
@@ -2667,27 +2539,6 @@ inline bool DrawTargetWebgl::ShouldAccelPath(
return mWebglValid && SupportsDrawOptions(aOptions) && PrepareContext();
}
// For now, we only support stroking solid color patterns to limit artifacts
// from blending of overlapping geometry generated by AAStroke.
static inline bool SupportsAAStroke(const Pattern& aPattern,
const DrawOptions& aOptions,
const StrokeOptions& aStrokeOptions) {
if (aStrokeOptions.mDashPattern) {
return false;
}
switch (aOptions.mCompositionOp) {
case CompositionOp::OP_SOURCE:
return true;
case CompositionOp::OP_OVER:
return aPattern.GetType() == PatternType::COLOR &&
static_cast<const ColorPattern&>(aPattern).mColor.a *
aOptions.mAlpha ==
1.0f;
default:
return false;
}
}
bool DrawTargetWebgl::SharedContext::DrawPathAccel(
const Path* aPath, const Pattern& aPattern, const DrawOptions& aOptions,
const StrokeOptions* aStrokeOptions, const ShadowOptions* aShadow,
@@ -2781,7 +2632,7 @@ bool DrawTargetWebgl::SharedContext::DrawPathAccel(
}
if (mPathVertexCapacity > 0 && !handle && entry && !aShadow &&
SupportsPattern(aPattern) &&
SupportsPattern(aPattern) && (!aStrokeOptions || mPathAccelStroke) &&
entry->GetPath().mPath.num_types <= mPathMaxComplexity) {
if (entry->GetVertexRange().IsValid()) {
// If there is a valid cached vertex data in the path vertex buffer, then
@@ -2795,59 +2646,43 @@ bool DrawTargetWebgl::SharedContext::DrawPathAccel(
// printf_stderr("Generating... verbs %d, points %d\n",
// int(pathSkia->GetPath().countVerbs()),
// int(pathSkia->GetPath().countPoints()));
Maybe<WGR::VertexBuffer> wgrVB;
Maybe<AAStroke::VertexBuffer> strokeVB;
if (!aStrokeOptions) {
wgrVB = GeneratePathVertexBuffer(
entry->GetPath(), IntRect(-intBounds.TopLeft(), mViewportSize));
Maybe<WGR::VertexBuffer> vb;
if (aStrokeOptions) {
// If stroking, then generate a path to fill the stroked region. This
// path will need to be quantized again because it differs from the path
// used for the cache entry, but this allows us to avoid generating a
// fill path on a cache hit.
SkPaint paint;
if (StrokeOptionsToPaint(paint, *aStrokeOptions)) {
Maybe<SkRect> cullRect;
Matrix invTransform = currentTransform;
if (invTransform.Invert()) {
// Transform the stroking clip rect from device space to local space.
Rect invRect = invTransform.TransformBounds(Rect(mClipRect));
invRect.RoundOut();
cullRect = Some(RectToSkRect(invRect));
}
SkPath fillPath;
if (paint.getFillPath(pathSkia->GetPath(), &fillPath,
cullRect.ptrOr(nullptr),
ComputeResScaleForStroking(currentTransform))) {
// printf_stderr(" stroke fill... verbs %d, points %d\n",
// int(fillPath.countVerbs()),
// int(fillPath.countPoints()));
if (Maybe<QuantizedPath> qp = GenerateQuantizedPath(
fillPath, intBounds, currentTransform)) {
vb = GeneratePathVertexBuffer(
*qp, IntRect(-intBounds.TopLeft(), mViewportSize));
}
}
}
} else {
if (mPathAAStroke &&
SupportsAAStroke(aPattern, aOptions, *aStrokeOptions)) {
auto scaleFactors = currentTransform.ScaleFactors();
if (scaleFactors.AreScalesSame()) {
strokeVB = GenerateStrokeVertexBuffer(
entry->GetPath(), aStrokeOptions, scaleFactors.xScale);
}
}
if (!strokeVB && mPathWGRStroke) {
// If stroking, then generate a path to fill the stroked region. This
// path will need to be quantized again because it differs from the
// path used for the cache entry, but this allows us to avoid
// generating a fill path on a cache hit.
SkPaint paint;
if (StrokeOptionsToPaint(paint, *aStrokeOptions)) {
Maybe<SkRect> cullRect;
Matrix invTransform = currentTransform;
if (invTransform.Invert()) {
// Transform the stroking clip rect from device space to local
// space.
Rect invRect = invTransform.TransformBounds(Rect(mClipRect));
invRect.RoundOut();
cullRect = Some(RectToSkRect(invRect));
}
SkPath fillPath;
if (paint.getFillPath(pathSkia->GetPath(), &fillPath,
cullRect.ptrOr(nullptr),
ComputeResScaleForStroking(currentTransform))) {
// printf_stderr(" stroke fill... verbs %d, points %d\n",
// int(fillPath.countVerbs()),
// int(fillPath.countPoints()));
if (Maybe<QuantizedPath> qp = GenerateQuantizedPath(
fillPath, intBounds, currentTransform)) {
wgrVB = GeneratePathVertexBuffer(
*qp, IntRect(-intBounds.TopLeft(), mViewportSize));
}
}
}
}
vb = GeneratePathVertexBuffer(
entry->GetPath(), IntRect(-intBounds.TopLeft(), mViewportSize));
}
if (wgrVB || strokeVB) {
const uint8_t* vbData =
wgrVB ? (const uint8_t*)wgrVB->data : (const uint8_t*)strokeVB->data;
size_t vbLen = wgrVB ? wgrVB->len : strokeVB->len;
uint32_t vertexBytes = uint32_t(
std::min(vbLen * sizeof(WGR::OutputVertex), size_t(UINT32_MAX)));
// printf_stderr(" ... %d verts, %d bytes\n", int(vbLen),
if (vb) {
uint32_t vertexBytes = vb->len * sizeof(WGR::OutputVertex);
// printf_stderr(" ... %d verts, %d bytes\n", int(vb->len),
// int(vertexBytes));
if (vertexBytes > mPathVertexCapacity - mPathVertexOffset &&
vertexBytes <= mPathVertexCapacity - sizeof(kRectVertexData)) {
@@ -2865,30 +2700,22 @@ bool DrawTargetWebgl::SharedContext::DrawPathAccel(
// available offset in the buffer.
PathVertexRange vertexRange(
uint32_t(mPathVertexOffset / sizeof(WGR::OutputVertex)),
uint32_t(vbLen));
uint32_t(vb->len));
if (entry) {
entry->SetVertexRange(vertexRange);
}
// printf_stderr(" ... offset %d\n", mPathVertexOffset);
mWebgl->RawBufferSubData(LOCAL_GL_ARRAY_BUFFER, mPathVertexOffset,
vbData, vertexBytes);
(const uint8_t*)vb->data, vertexBytes);
mPathVertexOffset += vertexBytes;
if (wgrVB) {
WGR::wgr_vertex_buffer_release(wgrVB.ref());
} else {
AAStroke::aa_stroke_vertex_buffer_release(strokeVB.ref());
}
wgr_vertex_buffer_release(vb.ref());
// Finally, draw the uploaded vertex data.
mCurrentTarget->mProfile.OnCacheMiss();
return DrawRectAccel(Rect(intBounds.TopLeft(), Size(1, 1)), aPattern,
aOptions, Nothing(), nullptr, false, true, true,
false, nullptr, &vertexRange);
}
if (wgrVB) {
WGR::wgr_vertex_buffer_release(wgrVB.ref());
} else {
AAStroke::aa_stroke_vertex_buffer_release(strokeVB.ref());
}
wgr_vertex_buffer_release(vb.ref());
// If we failed to draw the vertex data for some reason, then fall through
// to the texture rasterization path.
}
@@ -3908,8 +3735,7 @@ void DrawTargetWebgl::SharedContext::CachePrefs() {
mPathMaxComplexity =
StaticPrefs::gfx_canvas_accelerated_gpu_path_complexity();
mPathAAStroke = StaticPrefs::gfx_canvas_accelerated_aa_stroke_enabled();
mPathWGRStroke = StaticPrefs::gfx_canvas_accelerated_stroke_to_fill_path();
mPathAccelStroke = StaticPrefs::gfx_canvas_accelerated_gpu_path_stroke();
}
// For use within CanvasRenderingContext2D, called on BorrowDrawTarget.


@@ -176,10 +176,8 @@ class DrawTargetWebgl : public DrawTarget, public SupportsWeakPtr {
uint32_t mPathVertexCapacity = 0;
// The maximum supported type complexity of a GPU path.
uint32_t mPathMaxComplexity = 0;
// Whether to accelerate stroked paths with AAStroke.
bool mPathAAStroke = true;
// Whether to accelerate stroked paths with WGR.
bool mPathWGRStroke = false;
// Whether to accelerate stroked paths.
bool mPathAccelStroke = false;
RefPtr<WebGLProgramJS> mSolidProgram;
RefPtr<WebGLUniformLocationJS> mSolidProgramViewport;
RefPtr<WebGLUniformLocationJS> mSolidProgramAA;


@@ -1,47 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_GFX_AA_STROKE_H
#define MOZILLA_GFX_AA_STROKE_H
#include <stddef.h>
namespace AAStroke {
enum class LineCap { Round, Square, Butt };
enum class LineJoin { Round, Miter, Bevel };
struct StrokeStyle {
float width;
LineCap cap;
LineJoin join;
float miter_limit;
};
struct Stroker;
struct OutputVertex {
float x;
float y;
float coverage;
};
struct VertexBuffer {
OutputVertex* data;
size_t len;
};
extern "C" {
Stroker* aa_stroke_new(const StrokeStyle* style);
void aa_stroke_move_to(Stroker* s, float x, float y, bool closed);
void aa_stroke_line_to(Stroker* s, float x, float y, bool end);
void aa_stroke_curve_to(Stroker* s, float c1x, float c1y, float c2x, float c2y,
float x, float y, bool end);
void aa_stroke_close(Stroker* s);
VertexBuffer aa_stroke_finish(Stroker* s);
void aa_stroke_vertex_buffer_release(VertexBuffer vb);
void aa_stroke_release(Stroker* s);
};
} // namespace AAStroke
#endif // MOZILLA_GFX_AA_STROKE_H
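
For orientation, a minimal sketch (not from the tree) of how the C API declared in this header is driven; it mirrors the call sequence GenerateStrokeVertexBuffer uses above, except that the resulting triangle list is printed instead of uploaded to a GPU vertex buffer:

#include "mozilla/gfx/AAStroke.h"
#include <cstdio>

// Sketch only: builds an antialiased stroke mesh for a short open polyline.
static void StrokeTriangleListExample() {
  AAStroke::StrokeStyle style = {4.0f,                       // width
                                 AAStroke::LineCap::Round,   // cap
                                 AAStroke::LineJoin::Miter,  // join
                                 10.0f};                     // miter_limit
  AAStroke::Stroker* s = AAStroke::aa_stroke_new(&style);
  AAStroke::aa_stroke_move_to(s, 10.0f, 10.0f, /* closed */ false);
  AAStroke::aa_stroke_line_to(s, 100.0f, 100.0f, /* end */ false);
  AAStroke::aa_stroke_line_to(s, 100.0f, 10.0f, /* end */ true);  // capped end
  AAStroke::VertexBuffer vb = AAStroke::aa_stroke_finish(s);
  for (size_t i = 0; i < vb.len; i++) {
    // Each vertex carries a coverage value used for antialiasing.
    printf("%f %f %f\n", vb.data[i].x, vb.data[i].y, vb.data[i].coverage);
  }
  AAStroke::aa_stroke_vertex_buffer_release(vb);
  AAStroke::aa_stroke_release(s);
}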


@@ -18,11 +18,6 @@ struct Point {
int32_t x;
int32_t y;
};
constexpr uint8_t PathPointTypeStart = 0;
constexpr uint8_t PathPointTypeLine = 1;
constexpr uint8_t PathPointTypeBezier = 3;
constexpr uint8_t PathPointTypePathTypeMask = 0x07;
constexpr uint8_t PathPointTypeCloseSubpath = 0x80;
struct Path {
FillMode fill_mode;
const Point* points;


@@ -49,7 +49,6 @@ EXPORTS.mozilla += [
]
EXPORTS.mozilla.gfx += [
"AAStroke.h",
"CompositorHitTestInfo.h",
"WPFGpuRaster.h",
]


@@ -5636,18 +5636,12 @@
value: 4000
mirror: always
# Whether to accelerate stroked paths by converting them to fill paths.
- name: gfx.canvas.accelerated.stroke-to-fill-path
# Whether to accelerate stroked paths.
- name: gfx.canvas.accelerated.gpu-path-stroke
type: RelaxedAtomicBool
value: false
mirror: always
# Whether to use aa-stroke to accelerate stroked paths.
- name: gfx.canvas.accelerated.aa-stroke.enabled
type: RelaxedAtomicBool
value: true
mirror: always
# 0x7fff is the maximum supported xlib surface size and is more than enough for canvases.
- name: gfx.canvas.max-size
type: RelaxedAtomicInt32


@@ -1,12 +1,6 @@
# cargo-vet audits file
[[audits.aa-stroke]]
who = "Lee Salzman <lsalzman@mozilla.com>"
criteria = "safe-to-deploy"
version = "0.1.0"
notes = "Written and maintained by Gfx team at Mozilla."
[[audits.android_logger]]
who = "Jan-Erik Rediger <jrediger@mozilla.com>"
criteria = "safe-to-deploy"


@@ -1 +0,0 @@
{"files":{".github/workflows/rust.yml":"e859b12cfed145b66e4fbf1571dde15880359717ca5b0a9720341229183f2c6f","Cargo.toml":"da37548ce129b1d813f6aeb45f0bdb73afb888497c608e884a6f0b6bdb1d0a4b","README.md":"60b34cfa653114d5054009696df2ed2ea1d4926a6bc312d0cac4b84845c2beff","examples/simple.rs":"c196e79568fe4be31a08374aa451c70c9377db5428aef924a985e069c12ed91e","src/bezierflattener.rs":"79240616f09e8dae75fa310d9f2a0dcab7cb2adda465a088d3ba2df66c5c08fa","src/c_bindings.rs":"6b22f6a0557040fce12f14cc1ac1ce11bf5ca41c64d3f79aba414d30f35d3caa","src/lib.rs":"7ee17a94d6c4868c28d8f1d1b37aa937e9dd2f4477e145c09c941e900717b861","src/tri_rasterize.rs":"fb6f595ab9340d8ea6429b41638c378bbd772c8e4d8f7793e225624c12cd3a21"},"package":null}


@@ -1,20 +0,0 @@
name: Rust
on:
push:
pull_request:
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build
run: cargo build --verbose --features=c_bindings
- name: Run tests
run: cargo test --verbose --features=c_bindings

third_party/rust/aa-stroke/Cargo.toml (vendored), 17 changed lines

@@ -1,17 +0,0 @@
[package]
name = "aa-stroke"
version = "0.1.0"
edition = "2021"
license = "MIT"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
euclid = "0.22.7"
[dev-dependencies]
png = "0.17.7"
[features]
default = ["c_bindings"]
c_bindings = []

third_party/rust/aa-stroke/README.md (vendored), 12 changed lines

@@ -1,12 +0,0 @@
Takes a path and produces a triangle mesh that corresponds to the antialiased stroked path.
The approach here is naive and only works for opaquely filled paths. Overlapping areas can
end up with seams or otherwise incorrect coverage values.
Transforms with uniform scale can be supported by scaling the input points and the stroke width
before passing them to the stroker. Other transforms are not currently (or ever?) supported.
### TODO
- using triangle strips instead of triangle lists
- handle curves more efficiently than just flattening to lines
- handle cusps of curves more correctly
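
To illustrate the uniform-scale note above, a hypothetical helper (not part of the crate) that pre-scales both the points and the stroke width before stroking, much like GenerateStrokeVertexBuffer multiplies mLineWidth by the transform's scale factor:

#include "mozilla/gfx/AAStroke.h"

// Sketch: stroke a fixed two-point path under a uniform scale by scaling the
// inputs; the caller releases the result with aa_stroke_vertex_buffer_release.
static AAStroke::VertexBuffer StrokeWithUniformScale(float aScale) {
  AAStroke::StrokeStyle style = {2.0f * aScale, AAStroke::LineCap::Butt,
                                 AAStroke::LineJoin::Bevel, 10.0f};
  AAStroke::Stroker* s = AAStroke::aa_stroke_new(&style);
  AAStroke::aa_stroke_move_to(s, 20.0f * aScale, 20.0f * aScale, false);
  AAStroke::aa_stroke_line_to(s, 80.0f * aScale, 40.0f * aScale, true);
  AAStroke::VertexBuffer vb = AAStroke::aa_stroke_finish(s);
  AAStroke::aa_stroke_release(s);
  return vb;
}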

third_party/rust/aa-stroke/examples/simple.rs (vendored), 97 changed lines

@@ -1,97 +0,0 @@
use aa_stroke::{StrokeStyle, LineCap, LineJoin, Point, Stroker, tri_rasterize::rasterize_to_mask};
fn write_image(data: &[u8], path: &str, width: u32, height: u32) {
use std::path::Path;
use std::fs::File;
use std::io::BufWriter;
/*let mut png_data: Vec<u8> = vec![0; (width * height * 3) as usize];
let mut i = 0;
for pixel in data {
png_data[i] = ((pixel >> 16) & 0xff) as u8;
png_data[i + 1] = ((pixel >> 8) & 0xff) as u8;
png_data[i + 2] = ((pixel >> 0) & 0xff) as u8;
i += 3;
}*/
let path = Path::new(path);
let file = File::create(path).unwrap();
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, width, height); // Width is 2 pixels and height is 1.
encoder.set_color(png::ColorType::Grayscale);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header().unwrap();
writer.write_image_data(&data).unwrap(); // Save
}
// WpfGfx uses CShapeBase which has a set of Figures
// Figures have FigureFlags which include FigureFlagClosed
// so that we can know ahead of time whether the figure/subpath
// is closed
// How do we handle transformed paths? D2D seems to only support transforms that
// can be applied before stroking. (ones with uniform scale?)
fn main() {
let mut stroker = Stroker::new(&StrokeStyle{
cap: LineCap::Round,
join: LineJoin::Bevel,
width: 20.,
..Default::default()});
stroker.move_to(Point::new(20., 20.), false);
stroker.line_to(Point::new(100., 100.));
stroker.line_to_capped(Point::new(110., 20.));
stroker.move_to(Point::new(120., 20.), true);
stroker.line_to(Point::new(120., 50.));
stroker.line_to(Point::new(140., 50.));
stroker.close();
stroker.move_to(Point::new(20., 160.), true);
stroker.curve_to(Point::new(100., 160.), Point::new(100., 180.), Point::new(20., 180.));
stroker.close();
let stroked = stroker.finish();
dbg!(&stroked);
let mask = rasterize_to_mask(&stroked, 200, 200);
write_image(&mask,"out.png", 200, 200);
/*
struct Target;
impl CFlatteningSink for Target {
fn AcceptPointAndTangent(&mut self,
pt: &GpPointR,
// The point
vec: &GpPointR,
// The tangent there
fLast: bool
// Is this the last point on the curve?
) -> HRESULT {
todo!()
}
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
t: f64,
// Parameter we're at
fAborted: &mut bool) -> HRESULT {
println!("{} {}", pt.x, pt.y);
return S_OK;
}
}
let bezier = CBezier::new([GpPointR { x: 0., y: 0. },
GpPointR { x: 0., y: 0. },
GpPointR { x: 0., y: 0. },
GpPointR { x: 100., y: 100. }]);
let mut t = Target{};
let mut f = CBezierFlattener::new(&bezier, &mut t, 0.1);
f.Flatten(false);*/
}


@@ -1,822 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#![allow(non_snake_case)]
use std::ops::{Sub, Mul, Add, AddAssign, SubAssign, MulAssign, Div};
macro_rules! IFC {
($e: expr) => {
assert_eq!($e, S_OK);
}
}
pub type HRESULT = i32;
pub const S_OK: i32 = 0;
#[derive(Clone, Copy, Debug)]
pub struct GpPointR {
pub x: f64,
pub y: f64
}
impl Sub for GpPointR {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
GpPointR { x: self.x - rhs.x, y: self.y - rhs.y }
}
}
impl Add for GpPointR {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
GpPointR { x: self.x + rhs.x, y: self.y + rhs.y }
}
}
impl AddAssign for GpPointR {
fn add_assign(&mut self, rhs: Self) {
*self = *self + rhs;
}
}
impl SubAssign for GpPointR {
fn sub_assign(&mut self, rhs: Self) {
*self = *self - rhs;
}
}
impl MulAssign<f64> for GpPointR {
fn mul_assign(&mut self, rhs: f64) {
*self = *self * rhs;
}
}
impl Mul<f64> for GpPointR {
type Output = Self;
fn mul(self, rhs: f64) -> Self::Output {
GpPointR { x: self.x * rhs, y: self.y * rhs }
}
}
impl Div<f64> for GpPointR {
type Output = Self;
fn div(self, rhs: f64) -> Self::Output {
GpPointR { x: self.x / rhs, y: self.y / rhs }
}
}
impl Mul for GpPointR {
type Output = f64;
fn mul(self, rhs: Self) -> Self::Output {
self.x * rhs.x + self.y * rhs.y
}
}
impl GpPointR {
pub fn ApproxNorm(&self) -> f64 {
self.x.abs().max(self.y.abs())
}
pub fn Norm(&self) -> f64 {
self.x.hypot(self.y)
}
}
// This is relative to the tolerance squared. In other words, a vector
// whose length is less than .01*tolerance will be considered 0
const SQ_LENGTH_FUZZ: f64 = 1.0e-4;
// Some of these constants need further thinking
//const FUZZ: f64 = 1.0e-6; // Relative 0
// Minimum allowed tolerance - should probably be adjusted to the size of the
// geometry we are rendering, but for now ---
/*
const FUZZ_DOUBLE: f64 = 1.0e-12; // Double-precision relative 0
const MIN_TOLERANCE: f64 = 1.0e-6;
const DEFAULT_FLATTENING_TOLERANCE: f64 = 0.25;*/
const TWICE_MIN_BEZIER_STEP_SIZE: f64 = 1.0e-3; // The step size in the Bezier flattener should
// never go below half this amount.
//+-----------------------------------------------------------------------------
//
//
// $TAG ENGR
// $Module: win_mil_graphics_geometry
// $Keywords:
//
// $Description:
// Definition of CBezierFlattener.
//
// $ENDTAG
//
//------------------------------------------------------------------------------
//+-----------------------------------------------------------------------------
//
// Class:
// CFlatteningSink
//
// Synopsis:
// Callback interface for the results of curve flattening
//
// Notes:
// Methods are implemented rather than pure, for callers who do not use all
// of them.
//
//------------------------------------------------------------------------------
//
// Definition of CFlatteningSink
//
//------------------------------------------------------------------------------
/*
struct CFlatteningSink
{
public:
CFlatteningSink() {}
virtual ~CFlatteningSink() {}
virtual HRESULT Begin(
__in_ecount(1) const GpPointR &)
// First point (transformed)
{
// Do nothing stub, should not be called
RIP("Base class Begin called");
return E_NOTIMPL;
}
virtual HRESULT AcceptPoint(
__in_ecount(1) const GpPointR &pt,
// The point
IN GpReal t,
// Parameter we're at
__out_ecount(1) bool &fAborted)
// Set to true to signal aborting
{
UNREFERENCED_PARAMETER(pt);
UNREFERENCED_PARAMETER(t);
UNREFERENCED_PARAMETER(fAborted);
// Do nothing stub, should not be called
RIP("Base class AcceptPoint called");
return E_NOTIMPL;
}
virtual HRESULT AcceptPointAndTangent(
__in_ecount(1) const GpPointR &,
//The point
__in_ecount(1) const GpPointR &,
//The tangent there
IN bool fLast) // Is this the last point on the curve?
{
// Do nothing stub, should not be called
RIP("Base class AcceptPointAndTangent called");
return E_NOTIMPL;
}
};
*/
#[derive(Clone)]
pub struct CBezier
{
/*
public:
CBezier()
{
}
CBezier(
__in_ecount(4) const GpPointR *pPt)
// The defining Bezier points
{
Assert(pPt);
memcpy(&m_ptB, pPt, 4 * sizeof(GpPointR));
}
CBezier(
__in_ecount(1) const CBezier &other)
// Another Bezier to copy
{
Copy(other);
}
void Copy(
__in_ecount(1) const CBezier &other)
// Another Bezier to copy
{
memcpy(&m_ptB, other.m_ptB, 4 * sizeof(GpPointR));
}
void Initialize(
__in_ecount(1) const GpPointR &ptFirst,
// The first Bezier point
__in_ecount(3) const GpPointR *pPt)
// The remaining 3 Bezier points
{
m_ptB[0] = ptFirst;
memcpy(m_ptB + 1, pPt, 3 * sizeof(GpPointR));
}
__outro_ecount(1) const GpPointR &GetControlPoint(__range(0, 3) UINT i) const
{
Assert(i < 4);
return m_ptB[i];
}
__outro_ecount(1) const GpPointR &GetFirstPoint() const
{
return m_ptB[0];
}
__outro_ecount(1) const GpPointR &GetLastPoint() const
{
return m_ptB[3];
}
void GetPoint(
_In_ double t,
// Parameter value
__out_ecount(1) GpPointR &pt) const;
// Point there
void GetPointAndDerivatives(
__in double t,
// Parameter value
__out_ecount(3) GpPointR *pValues) const;
// Point, first derivative and second derivative there
void TrimToStartAt(
IN double t); // Parameter value
void TrimToEndAt(
IN double t); // Parameter value
bool TrimBetween(
__in double rStart,
// Parameter value for the new start, must be between 0 and 1
__in double rEnd);
// Parameter value for the new end, must be between 0 and 1
bool operator ==(__in_ecount(1) const CBezier &other) const
{
return (m_ptB[0] == other.m_ptB[0]) &&
(m_ptB[1] == other.m_ptB[1]) &&
(m_ptB[2] == other.m_ptB[2]) &&
(m_ptB[3] == other.m_ptB[3]);
}
void AssertEqualOrNaN(__in_ecount(1) const CBezier &other) const
{
m_ptB[0].AssertEqualOrNaN(other.m_ptB[0]);
m_ptB[1].AssertEqualOrNaN(other.m_ptB[1]);
m_ptB[2].AssertEqualOrNaN(other.m_ptB[2]);
m_ptB[3].AssertEqualOrNaN(other.m_ptB[3]);
}
protected:
*/
// Data
m_ptB: [GpPointR; 4],
// The defining Bezier points
}
impl CBezier {
pub fn new(curve: [GpPointR; 4]) -> Self {
Self { m_ptB: curve }
}
}
pub trait CFlatteningSink {
fn AcceptPointAndTangent(&mut self,
pt: &GpPointR,
// The point
vec: &GpPointR,
// The tangent there
fLast: bool
// Is this the last point on the curve?
) -> HRESULT;
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
t: f64,
// Parameter we're at
fAborted: &mut bool,
lastPoint: bool
) -> HRESULT;
}
//+-----------------------------------------------------------------------------
//
// Class:
// CBezierFlattener
//
// Synopsis:
// Generates a polygonal approximation to a given Bezier curve
//
//------------------------------------------------------------------------------
pub struct CBezierFlattener<'a>
{
bezier: CBezier,
// Flattening defining data
m_pSink: &'a mut dyn CFlatteningSink, // The recipient of the flattening data
m_rTolerance: f64, // Prescribed tolerance
m_fWithTangents: bool, // Generate tangent vectors if true
m_rQuarterTolerance: f64,// Prescribed tolerance/4 (for doubling the step)
m_rFuzz: f64, // Computational zero
// Flattening working data
m_ptE: [GpPointR; 4], // The moving basis of the curve definition
m_cSteps: i32, // The number of steps left to the end of the curve
m_rParameter: f64, // Parameter value
m_rStepSize: f64, // Steps size in parameter domain
}
impl<'a> CBezierFlattener<'a> {
/*fn new(
__in_ecount_opt(1) CFlatteningSink *pSink,
// The recipient of the flattened data
IN GpReal rTolerance)
// Flattening tolerance
{
Initialize(pSink, rTolerance);
}*/
/*
void SetTarget(__in_ecount_opt(1) CFlatteningSink *pSink)
{
m_pSink = pSink;
}
void Initialize(
__in_ecount_opt(1) CFlatteningSink *pSink,
// The recipient of the flattened data
IN GpReal rTolerance);
// Flattening tolerance
void SetPoint(
__in UINT i,
// index of the point (must be between 0 and 3)
__in_ecount(1) const GpPointR &pt)
// point value
{
Assert(i < 4);
m_ptB[i] = pt;
}
HRESULT GetFirstTangent(
__out_ecount(1) GpPointR &vecTangent) const;
// Tangent vector there
GpPointR GetLastTangent() const;
HRESULT Flatten(
IN bool fWithTangents); // Return tangents with the points if true
private:
// Disallow copy constructor
CBezierFlattener(__in_ecount(1) const CBezierFlattener &)
{
RIP("CBezierFlattener copy constructor reached.");
}
protected:
*/
/* fn Step(
__out_ecount(1) bool &fAbort); // Set to true if flattening should be aborted
fn HalveTheStep();
fn TryDoubleTheStep();*/
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//+-----------------------------------------------------------------------------
//
//
// $TAG ENGR
// $Module: win_mil_graphics_geometry
// $Keywords:
//
// $Description:
// Implementation of CBezierFlattener.
//
// $ENDTAG
//
//------------------------------------------------------------------------------
impl<'a> CBezierFlattener<'a> {
/////////////////////////////////////////////////////////////////////////////////
//
// Implementation of CBezierFlattener
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::Initialize
//
// Synopsis:
// Initialize the sink and tolerance
//
//------------------------------------------------------------------------------
pub fn new(bezier: &CBezier,
pSink: &'a mut dyn CFlatteningSink,
// The recipient of the flattened data
rTolerance: f64) // Flattening tolerance
-> Self
{
let mut result = CBezierFlattener {
bezier: bezier.clone(),
// Flattening defining data
m_pSink: pSink, // The recipient of the flattening data
m_rTolerance: 0., // Prescribed tolerance
m_fWithTangents: false, // Generate tangent vectors if true
m_rQuarterTolerance: 0.,// Prescribed tolerance/4 (for doubling the step)
m_rFuzz: 0., // Computational zero
// Flattening working data
m_ptE: [GpPointR { x: 0., y: 0.}; 4], // The moving basis of the curve definition
m_cSteps: 0, // The number of steps left to the end of the curve
m_rParameter: 0., // Parameter value
m_rStepSize: 0., // Steps size in parameter domain
};
// If rTolerance == NaN or less than 0, we'll treat it as 0.
result.m_rTolerance = if rTolerance >= 0.0 { rTolerance } else { 0.0 };
result.m_rFuzz = rTolerance * rTolerance * SQ_LENGTH_FUZZ;
// The error is tested on max(|e2|, |e3|), which represents 6 times the actual error, so:
result.m_rTolerance *= 6.;
result.m_rQuarterTolerance = result.m_rTolerance * 0.25;
result
}
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::Flatten
//
// Synopsis:
// Flatten this curve
//
// Notes:
// The algorithm is described in detail in the 1995 patent # 5367617 "System and
// method of hybrid forward differencing to render Bezier splines" to be found
// on the Microsoft legal dept. web site (LCAWEB). Additional references are:
// Lien, Shantz and Vaughan Pratt, "Adaptive Forward Differencing for
// Rendering Curves and Surfaces", Computer Graphics, July 1987
// Chang and Shantz, "Rendering Trimmed NURBS with Adaptive Forward
// Differencing", Computer Graphics, August 1988
// Foley and Van Dam, "Fundamentals of Interactive Computer Graphics"
//
// The basic idea is to replace the Bernstein basis (underlying Bezier curves)
// with the Hybrid Forward Differencing (HFD) basis which is more efficient
// for flattening. For each one of the 3 actions - Step, Halve and Double (step
// size) - this basis affords very efficient formulas for computing coefficients
// for the new interval.
//
// The coefficients of the HFD basis are defined in terms of the Bezier
// coefficients as follows:
//
// e0 = p0, e1 = p3 - p0, e2 = 6(p1 - 2p2 + p3), e3 = 6(p0 - 2p1 + p2),
//
// but formulas may be easier to understand by going through the power basis
// representation: f(t) = a + b*t + c*t^2 + d*t^3.
//
// The conversion is then:
// e0 = a
// e1 = f(1) - f(0) = b + c + d
// e2 = f"(1) = 2c + 6d
// e3 = f"(0) = 2c
//
// This is inverted to:
// a = e0
// c = e3 / 2
// d = (e2 - 2c) / 6 = (e2 - e3) / 6
// b = e1 - c - d = e1 - e2 / 6 - e3 / 3
//
// a, b, c, d for the new (halved, doubled or forwarded) interval are derived
// and then converted to e0, e1, e2, e3 using these relationships.
//
// An exact integer version is implemented in Bezier.h and Bezier.cpp.
//
//------------------------------------------------------------------------------
pub fn Flatten(&mut self,
fWithTangents: bool) // Return tangents with the points if true
-> HRESULT
{
let hr = S_OK;
let mut fAbort = false;
/*if (!self.m_pSink)
{
return E_UNEXPECTED;
}*/
self.m_fWithTangents = fWithTangents;
self.m_cSteps = 1;
self.m_rParameter = 0.;
self.m_rStepSize = 1.;
// Compute the HFD basis
self.m_ptE[0] = self.bezier.m_ptB[0];
self.m_ptE[1] = self.bezier.m_ptB[3] - self.bezier.m_ptB[0];
self.m_ptE[2] = (self.bezier.m_ptB[1] - self.bezier.m_ptB[2] * 2. + self.bezier.m_ptB[3]) * 6.; // The second derivative at curve end
self.m_ptE[3] = (self.bezier.m_ptB[0] - self.bezier.m_ptB[1] * 2. + self.bezier.m_ptB[2]) * 6.; // The second derivative at curve start
// Determine the initial step size
self.m_cSteps = 1;
while ((self.m_ptE[2].ApproxNorm() > self.m_rTolerance) || (self.m_ptE[3].ApproxNorm() > self.m_rTolerance)) &&
(self.m_rStepSize > TWICE_MIN_BEZIER_STEP_SIZE)
{
self.HalveTheStep();
}
while self.m_cSteps > 1
{
IFC!(self.Step(&mut fAbort));
if fAbort {
return hr;
}
// E[3] was already tested as E[2] in the previous step
if self.m_ptE[2].ApproxNorm() > self.m_rTolerance &&
self.m_rStepSize > TWICE_MIN_BEZIER_STEP_SIZE
{
// Halving the step once is provably sufficient (see Notes above), so ---
self.HalveTheStep();
}
else
{
// --- but the step can possibly be more than doubled, hence the while loop
while self.TryDoubleTheStep() {
continue;
}
}
}
// Last point
if self.m_fWithTangents
{
IFC!(self.m_pSink.AcceptPointAndTangent(&self.bezier.m_ptB[3], &self.GetLastTangent(), true /* last point */));
}
else
{
IFC!(self.m_pSink.AcceptPoint(&self.bezier.m_ptB[3], 1., &mut fAbort, true));
}
return hr;
}
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::Step
//
// Synopsis:
// Step forward on the polygonal approximation of the curve
//
// Notes:
// Taking a step means replacing a,b,c,d by coefficients of g(t) = f(t+1).
// Express those in terms of a,b,c,d and convert to e0, e1, e2, e3 to get:
//
// New e0 = e0 + e1
// New e1 = e1 + e2
// New e2 = 2e2 - e3
// New e3 = e2
//
// The patent application (see above) explains why.
//
// Getting a tangent vector is a minor enhancement along the same lines:
// f'(0) = b = 6e1 - e2 - 2e3.
//
//------------------------------------------------------------------------------
fn Step(&mut self,
fAbort: &mut bool) -> HRESULT // Set to true if flattening should be aborted, untouched otherwise
{
let hr = S_OK;
// Compute the basis for the same curve on the next interval
let mut pt;
self.m_ptE[0] += self.m_ptE[1];
pt = self.m_ptE[2];
self.m_ptE[1] += pt;
self.m_ptE[2] += pt; self.m_ptE[2] -= self.m_ptE[3];
self.m_ptE[3] = pt;
// Increment the parameter
self.m_rParameter += self.m_rStepSize;
// Generate the start point of the new interval
if self.m_fWithTangents
{
// Compute the tangent there
pt = self.m_ptE[1] * 6. - self.m_ptE[2] - self.m_ptE[3] * 2.; // = twice the derivative at E[0]
IFC!(self.m_pSink.AcceptPointAndTangent(&self.m_ptE[0], &pt, false /* not the last point */));
}
else
{
IFC!(self.m_pSink.AcceptPoint(&self.m_ptE[0], self.m_rParameter, fAbort, false));
}
self.m_cSteps-=1;
return hr;
}
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::HalveTheStep
//
// Synopsis:
// Halve the size of the step
//
// Notes:
// Halving the step means replacing a,b,c,d by coefficients of g(t) =
// f(t/2). Express those in terms of a,b,c,d and convert to e0, e1, e2, e3
// to get:
//
// New e0 = e0
// New e1 = (e1 - e2) / 2
// New e2 = (e2 + e3) / 8
// New e3 = e3 / 4
//
// The patent application (see above) explains why.
//
//------------------------------------------------------------------------------
fn HalveTheStep(&mut self)
{
self.m_ptE[2] += self.m_ptE[3]; self.m_ptE[2] *= 0.125;
self.m_ptE[1] -= self.m_ptE[2]; self.m_ptE[1] *= 0.5;
self.m_ptE[3] *= 0.25;
self.m_cSteps *= 2; // Double the number of steps left
self.m_rStepSize *= 0.5;
}
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::TryDoubleTheStep
//
// Synopsis:
// Double the step size if possible within tolerance.
//
// Notes:
// Doubling the step means replacing a,b,c,d by coefficients of g(t) =
// f(2t). Express those in terms of a,b,c,d and convert to e0, e1, e2, e3
// to get:
//
// New e0 = e0
// New e1 = 2e1 + e2
// New e2 = 8e2 - 4e3
// New e3 = 4e3
//
// The patent application (see above) explains why. Note also that these
// formulas are the inverse of those for halving the step.
//
//------------------------------------------------------------------------------
fn
TryDoubleTheStep(&mut self) -> bool
{
let mut fDoubled = 0 == (self.m_cSteps & 1);
if fDoubled
{
let ptTemp = self.m_ptE[2] * 2. - self.m_ptE[3];
fDoubled = (self.m_ptE[3].ApproxNorm() <= self.m_rQuarterTolerance) &&
(ptTemp.ApproxNorm() <= self.m_rQuarterTolerance);
if fDoubled
{
self.m_ptE[1] *= 2.; self.m_ptE[1] += self.m_ptE[2];
self.m_ptE[3] *= 4.;
self.m_ptE[2] = ptTemp * 4.;
self.m_cSteps /= 2; // Halve the number of steps left
self.m_rStepSize *= 2.;
}
}
return fDoubled;
}
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::GetFirstTangent
//
// Synopsis:
// Get the tangent at curve start
//
// Return:
// WGXERR_ZEROVECTOR if the tangent vector has practically 0 length
//
// Notes:
// This method can return an error if all the points are bunched together.
// The idea is that the caller will detect that, abandon this curve, and
// never call GetLastTangent, which can therefore be presumed to succeed.
// The failure here is benign.
//
//------------------------------------------------------------------------------
#[allow(dead_code)]
fn GetFirstTangent(&self) -> Option<GpPointR> // Tangent vector there
{
let mut vecTangent = self.bezier.m_ptB[1] - self.bezier.m_ptB[0];
if vecTangent * vecTangent > self.m_rFuzz
{
return Some(vecTangent); // - we're done
}
// Zero first derivative, go for the second
vecTangent = self.bezier.m_ptB[2] - self.bezier.m_ptB[0];
if vecTangent * vecTangent > self.m_rFuzz
{
return Some(vecTangent); // - we're done
}
// Zero second derivative, go for the third
vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[0];
if vecTangent * vecTangent <= self.m_rFuzz
{
return None;
}
return Some(vecTangent); // no RRETURN, error is expected
}
//+-----------------------------------------------------------------------------
//
// Member:
// CBezierFlattener::GetLastTangent
//
// Synopsis:
// Get the tangent at curve end
//
// Return:
// The tangent
//
// Notes:
// This method has no error return while GetFirstTangent returns
// WGXERR_ZEROVECTOR if the tangent is zero. The idea is that we should
// only fail if all the control points coincide, that should have been
// detected at GetFirstTangent, and then we should not have been called.
//
//------------------------------------------------------------------------------
fn GetLastTangent(&self) -> GpPointR
{
let mut vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[2];
// If the curve is degenerate, we should have detected it at curve-start, skipped this curve
// altogether and not be here. But the test in GetFirstTangent is for the point-differences
// 1-0, 2-0 and 3-0, while here it is for points 3-2, 3-1 and 3-0, which is not quite the same.
// Still, In a disk of radius r no 2 points are more than 2r apart. The tests are done with
// squared distance, and m_rFuzz is the minimal accepted squared distance. GetFirstTangent()
// succeeded, so there is a pair of points whose squared distance is greater than m_rfuzz.
// So the squared radius of a disk about point 3 that contains the remaining points must be
// at least m_rFuzz/4. Allowing some margin for arithmetic error:
let rLastTangentFuzz = self.m_rFuzz/8.;
if vecTangent * vecTangent <= rLastTangentFuzz
{
// Zero first derivative, go for the second
vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[1];
if vecTangent * vecTangent <= rLastTangentFuzz
{
// Zero second derivative, go for the third
vecTangent = self.bezier.m_ptB[3] - self.bezier.m_ptB[0];
}
}
debug_assert! (!(vecTangent * vecTangent < rLastTangentFuzz)); // Ignore NaNs
return vecTangent;
}
}
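
To make the HFD basis change described in the comments above concrete, here is a small standalone sketch (not part of the crate) that computes e0..e3 from four Bezier control points and applies one forward step, using the same formulas as CBezierFlattener::Step():

#include <cstdio>

struct Pt { double x, y; };
static Pt add(Pt a, Pt b) { return {a.x + b.x, a.y + b.y}; }
static Pt sub(Pt a, Pt b) { return {a.x - b.x, a.y - b.y}; }
static Pt mul(Pt a, double k) { return {a.x * k, a.y * k}; }

int main() {
  Pt p[4] = {{0, 0}, {10, 40}, {60, 40}, {80, 0}};  // Bezier control points
  // HFD basis: e0 = p0, e1 = p3 - p0,
  //            e2 = 6(p1 - 2p2 + p3)  (second derivative at the curve end),
  //            e3 = 6(p0 - 2p1 + p2)  (second derivative at the curve start).
  Pt e0 = p[0];
  Pt e1 = sub(p[3], p[0]);
  Pt e2 = mul(add(sub(p[1], mul(p[2], 2)), p[3]), 6);
  Pt e3 = mul(add(sub(p[0], mul(p[1], 2)), p[2]), 6);
  // One forward step (t -> t + step), as in Step():
  // new e0 = e0 + e1, new e1 = e1 + e2, new e2 = 2*e2 - e3, new e3 = old e2.
  Pt old_e2 = e2;
  e0 = add(e0, e1);
  e1 = add(e1, e2);
  e2 = sub(mul(e2, 2), e3);
  e3 = old_e2;
  printf("stepped e0 = (%g, %g), e1 = (%g, %g)\n", e0.x, e0.y, e1.x, e1.y);
  printf("stepped e2 = (%g, %g), e3 = (%g, %g)\n", e2.x, e2.y, e3.x, e3.y);
  return 0;
}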

third_party/rust/aa-stroke/src/c_bindings.rs (vendored), 81 changed lines

@@ -1,81 +0,0 @@
use crate::{Stroker, StrokeStyle, Point};
type OutputVertex = crate::Vertex;
#[repr(C)]
pub struct VertexBuffer {
data: *const OutputVertex,
len: usize
}
#[no_mangle]
pub extern "C" fn aa_stroke_new(style: &StrokeStyle) -> *mut Stroker {
let s = Stroker::new(style);
Box::into_raw(Box::new(s))
}
#[no_mangle]
pub extern "C" fn aa_stroke_move_to(s: &mut Stroker, x: f32, y: f32, closed: bool) {
s.move_to(Point::new(x, y), closed);
}
#[no_mangle]
pub extern "C" fn aa_stroke_line_to(s: &mut Stroker, x: f32, y: f32, end: bool) {
if end {
s.line_to_capped(Point::new(x, y))
} else {
s.line_to(Point::new(x, y));
}
}
#[no_mangle]
pub extern "C" fn aa_stroke_curve_to(s: &mut Stroker, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32, end: bool) {
if end {
s.curve_to_capped(Point::new(c1x, c1y), Point::new(c2x, c2y), Point::new(x, y));
} else {
s.curve_to(Point::new(c1x, c1y), Point::new(c2x, c2y), Point::new(x, y));
}
}
/*
#[no_mangle]
pub extern "C" fn aa_stroke_quad_to(s: &mut Stroker, cx: f32, cy: f32, x: f32, y: f32) {
s.quad_to(cx, cy, x, y);
}*/
#[no_mangle]
pub extern "C" fn aa_stroke_close(s: &mut Stroker) {
s.close();
}
#[no_mangle]
pub extern "C" fn aa_stroke_finish(s: &mut Stroker) -> VertexBuffer {
let result = s.finish();
let vb = VertexBuffer { data: result.as_ptr(), len: result.len() };
std::mem::forget(result);
vb
}
#[no_mangle]
pub extern "C" fn aa_stroke_vertex_buffer_release(vb: VertexBuffer)
{
unsafe { drop(Box::from_raw(std::slice::from_raw_parts_mut(vb.data as *mut OutputVertex, vb.len))) }
}
#[no_mangle]
pub unsafe extern "C" fn aa_stroke_release(s: *mut Stroker) {
drop(Box::from_raw(s));
}
#[test]
fn simple() {
let style = StrokeStyle::default();
let s = unsafe { &mut *aa_stroke_new(&style) } ;
aa_stroke_move_to(s, 10., 10., false);
aa_stroke_line_to(s, 100., 100., false);
aa_stroke_line_to(s, 100., 10., true);
let vb = aa_stroke_finish(s);
aa_stroke_vertex_buffer_release(vb);
unsafe { aa_stroke_release(s) } ;
}

third_party/rust/aa-stroke/src/lib.rs (vendored), 846 changed lines

@@ -1,846 +0,0 @@
use std::default::Default;
use bezierflattener::CBezierFlattener;
use crate::{bezierflattener::{CFlatteningSink, GpPointR, HRESULT, S_OK, CBezier}};
mod bezierflattener;
pub mod tri_rasterize;
#[cfg(feature = "c_bindings")]
pub mod c_bindings;
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Winding {
EvenOdd,
NonZero,
}
#[derive(Clone, Copy, Debug)]
pub enum PathOp {
MoveTo(Point),
LineTo(Point),
QuadTo(Point, Point),
CubicTo(Point, Point, Point),
Close,
}
/// Represents a complete path usable for filling or stroking.
#[derive(Clone, Debug)]
pub struct Path {
pub ops: Vec<PathOp>,
pub winding: Winding,
}
pub type Point = euclid::default::Point2D<f32>;
pub type Transform = euclid::default::Transform2D<f32>;
pub type Vector = euclid::default::Vector2D<f32>;
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(C)]
pub enum LineCap {
Round,
Square,
Butt,
}
#[derive(Clone, Copy, PartialEq, Debug)]
#[repr(C)]
pub enum LineJoin {
Round,
Miter,
Bevel,
}
#[derive(Clone, PartialEq, Debug)]
#[repr(C)]
pub struct StrokeStyle {
pub width: f32,
pub cap: LineCap,
pub join: LineJoin,
pub miter_limit: f32,
}
impl Default for StrokeStyle {
fn default() -> Self {
StrokeStyle {
width: 1.,
cap: LineCap::Butt,
join: LineJoin::Miter,
miter_limit: 10.,
}
}
}
#[derive(Debug)]
pub struct Vertex {
x: f32,
y: f32,
coverage: f32
}
/// A helper struct used for constructing a `Path`.
pub struct PathBuilder {
vertices: Vec<Vertex>,
coverage: f32,
aa: bool
}
impl PathBuilder {
pub fn new(coverage: f32) -> PathBuilder {
PathBuilder {
vertices: Vec::new(),
coverage,
aa: true
}
}
pub fn push_tri(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) {
self.vertices.push(Vertex { x: x1, y: y1, coverage: self.coverage});
self.vertices.push(Vertex { x: x2, y: y2, coverage: self.coverage});
self.vertices.push(Vertex { x: x3, y: y3, coverage: self.coverage});
}
// x3, y3 is the full coverage vert
pub fn tri_ramp(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) {
self.vertices.push(Vertex { x: x1, y: y1, coverage: 0.});
self.vertices.push(Vertex { x: x2, y: y2, coverage: 0.});
self.vertices.push(Vertex { x: x3, y: y3, coverage: self.coverage});
}
pub fn quad(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32, x4: f32, y4: f32) {
self.push_tri(x1, y1, x2, y2, x3, y3);
self.push_tri(x3, y3, x4, y4, x1, y1);
}
pub fn ramp(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32, x4: f32, y4: f32) {
self.vertices.push(Vertex { x: x1, y: y1, coverage: self.coverage});
self.vertices.push(Vertex { x: x2, y: y2, coverage: 0.});
self.vertices.push(Vertex { x: x3, y: y3, coverage: 0.});
self.vertices.push(Vertex { x: x3, y: y3, coverage: 0.});
self.vertices.push(Vertex { x: x4, y: y4, coverage: self.coverage});
self.vertices.push(Vertex { x: x1, y: y1, coverage: self.coverage});
}
// first edge is outside
pub fn tri(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) {
self.push_tri(x1, y1, x2, y2, x3, y3);
}
pub fn arc_wedge(&mut self, c: Point, radius: f32, a: Vector, b: Vector) {
arc(self, c.x, c.y, radius, a, b);
}
/// Completes the current path
pub fn finish(self) -> Vec<Vertex> {
self.vertices
}
}
fn compute_normal(p0: Point, p1: Point) -> Option<Vector> {
let ux = p1.x - p0.x;
let uy = p1.y - p0.y;
// this could overflow f32. Skia checks for this and
// uses a double in that situation
let ulen = ux.hypot(uy);
if ulen == 0. {
return None;
}
// the normal is perpendicular to the *unit* vector
Some(Vector::new(-uy / ulen, ux / ulen))
}
fn flip(v: Vector) -> Vector {
Vector::new(-v.x, -v.y)
}
/* Compute a spline approximation of the arc
centered at xc, yc from the angle a to the angle b
The angle between a and b should not be more than a
quarter circle (pi/2)
The approximation is similar to an approximation given in:
"Approximation of a cubic bezier curve by circular arcs and vice versa"
by Alekas Riškus. However that approximation becomes unstable when the
angle of the arc approaches 0.
This approximation is inspired by a discussion with Boris Zbarsky
and essentially just computes:
h = 4.0/3.0 * tan ((angle_B - angle_A) / 4.0);
without converting to polar coordinates.
A different way to do this is covered in "Approximation of a cubic bezier
curve by circular arcs and vice versa" by Alekas Riškus. However, the method
presented there doesn't handle arcs with angles close to 0 because it
divides by the perp dot product of the two angle vectors.
*/
fn arc_segment_tri(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vector, b: Vector) {
let r_sin_a = radius * a.y;
let r_cos_a = radius * a.x;
let r_sin_b = radius * b.y;
let r_cos_b = radius * b.x;
/* bisect the angle between 'a' and 'b' with 'mid' */
let mut mid = a + b;
mid /= mid.length();
/* bisect the angle between 'a' and 'mid' with 'mid2' this is parallel to a
* line with angle (B - A)/4 */
let mid2 = a + mid;
let h = (4. / 3.) * dot(perp(a), mid2) / dot(a, mid2);
let last_point = GpPointR { x: (xc + r_cos_a) as f64, y: (yc + r_sin_a) as f64 };
let initial_normal = GpPointR { x: a.x as f64, y: a.y as f64 };
struct Target<'a> { last_point: GpPointR, last_normal: GpPointR, xc: f32, yc: f32, path: &'a mut PathBuilder }
impl<'a> CFlatteningSink for Target<'a> {
fn AcceptPointAndTangent(&mut self,
pt: &GpPointR,
// The point
vec: &GpPointR,
// The tangent there
_last: bool
// Is this the last point on the curve?
) -> HRESULT {
if self.path.aa {
let len = vec.Norm();
let normal = *vec/len;
let normal = GpPointR { x: -normal.y, y: normal.x };
// FIXME: we probably need more width here because
// the normals are not perpendicular with the edge
let width = 0.5;
self.path.ramp(
(pt.x - normal.x * width) as f32,
(pt.y - normal.y * width) as f32,
(pt.x + normal.x * width) as f32,
(pt.y + normal.y * width) as f32,
(self.last_point.x + self.last_normal.x * width) as f32,
(self.last_point.y + self.last_normal.y * width) as f32,
(self.last_point.x - self.last_normal.x * width) as f32,
(self.last_point.y - self.last_normal.y * width) as f32, );
self.path.push_tri(
(self.last_point.x - self.last_normal.x * 0.5) as f32,
(self.last_point.y - self.last_normal.y * 0.5) as f32,
(pt.x - normal.x * 0.5) as f32,
(pt.y - normal.y * 0.5) as f32,
self.xc, self.yc);
self.last_normal = normal;
} else {
self.path.push_tri(self.last_point.x as f32, self.last_point.y as f32, pt.x as f32, pt.y as f32, self.xc, self.yc);
}
self.last_point = pt.clone();
return S_OK;
}
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
_t: f64,
// Parameter we're at
_aborted: &mut bool,
_last_point: bool) -> HRESULT {
self.path.push_tri(self.last_point.x as f32, self.last_point.y as f32, pt.x as f32, pt.y as f32, self.xc, self.yc);
self.last_point = pt.clone();
return S_OK;
}
}
let bezier = CBezier::new([GpPointR { x: (xc + r_cos_a) as f64, y: (yc + r_sin_a) as f64, },
GpPointR { x: (xc + r_cos_a - h * r_sin_a) as f64, y: (yc + r_sin_a + h * r_cos_a) as f64, },
GpPointR { x: (xc + r_cos_b + h * r_sin_b) as f64, y: (yc + r_sin_b - h * r_cos_b) as f64, },
GpPointR { x: (xc + r_cos_b) as f64, y: (yc + r_sin_b) as f64, }]);
let mut t = Target{ last_point, last_normal: initial_normal, xc, yc, path };
let mut f = CBezierFlattener::new(&bezier, &mut t, 0.25);
f.Flatten(true);
}
/* The angle between the vectors must be <= pi */
fn bisect(a: Vector, b: Vector) -> Vector {
let mut mid;
if dot(a, b) >= 0. {
/* if the angle between a and b is acute, then we can
* just add the vectors and normalize */
mid = a + b;
} else {
/* otherwise, we can flip a, add it
* and then use the perpendicular of the result */
mid = flip(a) + b;
mid = perp(mid);
}
/* normalize */
/* because we assume that 'a' and 'b' are normalized, we can use
* sqrt instead of hypot because the range of mid is limited */
let mid_len = mid.x * mid.x + mid.y * mid.y;
let len = mid_len.sqrt();
return mid / len;
}
fn arc(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vector, b: Vector) {
/* find a vector that bisects the angle between a and b */
let mid_v = bisect(a, b);
/* construct the arc using two curve segments */
arc_segment_tri(path, xc, yc, radius, a, mid_v);
arc_segment_tri(path, xc, yc, radius, mid_v, b);
}
/*
fn join_round(path: &mut PathBuilder, center: Point, a: Vector, b: Vector, radius: f32) {
/*
int ccw = dot (perp (b), a) >= 0; // XXX: is this always true?
yes, otherwise we have an interior angle.
assert (ccw);
*/
arc(path, center.x, center.y, radius, a, b);
}*/
fn cap_line(dest: &mut PathBuilder, style: &StrokeStyle, pt: Point, normal: Vector) {
let offset = style.width / 2.;
match style.cap {
LineCap::Butt => {
if dest.aa {
let half_width = offset;
let end = pt;
let v = Vector::new(normal.y, -normal.x);
// end
dest.ramp(
end.x - normal.x * (half_width - 0.5),
end.y - normal.y * (half_width - 0.5),
end.x + v.x - normal.x * (half_width - 0.5),
end.y + v.y - normal.y * (half_width - 0.5),
end.x + v.x + normal.x * (half_width - 0.5),
end.y + v.y + normal.y * (half_width - 0.5),
end.x + normal.x * (half_width - 0.5),
end.y + normal.y * (half_width - 0.5),
);
dest.tri_ramp(
end.x + v.x - normal.x * (half_width - 0.5),
end.y + v.y - normal.y * (half_width - 0.5),
end.x - normal.x * (half_width + 0.5),
end.y - normal.y * (half_width + 0.5),
end.x - normal.x * (half_width - 0.5),
end.y - normal.y * (half_width - 0.5));
dest.tri_ramp(
end.x + v.x + normal.x * (half_width - 0.5),
end.y + v.y + normal.y * (half_width - 0.5),
end.x + normal.x * (half_width + 0.5),
end.y + normal.y * (half_width + 0.5),
end.x + normal.x * (half_width - 0.5),
end.y + normal.y * (half_width - 0.5));
}
}
LineCap::Round => {
dest.arc_wedge(pt, offset, normal, flip(normal));
}
LineCap::Square => {
// parallel vector
let v = Vector::new(normal.y, -normal.x);
let end = pt + v * offset;
if dest.aa {
let half_width = offset;
let offset = offset - 0.5;
dest.ramp(
end.x + normal.x * (half_width - 0.5),
end.y + normal.y * (half_width - 0.5),
end.x + normal.x * (half_width + 0.5),
end.y + normal.y * (half_width + 0.5),
pt.x + normal.x * (half_width + 0.5),
pt.y + normal.y * (half_width + 0.5),
pt.x + normal.x * (half_width - 0.5),
pt.y + normal.y * (half_width - 0.5),
);
dest.quad(pt.x + normal.x * offset, pt.y + normal.y * offset,
end.x + normal.x * offset, end.y + normal.y * offset,
end.x + -normal.x * offset, end.y + -normal.y * offset,
pt.x - normal.x * offset, pt.y - normal.y * offset);
dest.ramp(
pt.x - normal.x * (half_width - 0.5),
pt.y - normal.y * (half_width - 0.5),
pt.x - normal.x * (half_width + 0.5),
pt.y - normal.y * (half_width + 0.5),
end.x - normal.x * (half_width + 0.5),
end.y - normal.y * (half_width + 0.5),
end.x - normal.x * (half_width - 0.5),
end.y - normal.y * (half_width - 0.5));
// end
dest.ramp(
end.x - normal.x * (half_width - 0.5),
end.y - normal.y * (half_width - 0.5),
end.x + v.x - normal.x * (half_width - 0.5),
end.y + v.y - normal.y * (half_width - 0.5),
end.x + v.x + normal.x * (half_width - 0.5),
end.y + v.y + normal.y * (half_width - 0.5),
end.x + normal.x * (half_width - 0.5),
end.y + normal.y * (half_width - 0.5),
);
dest.tri_ramp(
end.x + v.x - normal.x * (half_width - 0.5),
end.y + v.y - normal.y * (half_width - 0.5),
end.x - normal.x * (half_width + 0.5),
end.y - normal.y * (half_width + 0.5),
end.x - normal.x * (half_width - 0.5),
end.y - normal.y * (half_width - 0.5));
dest.tri_ramp(
end.x + v.x + normal.x * (half_width - 0.5),
end.y + v.y + normal.y * (half_width - 0.5),
end.x + normal.x * (half_width + 0.5),
end.y + normal.y * (half_width + 0.5),
end.x + normal.x * (half_width - 0.5),
end.y + normal.y * (half_width - 0.5));
} else {
dest.quad(pt.x + normal.x * offset, pt.y + normal.y * offset,
end.x + normal.x * offset, end.y + normal.y * offset,
end.x + -normal.x * offset, end.y + -normal.y * offset,
pt.x - normal.x * offset, pt.y - normal.y * offset);
}
}
}
}
fn bevel(
dest: &mut PathBuilder,
style: &StrokeStyle,
pt: Point,
s1_normal: Vector,
s2_normal: Vector,
) {
let offset = style.width / 2.;
if dest.aa {
let width = 1.;
let offset = offset - width / 2.;
//XXX: we should be able to just bisect the two norms to get this
let diff = (s2_normal - s1_normal).normalize();
let edge_normal = perp(diff);
dest.tri(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
pt.x, pt.y);
dest.tri_ramp(pt.x + s1_normal.x * (offset + width), pt.y + s1_normal.y * (offset + width),
pt.x + s1_normal.x * offset + edge_normal.x, pt.y + s1_normal.y * offset + edge_normal.y,
pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset);
dest.ramp(
pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
pt.x + s2_normal.x * offset + edge_normal.x, pt.y + s2_normal.y * offset + edge_normal.y,
pt.x + s1_normal.x * offset + edge_normal.x, pt.y + s1_normal.y * offset + edge_normal.y,
pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
);
dest.tri_ramp(pt.x + s2_normal.x * (offset + width), pt.y + s2_normal.y * (offset + width),
pt.x + s2_normal.x * offset + edge_normal.x, pt.y + s2_normal.y * offset + edge_normal.y,
pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset);
} else {
dest.tri(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
pt.x, pt.y);
}
}
/* given a normal rotate the vector 90 degrees to the right clockwise
* This function has a period of 4. e.g. swap(swap(swap(swap(x)))) == x */
fn swap(a: Vector) -> Vector {
/* one of these needs to be negative. We choose a.x so that we rotate to the right instead of negating */
Vector::new(a.y, -a.x)
}
fn unperp(a: Vector) -> Vector {
swap(a)
}
/* rotate a vector 90 degrees to the left */
fn perp(v: Vector) -> Vector {
Vector::new(-v.y, v.x)
}
fn dot(a: Vector, b: Vector) -> f32 {
a.x * b.x + a.y * b.y
}
/* Finds the intersection of two lines each defined by a point and a normal.
From "Example 2: Find the intersection of two lines" of
"The Pleasures of "Perp Dot" Products"
F. S. Hill, Jr. */
fn line_intersection(a: Point, a_perp: Vector, b: Point, b_perp: Vector) -> Option<Point> {
let a_parallel = unperp(a_perp);
let c = b - a;
let denom = dot(b_perp, a_parallel);
if denom == 0.0 {
return None;
}
let t = dot(b_perp, c) / denom;
let intersection = Point::new(a.x + t * (a_parallel.x), a.y + t * (a_parallel.y));
Some(intersection)
}
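// Illustrative sketch, not part of the original source: a quick check of
// line_intersection. The horizontal line through (0, 5) with normal (0, 1) and
// the vertical line through (5, 0) with normal (1, 0) should meet at (5, 5).
#[test]
fn line_intersection_example() {
let p = line_intersection(
Point::new(0., 5.), Vector::new(0., 1.),
Point::new(5., 0.), Vector::new(1., 0.),
).unwrap();
assert_eq!((p.x, p.y), (5., 5.));
}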
fn is_interior_angle(a: Vector, b: Vector) -> bool {
/* Angles of 180 and 0 degrees will both evaluate to 0; however,
 * we want to treat 180 degrees as an interior angle and 0 degrees as an exterior angle */
dot(perp(a), b) > 0. || a == b /* 0 degrees is interior */
}
fn join_line(
dest: &mut PathBuilder,
style: &StrokeStyle,
pt: Point,
mut s1_normal: Vector,
mut s2_normal: Vector,
) {
if is_interior_angle(s1_normal, s2_normal) {
s2_normal = flip(s2_normal);
s1_normal = flip(s1_normal);
std::mem::swap(&mut s1_normal, &mut s2_normal);
}
// XXX: joining uses `pt` which can cause seams because it lies halfway on a line and the
// rasterizer may not find exactly the same spot
let mut offset = style.width / 2.;
match style.join {
LineJoin::Round => {
dest.arc_wedge(pt, offset, s1_normal, s2_normal);
}
LineJoin::Miter => {
if dest.aa {
offset -= 0.5;
}
let in_dot_out = -s1_normal.x * s2_normal.x + -s1_normal.y * s2_normal.y;
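// Added commentary (not in the original source): if phi is the angle between
// the two segments at the join, then cos(phi) = in_dot_out and the miter ratio
// (miter length / stroke width) is 1 / sin(phi / 2). Requiring
// miter_limit >= 1 / sin(phi / 2) and using 2 * sin(phi / 2)^2 = 1 - cos(phi)
// gives the equivalent test below: 2 <= miter_limit^2 * (1 - in_dot_out).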
if 2. <= style.miter_limit * style.miter_limit * (1. - in_dot_out) {
let start = pt + s1_normal * offset;
let end = pt + s2_normal * offset;
if let Some(intersection) = line_intersection(start, s1_normal, end, s2_normal) {
// We won't have an intersection if the segments are parallel
if dest.aa {
let ramp_start = pt + s1_normal * (offset + 1.);
let ramp_end = pt + s2_normal * (offset + 1.);
let mid = bisect(s1_normal, s2_normal);
let ramp_intersection = intersection + mid;
let ramp_s1 = line_intersection(ramp_start, s1_normal, ramp_intersection, flip(mid)).unwrap();
let ramp_s2 = line_intersection(ramp_end, s2_normal, ramp_intersection, flip(mid)).unwrap();
dest.ramp(intersection.x, intersection.y,
ramp_s1.x, ramp_s1.y,
ramp_start.x, ramp_start.y,
pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
);
dest.ramp(pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
ramp_end.x, ramp_end.y,
ramp_s2.x, ramp_s2.y,
intersection.x, intersection.y);
dest.tri_ramp(ramp_s1.x, ramp_s1.y, ramp_s2.x, ramp_s2.y, intersection.x, intersection.y);
// we'll want to intersect the ramps and put a flat cap on the end
dest.quad(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
intersection.x, intersection.y,
pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
pt.x, pt.y);
} else {
dest.quad(pt.x + s1_normal.x * offset, pt.y + s1_normal.y * offset,
intersection.x, intersection.y,
pt.x + s2_normal.x * offset, pt.y + s2_normal.y * offset,
pt.x, pt.y);
}
}
} else {
bevel(dest, style, pt, s1_normal, s2_normal);
}
}
LineJoin::Bevel => {
bevel(dest, style, pt, s1_normal, s2_normal);
}
}
}
pub struct Stroker {
stroked_path: PathBuilder,
cur_pt: Option<Point>,
last_normal: Vector,
half_width: f32,
start_point: Option<(Point, Vector)>,
style: StrokeStyle,
closed_subpath: bool
}
impl Stroker {
pub fn new(style: &StrokeStyle) -> Self {
let mut style = style.clone();
let mut coverage = 1.;
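// Added note: strokes narrower than one pixel are widened to a full pixel and
// the lost thickness is compensated by lowering the coverage passed to
// PathBuilder, rather than emitting sub-pixel geometry.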
if style.width < 1. {
coverage = style.width;
style.width = 1.;
}
Stroker {
stroked_path: PathBuilder::new(coverage),
cur_pt: None,
last_normal: Vector::zero(),
half_width: style.width / 2.,
start_point: None,
style,
closed_subpath: false,
}
}
pub fn line_to_capped(&mut self, pt: Point) {
if let Some(cur_pt) = self.cur_pt {
let normal = compute_normal(cur_pt, pt).unwrap_or(self.last_normal);
self.line_to(if self.stroked_path.aa && self.style.cap == LineCap::Butt { pt - flip(normal) * 0.5 } else { pt });
if let (Some(cur_pt), Some((_point, _normal))) = (self.cur_pt, self.start_point) {
// cap end
cap_line(&mut self.stroked_path, &self.style, cur_pt, self.last_normal);
}
}
self.start_point = None;
}
pub fn move_to(&mut self, pt: Point, closed_subpath: bool) {
self.start_point = None;
self.cur_pt = Some(pt);
self.closed_subpath = closed_subpath;
}
pub fn line_to(&mut self, pt: Point) {
let cur_pt = self.cur_pt;
let stroked_path = &mut self.stroked_path;
let half_width = self.half_width;
if cur_pt.is_none() {
self.start_point = None;
} else if let Some(cur_pt) = cur_pt {
if let Some(normal) = compute_normal(cur_pt, pt) {
if self.start_point.is_none() {
if !self.closed_subpath {
// cap beginning
cap_line(stroked_path, &self.style, cur_pt, flip(normal));
}
self.start_point = Some((cur_pt, normal));
} else {
join_line(stroked_path, &self.style, cur_pt, self.last_normal, normal);
}
if stroked_path.aa {
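// Added note: the antialiased segment is emitted as three strips: a one-pixel
// ramp quad along each side (between offsets half_width - 0.5 and
// half_width + 0.5 from the centerline) whose coverage fades to zero, and a
// fully covered core quad spanning the two inner edges.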
stroked_path.ramp(
pt.x + normal.x * (half_width - 0.5),
pt.y + normal.y * (half_width - 0.5),
pt.x + normal.x * (half_width + 0.5),
pt.y + normal.y * (half_width + 0.5),
cur_pt.x + normal.x * (half_width + 0.5),
cur_pt.y + normal.y * (half_width + 0.5),
cur_pt.x + normal.x * (half_width - 0.5),
cur_pt.y + normal.y * (half_width - 0.5),
);
stroked_path.quad(
cur_pt.x + normal.x * (half_width - 0.5),
cur_pt.y + normal.y * (half_width - 0.5),
pt.x + normal.x * (half_width - 0.5), pt.y + normal.y * (half_width - 0.5),
pt.x + -normal.x * (half_width - 0.5), pt.y + -normal.y * (half_width - 0.5),
cur_pt.x - normal.x * (half_width - 0.5),
cur_pt.y - normal.y * (half_width - 0.5),
);
stroked_path.ramp(
cur_pt.x - normal.x * (half_width - 0.5),
cur_pt.y - normal.y * (half_width - 0.5),
cur_pt.x - normal.x * (half_width + 0.5),
cur_pt.y - normal.y * (half_width + 0.5),
pt.x - normal.x * (half_width + 0.5),
pt.y - normal.y * (half_width + 0.5),
pt.x - normal.x * (half_width - 0.5),
pt.y - normal.y * (half_width - 0.5),
);
} else {
stroked_path.quad(
cur_pt.x + normal.x * half_width,
cur_pt.y + normal.y * half_width,
pt.x + normal.x * half_width, pt.y + normal.y * half_width,
pt.x + -normal.x * half_width, pt.y + -normal.y * half_width,
cur_pt.x - normal.x * half_width,
cur_pt.y - normal.y * half_width,
);
}
self.last_normal = normal;
}
}
self.cur_pt = Some(pt);
}
pub fn curve_to(&mut self, cx1: Point, cx2: Point, pt: Point) {
self.curve_to_internal(cx1, cx2, pt, false);
}
pub fn curve_to_capped(&mut self, cx1: Point, cx2: Point, pt: Point) {
self.curve_to_internal(cx1, cx2, pt, true);
}
pub fn curve_to_internal(&mut self, cx1: Point, cx2: Point, pt: Point, end: bool) {
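// Added note: the cubic Bezier is not stroked directly; it is flattened into
// line segments by CBezierFlattener (the 0.25 argument below is presumably the
// flattening tolerance) and each flattened point is forwarded to line_to, or
// line_to_capped for the final point of a capped curve.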
struct Target<'a> { stroker: &'a mut Stroker, end: bool }
impl<'a> CFlatteningSink for Target<'a> {
fn AcceptPointAndTangent(&mut self, _: &GpPointR, _: &GpPointR, _: bool) -> HRESULT {
// The curve is flattened without tangents (Flatten(false) below), so this
// callback is never expected to be called.
panic!()
}
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
_t: f64,
// Parameter we're at
_aborted: &mut bool,
last_point: bool) -> HRESULT {
if last_point && self.end {
self.stroker.line_to_capped(Point::new(pt.x as f32, pt.y as f32));
} else {
self.stroker.line_to(Point::new(pt.x as f32, pt.y as f32));
}
return S_OK;
}
}
let cur_pt = self.cur_pt.unwrap_or(cx1);
let bezier = CBezier::new([GpPointR { x: cur_pt.x as f64, y: cur_pt.y as f64, },
GpPointR { x: cx1.x as f64, y: cx1.y as f64, },
GpPointR { x: cx2.x as f64, y: cx2.y as f64, },
GpPointR { x: pt.x as f64, y: pt.y as f64, }]);
let mut t = Target{ stroker: self, end };
let mut f = CBezierFlattener::new(&bezier, &mut t, 0.25);
f.Flatten(false);
}
pub fn close(&mut self) {
let stroked_path = &mut self.stroked_path;
let half_width = self.half_width;
if let (Some(cur_pt), Some((end_point, start_normal))) = (self.cur_pt, self.start_point) {
if let Some(normal) = compute_normal(cur_pt, end_point) {
join_line(stroked_path, &self.style, cur_pt, self.last_normal, normal);
if stroked_path.aa {
stroked_path.ramp(
end_point.x + normal.x * (half_width - 0.5),
end_point.y + normal.y * (half_width - 0.5),
end_point.x + normal.x * (half_width + 0.5),
end_point.y + normal.y * (half_width + 0.5),
cur_pt.x + normal.x * (half_width + 0.5),
cur_pt.y + normal.y * (half_width + 0.5),
cur_pt.x + normal.x * (half_width - 0.5),
cur_pt.y + normal.y * (half_width - 0.5),
);
stroked_path.quad(
cur_pt.x + normal.x * (half_width - 0.5),
cur_pt.y + normal.y * (half_width - 0.5),
end_point.x + normal.x * (half_width - 0.5), end_point.y + normal.y * (half_width - 0.5),
end_point.x + -normal.x * (half_width - 0.5), end_point.y + -normal.y * (half_width - 0.5),
cur_pt.x - normal.x * (half_width - 0.5),
cur_pt.y - normal.y * (half_width - 0.5),
);
stroked_path.ramp(
cur_pt.x - normal.x * (half_width - 0.5),
cur_pt.y - normal.y * (half_width - 0.5),
cur_pt.x - normal.x * (half_width + 0.5),
cur_pt.y - normal.y * (half_width + 0.5),
end_point.x - normal.x * (half_width + 0.5),
end_point.y - normal.y * (half_width + 0.5),
end_point.x - normal.x * (half_width - 0.5),
end_point.y - normal.y * (half_width - 0.5),
);
} else {
stroked_path.quad(
cur_pt.x + normal.x * half_width,
cur_pt.y + normal.y * half_width,
end_point.x + normal.x * half_width, end_point.y + normal.y * half_width,
end_point.x + -normal.x * half_width, end_point.y + -normal.y * half_width,
cur_pt.x - normal.x * half_width,
cur_pt.y - normal.y * half_width,
);
}
join_line(stroked_path, &self.style, end_point, normal, start_normal);
} else {
join_line(stroked_path, &self.style, end_point, self.last_normal, start_normal);
}
}
self.cur_pt = self.start_point.map(|x| x.0);
self.start_point = None;
}
pub fn finish(&mut self) -> Vec<Vertex> {
let mut stroked_path = std::mem::replace(&mut self.stroked_path, PathBuilder::new(1.));
if let (Some(cur_pt), Some((point, normal))) = (self.cur_pt, self.start_point) {
// cap end
cap_line(&mut stroked_path, &self.style, cur_pt, self.last_normal);
// cap beginning
cap_line( &mut stroked_path, &self.style, point, flip(normal));
}
stroked_path.finish()
}
}
#[test]
fn simple() {
let mut stroker = Stroker::new(&StrokeStyle{
cap: LineCap::Round,
join: LineJoin::Bevel,
width: 20.,
..Default::default()});
stroker.move_to(Point::new(20., 20.), false);
stroker.line_to(Point::new(100., 100.));
stroker.line_to_capped(Point::new(110., 20.));
stroker.move_to(Point::new(120., 20.), true);
stroker.line_to(Point::new(120., 50.));
stroker.line_to(Point::new(140., 50.));
stroker.close();
let stroked = stroker.finish();
assert_eq!(stroked.len(), 330);
}
#[test]
fn curve() {
let mut stroker = Stroker::new(&StrokeStyle{
cap: LineCap::Round,
join: LineJoin::Bevel,
width: 20.,
..Default::default()});
stroker.move_to(Point::new(20., 160.), true);
stroker.curve_to(Point::new(100., 160.), Point::new(100., 180.), Point::new(20., 180.));
stroker.close();
let stroked = stroker.finish();
assert_eq!(stroked.len(), 1089);
}
#[test]
fn width_one_radius_arc() {
// previously this caused us to try to flatten an arc with radius 0
let mut stroker = Stroker::new(&StrokeStyle{
cap: LineCap::Round,
join: LineJoin::Round,
width: 1.,
..Default::default()});
stroker.move_to(Point::new(20., 20.), false);
stroker.line_to(Point::new(30., 160.));
stroker.line_to_capped(Point::new(40., 20.));
stroker.finish();
}

View file

@ -1,190 +0,0 @@
/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
/**************************************************************************
*
* Copyright 2012 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
*/
use std::ops::Index;
use crate::Vertex as OutputVertex;
#[derive(Debug)]
struct Vertex {
x: f32,
y: f32,
coverage: f32
}
#[derive(Debug)]
struct Triangle {
v: [Vertex; 3],
}
impl Index<usize> for Triangle {
type Output = Vertex;
fn index(&self, index: usize) -> &Self::Output {
&self.v[index]
}
}
// D3D11 mandates 8 bit subpixel precision:
// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
const FIXED_SHIFT: i32 = 8;
const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
/* Proper rounding of float to integer */
fn iround(mut v: f32) -> i64 {
if v > 0.0 {
v += 0.5;
}
if v < 0.0 {
v -= 0.5;
}
v as i64
}
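// Illustrative sketch, not part of the original source: with FIXED_SHIFT = 8,
// FIXED_ONE is 256, so a coordinate of 1.25 pixels becomes
// iround(FIXED_ONE * 1.25) == 320 in fixed point, and shifting right by
// FIXED_SHIFT recovers the integer pixel.
#[test]
fn fixed_point_example() {
assert_eq!(iround(FIXED_ONE * 1.25), 320);
assert_eq!(iround(FIXED_ONE * 1.25) >> FIXED_SHIFT, 1);
assert_eq!(iround(-2.5), -3);
}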
/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
fn rast_triangle(buffer: &mut [u8], width: usize, height: usize, tri: &Triangle) {
let center_offset = -0.5;
let mut coverage1 = tri[0].coverage;
let mut coverage2 = tri[1].coverage;
let mut coverage3 = tri[2].coverage;
/* fixed point coordinates */
let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
/* Force correct vertex order */
let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
if cross > 0 {
std::mem::swap(&mut x1, &mut x3);
std::mem::swap(&mut y1, &mut y3);
// I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
std::mem::swap(&mut coverage2, &mut coverage3);
} else {
std::mem::swap(&mut coverage1, &mut coverage3);
}
/* Deltas */
let dx12 = x1 - x2;
let dx23 = x2 - x3;
let dx31 = x3 - x1;
let dy12 = y1 - y2;
let dy23 = y2 - y3;
let dy31 = y3 - y1;
/* Fixed-point deltas */
let fdx12 = dx12 << FIXED_SHIFT;
let fdx23 = dx23 << FIXED_SHIFT;
let fdx31 = dx31 << FIXED_SHIFT;
let fdy12 = dy12 << FIXED_SHIFT;
let fdy23 = dy23 << FIXED_SHIFT;
let fdy31 = dy31 << FIXED_SHIFT;
/* Bounding rectangle */
let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
minx = minx.max(0);
maxx = maxx.min(width as i64 - 1);
miny = miny.max(0);
maxy = maxy.min(height as i64 - 1);
/* Half-edge constants */
let mut c1 = dy12 * x1 - dx12 * y1;
let mut c2 = dy23 * x2 - dx23 * y2;
let mut c3 = dy31 * x3 - dx31 * y3;
/* Correct for top-left filling convention */
if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
//dbg!(minx, maxx, tri, cross);
/* Perform rasterization */
let mut buffer = &mut buffer[miny as usize * width..];
for _y in miny..=maxy {
let mut cx1 = cy1;
let mut cx2 = cy2;
let mut cx3 = cy3;
for x in minx..=maxx {
if cx1 > 0 && cx2 > 0 && cx3 > 0 {
// cross is equal to 2*area of the triangle.
// we can normalize cx by 2*area to get barycentric coords.
let area = cross.abs() as f32;
let bary = (cx1 as f32 / area, cx2 as f32 / area, cx3 as f32 / area);
let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
let color = (coverages * 255. + 0.5) as u8;
buffer[x as usize] = color;
}
cx1 -= fdy12;
cx2 -= fdy23;
cx3 -= fdy31;
}
cy1 += fdx12;
cy2 += fdx23;
cy3 += fdx31;
buffer = &mut buffer[width..];
}
}
pub fn rasterize_to_mask(vertices: &[OutputVertex], width: u32, height: u32) -> Box<[u8]> {
let mut mask = vec![0; (width * height) as usize];
for n in (0..vertices.len()).step_by(3) {
let tri =
[&vertices[n], &vertices[n+1], &vertices[n+2]];
let tri = Triangle { v: [
Vertex { x: tri[0].x, y: tri[0].y, coverage: tri[0].coverage},
Vertex { x: tri[1].x, y: tri[1].y, coverage: tri[1].coverage},
Vertex { x: tri[2].x, y: tri[2].y, coverage: tri[2].coverage}
]
};
rast_triangle(&mut mask, width as usize, height as usize, &tri);
}
mask.into_boxed_slice()
}
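// Illustrative sketch, not part of the original source (assumes the crate's
// public Vertex type exposes plain x/y/coverage fields, as read above):
// rasterizing one fully covered triangle into a 16x16 mask.
#[test]
fn rasterize_single_triangle() {
let tri = [
OutputVertex { x: 1., y: 1., coverage: 1. },
OutputVertex { x: 15., y: 1., coverage: 1. },
OutputVertex { x: 1., y: 15., coverage: 1. },
];
let mask = rasterize_to_mask(&tri, 16, 16);
assert_eq!(mask.len(), 16 * 16);
// A pixel well inside the triangle should be fully opaque.
assert_eq!(mask[3 * 16 + 3], 255);
}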

View file

@ -100,7 +100,6 @@ processtools = { path = "../../../components/processtools" }
qcms = { path = "../../../../gfx/qcms", features = ["c_bindings", "neon"], default-features = false }
wpf-gpu-raster = { git = "https://github.com/FirefoxGraphics/wpf-gpu-raster", rev = "f0d95ce14af8a8de74f469dbad715c4064fca2e1" }
aa-stroke = { git = "https://github.com/FirefoxGraphics/aa-stroke", rev = "d5cb0fa467e66fdd2deab3211be2284ed1be5da7" }
# Force url to stay at 2.1.0. See bug 1734538.
url = "=2.1.0"

View file

@ -72,7 +72,6 @@ extern crate wgpu_bindings;
extern crate qcms;
extern crate wpf_gpu_raster;
extern crate aa_stroke;
extern crate unic_langid;
extern crate unic_langid_ffi;