Bug 658249. Update pixman to 0f6a4d45886d64b244d57403609f0377b58cc7fb.

Andrea Canciani (1):
      test: Fix compilation on win32

Dave Yeo (1):
      Check for working mmap()

Gilles Espinasse (2):
      Fix missing AC_MSG_RESULT value from Werror test
      Fix OpenMP not supported case

Siarhei Siamashka (7):
      ARM: tweaked horizontal weights update in NEON bilinear scaling code
      ARM: use aligned memory writes in NEON bilinear scaling code
      ARM: support for software pipelining in bilinear macros
      ARM: use less ARM instructions in NEON bilinear scaling code
      ARM: support different levels of loop unrolling in bilinear scaler
      ARM: pipelined NEON implementation of bilinear scaled 'src_8888_8888'
      ARM: pipelined NEON implementation of bilinear scaled 'src_8888_0565'

Søren Sandmann Pedersen (8):
      Makefile.am: Put development releases in "snapshots" directory
      ARM: Tiny improvement in over_n_8888_8888_ca_process_pixblock_head
      ARM: Add 'neon_composite_over_n_8888_0565_ca' fast path
      Offset rendering in pixman_composite_trapezoids() by (x_dst, y_dst)
      Pre-release version bump to 0.21.8
      Post-release version bump to 0.21.9
      Pre-release version bump to 0.22.0
      Post-release version bump to 0.23.1

Taekyun Kim (3):
      ARM: Common macro for scaled bilinear scanline function with A8 mask
      ARM: NEON scanline functions for bilinear scaling
      ARM: Enable bilinear fast paths using scanline functions in pixman-arm-neon-asm-bilinear.S

--HG--
extra : rebase_source : aff729718af3e7d25577603241d204bf02ecfb90
Jeff Muizelaar 2011-05-18 13:43:30 -04:00
Parent b97a70d1cb
Commit bc110d668a
9 changed files: 1646 additions and 91 deletions


@@ -166,6 +166,7 @@ endif
ifdef USE_ARM_NEON_GCC
CSRCS += pixman-arm-neon.c
SSRCS += pixman-arm-neon-asm.S
SSRCS += pixman-arm-neon-asm-bilinear.S
DEFINES += -DUSE_ARM_NEON
ARM_NEON_CFLAGS = -mfpu=neon
endif


@@ -406,4 +406,49 @@ FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_pad_##op, \
scaled_bilinear_scanline_##cputype##_##name##_##op, \
src_type, uint32_t, dst_type, PAD, FALSE, FALSE)
#define PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST(flags, cputype, name, op, \
src_type, dst_type) \
void \
pixman_scaled_bilinear_scanline_##name##_##op##_asm_##cputype ( \
dst_type * dst, \
const uint8_t * mask, \
const src_type * top, \
const src_type * bottom, \
int wt, \
int wb, \
pixman_fixed_t x, \
pixman_fixed_t ux, \
int width); \
\
static force_inline void \
scaled_bilinear_scanline_##cputype##_##name##_##op ( \
dst_type * dst, \
const uint8_t * mask, \
const src_type * src_top, \
const src_type * src_bottom, \
int32_t w, \
int wt, \
int wb, \
pixman_fixed_t vx, \
pixman_fixed_t unit_x, \
pixman_fixed_t max_vx, \
pixman_bool_t zero_src) \
{ \
if ((flags & SKIP_ZERO_SRC) && zero_src) \
return; \
pixman_scaled_bilinear_scanline_##name##_##op##_asm_##cputype ( \
dst, mask, src_top, src_bottom, wt, wb, vx, unit_x, w); \
} \
\
FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_cover_##op, \
scaled_bilinear_scanline_##cputype##_##name##_##op, \
src_type, uint8_t, dst_type, COVER, TRUE, FALSE) \
FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_none_##op, \
scaled_bilinear_scanline_##cputype##_##name##_##op, \
src_type, uint8_t, dst_type, NONE, TRUE, FALSE) \
FAST_BILINEAR_MAINLOOP_COMMON (cputype##_##name##_pad_##op, \
scaled_bilinear_scanline_##cputype##_##name##_##op, \
src_type, uint8_t, dst_type, PAD, TRUE, FALSE)
#endif
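For context, this binding macro is invoked from pixman-arm-neon.c later in this patch; each invocation declares the assembly scanline, wraps it in a small inline adapter, and instantiates the COVER/NONE/PAD main loops. For example:

    PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_8888, SRC,
                                                uint32_t, uint32_t)
    PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, OVER,
                                                uint32_t, uint32_t)

The first expands to the declaration of pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon plus the cover/none/pad main-loop variants that the SIMPLE_BILINEAR_A8_MASK_FAST_PATH (..., neon_8888_8_8888) table entries refer to; the SKIP_ZERO_SRC flag makes the OVER wrapper return early when the source is known to be zero.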


@@ -0,0 +1,768 @@
/*
* Copyright © 2011 SCore Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
* Author: Taekyun Kim (tkq.kim@samsung.com)
*/
/*
* This file contains scaled bilinear scanline functions implemented
* using Siarhei's older bilinear macro template.
*
* << General scanline function procedures >>
* 1. bilinear interpolate source pixels
* 2. load mask pixels
* 3. load destination pixels
* 4. duplicate mask to fill whole register
* 5. interleave source & destination pixels
* 6. apply mask to source pixels
* 7. combine source & destination pixels
* 8. deinterleave final result
* 9. store destination pixels
*
* All registers with a single number (e.g. src0, tmp0) are 64-bit registers.
* Registers with double numbers (src01, dst01) are 128-bit registers.
* All temp registers can be used freely outside the code block.
* Assume that the symbols (register .req) OUT and MASK are defined by the caller of these macro blocks.
*
* TODOs
* Support 0565 pixel format
* Optimization for the two-pixel and last-pixel cases
*
* Remarks
* There can be lots of pipeline stalls inside a code block and between code blocks.
* Further optimizations will be done by new macro templates using head/tail_head/tail scheme.
*/
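Put another way, for the 8888 source, a8 mask, OVER case each destination pixel produced by these scanlines corresponds roughly to the scalar C below. This is only a sketch of the arithmetic: the helper names are made up, it assumes wt + wb == 256 and an 8-bit horizontal weight taken from the 16.16 coordinate, and it ignores the channel interleaving and software pipelining done by the NEON code.

    #include <stdint.h>

    /* Rounded (a * b) / 255 approximation; this is what the
     * vmull.u8 + vrshr.u16 #8 + vraddhn.u16 sequences in this file compute. */
    static inline uint32_t mul_un8 (uint32_t a, uint32_t b)
    {
        uint32_t t = a * b + 128;
        return (t + (t >> 8)) >> 8;
    }

    static inline uint32_t chan (uint32_t p, int c) { return (p >> (c * 8)) & 0xff; }

    static uint32_t
    bilinear_over_8888_8_8888_pixel (const uint32_t *top, const uint32_t *bot,
                                     int32_t x, int wt, int wb, /* wt + wb == 256 */
                                     uint8_t m, uint32_t d)
    {
        int      i  = x >> 16;              /* integer source position */
        int      wx = (x >> 8) & 0xff;      /* horizontal weight       */
        uint32_t s  = 0, sm = 0, out = 0;

        for (int c = 0; c < 4; c++)         /* 1. bilinear interpolation */
        {
            uint32_t l = chan (top[i],     c) * wt + chan (bot[i],     c) * wb;
            uint32_t r = chan (top[i + 1], c) * wt + chan (bot[i + 1], c) * wb;
            s |= ((l * (256 - wx) + r * wx) >> 16) << (c * 8);
        }

        for (int c = 0; c < 4; c++)         /* 6. apply the a8 mask to every channel */
            sm |= mul_un8 (chan (s, c), m) << (c * 8);

        for (int c = 0; c < 4; c++)         /* 7. OVER: s + d * (255 - alpha(s)) */
        {
            uint32_t v = chan (sm, c) + mul_un8 (chan (d, c), 255 - chan (sm, 3));
            out |= (v > 255 ? 255 : v) << (c * 8);
        }
        return out;                         /* 9. the caller stores this to dst */
    }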
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined (__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.fpu neon
.arch armv7a
.object_arch armv4
.eabi_attribute 10, 0
.eabi_attribute 12, 0
.arm
.altmacro
#include "pixman-arm-neon-asm.h"
/*
* Bilinear macros from pixman-arm-neon-asm.S
*/
/* Supplementary macro for setting function attributes */
.macro pixman_asm_function fname
.func fname
.global fname
#ifdef __ELF__
.hidden fname
.type fname, %function
#endif
fname:
.endm
/*
* Bilinear scaling support code which tries to provide pixel fetching, color
* format conversion, and interpolation as separate macros which can be used
* as the basic building blocks for constructing bilinear scanline functions.
*/
.macro bilinear_load_8888 reg1, reg2, tmp
mov TMP2, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #2
add TMP2, BOTTOM, TMP2, asl #2
vld1.32 {reg1}, [TMP1]
vld1.32 {reg2}, [TMP2]
.endm
.macro bilinear_load_0565 reg1, reg2, tmp
mov TMP2, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
vld1.32 {reg2[0]}, [TMP1]
vld1.32 {reg2[1]}, [TMP2]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
.macro bilinear_load_and_vertical_interpolate_two_8888 \
acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
bilinear_load_8888 reg1, reg2, tmp1
vmull.u8 acc1, reg1, d28
vmlal.u8 acc1, reg2, d29
bilinear_load_8888 reg3, reg4, tmp2
vmull.u8 acc2, reg3, d28
vmlal.u8 acc2, reg4, d29
.endm
.macro bilinear_load_and_vertical_interpolate_four_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
bilinear_load_and_vertical_interpolate_two_8888 \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
.endm
.macro bilinear_load_and_vertical_interpolate_two_0565 \
acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
mov TMP2, X, asr #16
add X, X, UX
mov TMP4, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
add TMP3, TOP, TMP4, asl #1
add TMP4, BOTTOM, TMP4, asl #1
vld1.32 {acc2lo[0]}, [TMP1]
vld1.32 {acc2hi[0]}, [TMP3]
vld1.32 {acc2lo[1]}, [TMP2]
vld1.32 {acc2hi[1]}, [TMP4]
convert_0565_to_x888 acc2, reg3, reg2, reg1
vzip.u8 reg1, reg3
vzip.u8 reg2, reg4
vzip.u8 reg3, reg4
vzip.u8 reg1, reg2
vmull.u8 acc1, reg1, d28
vmlal.u8 acc1, reg2, d29
vmull.u8 acc2, reg3, d28
vmlal.u8 acc2, reg4, d29
.endm
.macro bilinear_load_and_vertical_interpolate_four_0565 \
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
mov TMP2, X, asr #16
add X, X, UX
mov TMP4, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
add TMP3, TOP, TMP4, asl #1
add TMP4, BOTTOM, TMP4, asl #1
vld1.32 {xacc2lo[0]}, [TMP1]
vld1.32 {xacc2hi[0]}, [TMP3]
vld1.32 {xacc2lo[1]}, [TMP2]
vld1.32 {xacc2hi[1]}, [TMP4]
convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
mov TMP2, X, asr #16
add X, X, UX
mov TMP4, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
add TMP3, TOP, TMP4, asl #1
add TMP4, BOTTOM, TMP4, asl #1
vld1.32 {yacc2lo[0]}, [TMP1]
vzip.u8 xreg1, xreg3
vld1.32 {yacc2hi[0]}, [TMP3]
vzip.u8 xreg2, xreg4
vld1.32 {yacc2lo[1]}, [TMP2]
vzip.u8 xreg3, xreg4
vld1.32 {yacc2hi[1]}, [TMP4]
vzip.u8 xreg1, xreg2
convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
vmull.u8 xacc1, xreg1, d28
vzip.u8 yreg1, yreg3
vmlal.u8 xacc1, xreg2, d29
vzip.u8 yreg2, yreg4
vmull.u8 xacc2, xreg3, d28
vzip.u8 yreg3, yreg4
vmlal.u8 xacc2, xreg4, d29
vzip.u8 yreg1, yreg2
vmull.u8 yacc1, yreg1, d28
vmlal.u8 yacc1, yreg2, d29
vmull.u8 yacc2, yreg3, d28
vmlal.u8 yacc2, yreg4, d29
.endm
.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
vst1.32 {d0, d1}, [OUT]!
.elseif numpix == 2
vst1.32 {d0}, [OUT]!
.elseif numpix == 1
vst1.32 {d0[0]}, [OUT, :32]!
.else
.error bilinear_store_8888 numpix is unsupported
.endif
.endm
.macro bilinear_store_0565 numpix, tmp1, tmp2
vuzp.u8 d0, d1
vuzp.u8 d2, d3
vuzp.u8 d1, d3
vuzp.u8 d0, d2
convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2
.if numpix == 4
vst1.16 {d2}, [OUT]!
.elseif numpix == 2
vst1.32 {d2[0]}, [OUT]!
.elseif numpix == 1
vst1.16 {d2[0]}, [OUT]!
.else
.error bilinear_store_0565 numpix is unsupported
.endif
.endm
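The 0565 path above relies on the usual a8r8g8b8 <-> r5g6b5 channel packing (done here with vuzp/vsri and the convert_* helpers from pixman-arm-neon-asm.h). As a scalar reference, one pixel converts roughly as below; the real helpers work on whole vectors and may fill the low bits differently:

    /* Keep the top 5/6/5 bits of R/G/B from an x8r8g8b8 pixel. */
    static inline uint16_t pack_0565 (uint32_t p)
    {
        return (uint16_t) (((p >> 8) & 0xf800) |   /* R: bits 23..19 -> 15..11 */
                           ((p >> 5) & 0x07e0) |   /* G: bits 15..10 -> 10..5  */
                           ((p >> 3) & 0x001f));   /* B: bits  7..3  ->  4..0  */
    }

    /* Expand r5g6b5 back to x8r8g8b8 (low bits left at zero here). */
    static inline uint32_t unpack_0565 (uint16_t p)
    {
        return (((uint32_t) p & 0xf800) << 8) |
               (((uint32_t) p & 0x07e0) << 5) |
               (((uint32_t) p & 0x001f) << 3);
    }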
/*
* Macros for loading mask pixels into register 'mask'.
* vdup must be done somewhere else.
*/
.macro bilinear_load_mask_x numpix, mask
.endm
.macro bilinear_load_mask_8 numpix, mask
.if numpix == 4
vld1.32 {mask[0]}, [MASK]!
.elseif numpix == 2
vld1.16 {mask[0]}, [MASK]!
.elseif numpix == 1
vld1.8 {mask[0]}, [MASK]!
.else
.error bilinear_load_mask_8 numpix is unsupported
.endif
.endm
.macro bilinear_load_mask mask_fmt, numpix, mask
bilinear_load_mask_&mask_fmt numpix, mask
.endm
/*
* Macros for loading destination pixels into registers 'dst0' and 'dst1'.
* Interleaving should be done somewhere else.
*/
.macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.if numpix == 4
vld1.32 {dst0, dst1}, [OUT]
.elseif numpix == 2
vld1.32 {dst0}, [OUT]
.elseif numpix == 1
vld1.32 {dst0[0]}, [OUT]
.else
.error bilinear_load_dst_8888 numpix is unsupported
.endif
.endm
.macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01
bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01
bilinear_load_dst_8888 numpix, dst0, dst1, dst01
.endm
.macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01
bilinear_load_dst_&dst_fmt&_&op numpix, dst0, dst1, dst01
.endm
/*
* Macros for duplicating partially loaded mask to fill entire register.
* We will apply mask to interleaved source pixels, that is
* (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3)
* (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3)
* So we need to duplicate the loaded mask to fill the whole register.
*
* For two pixel case
* (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
* We can do some optimizations for this, including the one-pixel case.
*/
.macro bilinear_duplicate_mask_x numpix, mask
.endm
.macro bilinear_duplicate_mask_8 numpix, mask
.if numpix == 4
vdup.32 mask, mask[0]
.elseif numpix == 2
vdup.16 mask, mask[0]
.elseif numpix == 1
vdup.8 mask, mask[0]
.else
.error bilinear_duplicate_mask_8 is unsupported
.endif
.endm
.macro bilinear_duplicate_mask mask_fmt, numpix, mask
bilinear_duplicate_mask_&mask_fmt numpix, mask
.endm
/*
* Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
* Interleaving should be done when the mask is enabled or the operator is 'over'.
*/
.macro bilinear_interleave src0, src1, dst0, dst1
vuzp.8 src0, src1
vuzp.8 dst0, dst1
vuzp.8 src0, src1
vuzp.8 dst0, dst1
.endm
.macro bilinear_interleave_src_dst_x_src \
numpix, src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_x_over \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, dst0, dst1
.endm
.macro bilinear_interleave_src_dst_x_add \
numpix, src0, src1, src01, dst0, dst1, dst01
.endm
.macro bilinear_interleave_src_dst_8_src \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, dst0, dst1
.endm
.macro bilinear_interleave_src_dst_8_over \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, dst0, dst1
.endm
.macro bilinear_interleave_src_dst_8_add \
numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave src0, src1, dst0, dst1
.endm
.macro bilinear_interleave_src_dst \
mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01
bilinear_interleave_src_dst_&mask_fmt&_&op \
numpix, src0, src1, src01, dst0, dst1, dst01
.endm
/*
* Macros for applying masks to src pixels. (see combine_mask_u() function)
* src, dst should be in interleaved form.
* mask register should be in form (m0, m1, m2, m3).
*/
.macro bilinear_apply_mask_to_src_x \
numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
.endm
.macro bilinear_apply_mask_to_src_8 \
numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
vmull.u8 tmp01, src0, mask
vmull.u8 tmp23, src1, mask
/* bubbles */
vrshr.u16 tmp45, tmp01, #8
vrshr.u16 tmp67, tmp23, #8
/* bubbles */
vraddhn.u16 src0, tmp45, tmp01
vraddhn.u16 src1, tmp67, tmp23
.endm
.macro bilinear_apply_mask_to_src \
mask_fmt, numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
bilinear_apply_mask_to_src_&mask_fmt \
numpix, src0, src1, src01, mask, \
tmp01, tmp23, tmp45, tmp67
.endm
/*
* Macros for combining src and destination pixels.
* Whether to interleave depends on the operator 'op'.
*/
.macro bilinear_combine_src \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
.endm
.macro bilinear_combine_over \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
vdup.32 tmp8, src1[1]
/* bubbles */
vmvn.8 tmp8, tmp8
/* bubbles */
vmull.u8 tmp01, dst0, tmp8
/* bubbles */
vmull.u8 tmp23, dst1, tmp8
/* bubbles */
vrshr.u16 tmp45, tmp01, #8
vrshr.u16 tmp67, tmp23, #8
/* bubbles */
vraddhn.u16 dst0, tmp45, tmp01
vraddhn.u16 dst1, tmp67, tmp23
/* bubbles */
vqadd.u8 src01, dst01, src01
.endm
.macro bilinear_combine_add \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
vqadd.u8 src01, dst01, src01
.endm
.macro bilinear_combine \
op, numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
bilinear_combine_&op \
numpix, src0, src1, src01, dst0, dst1, dst01, \
tmp01, tmp23, tmp45, tmp67, tmp8
.endm
/*
* Macros for final deinterleaving of destination pixels if needed.
*/
.macro bilinear_deinterleave numpix, dst0, dst1, dst01
vuzp.8 dst0, dst1
/* bubbles */
vuzp.8 dst0, dst1
.endm
.macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01
bilinear_deinterleave numpix, dst0, dst1, dst01
.endm
.macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01
bilinear_deinterleave_dst_&mask_fmt&_&op numpix, dst0, dst1, dst01
.endm
.macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op
bilinear_load_&src_fmt d0, d1, d2
bilinear_load_mask mask_fmt, 1, d4
bilinear_load_dst dst_fmt, op, 1, d18, d19, q9
vmull.u8 q1, d0, d28
vmlal.u8 q1, d1, d29
vshr.u16 d30, d24, #8
/* 4 cycles bubble */
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
/* 5 cycles bubble */
bilinear_duplicate_mask mask_fmt, 1, d4
vshrn.u32 d0, q0, #16
/* 3 cycles bubble */
vmovn.u16 d0, q0
/* 1 cycle bubble */
bilinear_interleave_src_dst \
mask_fmt, op, 1, d0, d1, q0, d18, d19, q9
bilinear_apply_mask_to_src \
mask_fmt, 1, d0, d1, q0, d4, \
q3, q8, q10, q11
bilinear_combine \
op, 1, d0, d1, q0, d18, d19, q9, \
q3, q8, q10, q11, d5
bilinear_deinterleave_dst mask_fmt, op, 1, d0, d1, q0
bilinear_store_&dst_fmt 1, q2, q3
.endm
.macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op
bilinear_load_and_vertical_interpolate_two_&src_fmt \
q1, q11, d0, d1, d20, d21, d22, d23
bilinear_load_mask mask_fmt, 2, d4
bilinear_load_dst dst_fmt, op, 2, d18, d19, q9
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshrn.u32 d30, q0, #16
vshrn.u32 d31, q10, #16
bilinear_duplicate_mask mask_fmt, 2, d4
vmovn.u16 d0, q15
bilinear_interleave_src_dst \
mask_fmt, op, 2, d0, d1, q0, d18, d19, q9
bilinear_apply_mask_to_src \
mask_fmt, 2, d0, d1, q0, d4, \
q3, q8, q10, q11
bilinear_combine \
op, 2, d0, d1, q0, d18, d19, q9, \
q3, q8, q10, q11, d5
bilinear_deinterleave_dst mask_fmt, op, 2, d0, d1, q0
bilinear_store_&dst_fmt 2, q2, q3
.endm
.macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op
bilinear_load_and_vertical_interpolate_four_&src_fmt \
q1, q11, d0, d1, d20, d21, d22, d23 \
q3, q9, d4, d5, d16, d17, d18, d19
pld [TMP1, PF_OFFS]
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshr.u16 q15, q12, #8
vshll.u16 q2, d6, #8
vmlsl.u16 q2, d6, d30
vmlal.u16 q2, d7, d30
vshll.u16 q8, d18, #8
bilinear_load_mask mask_fmt, 4, d30
bilinear_load_dst dst_fmt, op, 4, d2, d3, q1
pld [TMP2, PF_OFFS]
vmlsl.u16 q8, d18, d31
vmlal.u16 q8, d19, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q10, #16
vshrn.u32 d4, q2, #16
vshrn.u32 d5, q8, #16
bilinear_duplicate_mask mask_fmt, 4, d30
vmovn.u16 d0, q0
vmovn.u16 d1, q2
bilinear_interleave_src_dst \
mask_fmt, op, 4, d0, d1, q0, d2, d3, q1
bilinear_apply_mask_to_src \
mask_fmt, 4, d0, d1, q0, d30, \
q3, q8, q9, q10
bilinear_combine \
op, 4, d0, d1, q0, d2, d3, q1, \
q3, q8, q9, q10, d22
bilinear_deinterleave_dst mask_fmt, op, 4, d0, d1, q0
bilinear_store_&dst_fmt 4, q2, q3
.endm
.macro generate_bilinear_scanline_func_src_dst \
fname, src_fmt, dst_fmt, op, \
bpp_shift, prefetch_distance
pixman_asm_function fname
OUT .req r0
TOP .req r1
BOTTOM .req r2
WT .req r3
WB .req r4
X .req r5
UX .req r6
WIDTH .req ip
TMP1 .req r3
TMP2 .req r4
PF_OFFS .req r7
TMP3 .req r8
TMP4 .req r9
mov ip, sp
push {r4, r5, r6, r7, r8, r9}
mov PF_OFFS, #prefetch_distance
ldmia ip, {WB, X, UX, WIDTH}
mul PF_OFFS, PF_OFFS, UX
cmp WIDTH, #0
ble 3f
vdup.u16 q12, X
vdup.u16 q13, UX
vdup.u8 d28, WT
vdup.u8 d29, WB
vadd.u16 d25, d25, d26
vadd.u16 q13, q13, q13
subs WIDTH, WIDTH, #4
blt 1f
mov PF_OFFS, PF_OFFS, asr #(16 - bpp_shift)
0:
bilinear_interpolate_four_pixels src_fmt, x, dst_fmt, op
subs WIDTH, WIDTH, #4
bge 0b
1:
tst WIDTH, #2
beq 2f
bilinear_interpolate_two_pixels src_fmt, x, dst_fmt, op
2:
tst WIDTH, #1
beq 3f
bilinear_interpolate_last_pixel src_fmt, x, dst_fmt, op
3:
pop {r4, r5, r6, r7, r8, r9}
bx lr
.unreq OUT
.unreq TOP
.unreq BOTTOM
.unreq WT
.unreq WB
.unreq X
.unreq UX
.unreq WIDTH
.unreq TMP1
.unreq TMP2
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
.endfunc
.endm
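Given the register assignments above (OUT in r0, TOP/BOTTOM in r1/r2, WT in r3, the rest on the stack), the functions generated by this macro are presumably called from C through the existing PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST binding with a prototype along these lines (an assumption inferred from the prologue, not a quote from the headers):

    void
    pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon (
        uint32_t       *dst,      /* OUT:    r0    */
        const uint32_t *top,      /* TOP:    r1    */
        const uint32_t *bottom,   /* BOTTOM: r2    */
        int             wt,       /* WT:     r3    */
        int             wb,       /* WB:     stack */
        pixman_fixed_t  x,        /* X:      stack */
        pixman_fixed_t  ux,       /* UX:     stack */
        int             width);   /* WIDTH:  stack */

The masked variant generated by generate_bilinear_scanline_func_src_a8_dst below matches the declaration added to the binding header earlier in this patch, with the a8 mask pointer as the second argument.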
.macro generate_bilinear_scanline_func_src_a8_dst \
fname, src_fmt, dst_fmt, op, \
bpp_shift, prefetch_distance
pixman_asm_function fname
OUT .req r0
MASK .req r1
TOP .req r2
BOTTOM .req r3
WT .req r4
WB .req r5
X .req r6
UX .req r7
WIDTH .req ip
TMP1 .req r4
TMP2 .req r5
PF_OFFS .req r8
TMP3 .req r9
TMP4 .req r10
mov ip, sp
push {r4, r5, r6, r7, r8, r9, r10, ip}
mov PF_OFFS, #prefetch_distance
ldmia ip, {WT, WB, X, UX, WIDTH}
mul PF_OFFS, PF_OFFS, UX
cmp WIDTH, #0
ble 3f
vdup.u16 q12, X
vdup.u16 q13, UX
vdup.u8 d28, WT
vdup.u8 d29, WB
vadd.u16 d25, d25, d26
vadd.u16 q13, q13, q13
subs WIDTH, WIDTH, #4
blt 1f
mov PF_OFFS, PF_OFFS, asr #(16 - bpp_shift)
0:
bilinear_interpolate_four_pixels src_fmt, 8, dst_fmt, op
subs WIDTH, WIDTH, #4
bge 0b
1:
tst WIDTH, #2
beq 2f
bilinear_interpolate_two_pixels src_fmt, 8, dst_fmt, op
2:
tst WIDTH, #1
beq 3f
bilinear_interpolate_last_pixel src_fmt, 8, dst_fmt, op
3:
pop {r4, r5, r6, r7, r8, r9, r10, ip}
bx lr
.unreq OUT
.unreq TOP
.unreq BOTTOM
.unreq WT
.unreq WB
.unreq X
.unreq UX
.unreq WIDTH
.unreq MASK
.unreq TMP1
.unreq TMP2
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
.endfunc
.endm
generate_bilinear_scanline_func_src_dst \
pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \
8888, 8888, over, 2, 28
generate_bilinear_scanline_func_src_dst \
pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \
8888, 8888, add, 2, 28
generate_bilinear_scanline_func_src_a8_dst \
pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \
8888, 8888, src, 2, 28
generate_bilinear_scanline_func_src_a8_dst \
pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \
8888, 0565, src, 2, 28
generate_bilinear_scanline_func_src_a8_dst \
pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \
0565, 8888, src, 1, 28
generate_bilinear_scanline_func_src_a8_dst \
pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \
0565, 0565, src, 1, 28
generate_bilinear_scanline_func_src_a8_dst \
pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \
8888, 8888, over, 2, 28
generate_bilinear_scanline_func_src_a8_dst \
pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \
8888, 8888, add, 2, 28


@@ -1358,11 +1358,10 @@ generate_composite_function \
*
* output: updated dest in {d28, d29, d30, d31}
*/
vmvn.8 d24, d24
vmvn.8 d25, d25
vmvn.8 q12, q12
vmvn.8 d26, d26
vmull.u8 q8, d24, d4
vmull.u8 q9, d25, d5
vmvn.8 d26, d26
vmvn.8 d27, d3
vmull.u8 q10, d26, d6
vmull.u8 q11, d27, d7
@@ -1427,6 +1426,175 @@ generate_composite_function \
/******************************************************************************/
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head
/*
* 'combine_mask_ca' replacement
*
* input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A]
* mask in {d24, d25, d26} [B, G, R]
* output: updated src in {d0, d1, d2 } [B, G, R]
* updated mask in {d24, d25, d26} [B, G, R]
*/
vmull.u8 q0, d24, d8
vmull.u8 q1, d25, d9
vmull.u8 q6, d26, d10
vmull.u8 q9, d11, d25
vmull.u8 q12, d11, d24
vmull.u8 q13, d11, d26
vrshr.u16 q8, q0, #8
vrshr.u16 q10, q1, #8
vrshr.u16 q11, q6, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q10
vraddhn.u16 d2, q6, q11
vrshr.u16 q11, q12, #8
vrshr.u16 q8, q9, #8
vrshr.u16 q6, q13, #8
vraddhn.u16 d24, q12, q11
vraddhn.u16 d25, q9, q8
/*
* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
* and put data into d16 - blue, d17 - green, d18 - red
*/
vshrn.u16 d17, q2, #3
vshrn.u16 d18, q2, #8
vraddhn.u16 d26, q13, q6
vsli.u16 q2, q2, #5
vsri.u8 d18, d18, #5
vsri.u8 d17, d17, #6
/*
* 'combine_over_ca' replacement
*
* output: updated dest in d16 - blue, d17 - green, d18 - red
*/
vmvn.8 q12, q12
vshrn.u16 d16, q2, #2
vmvn.8 d26, d26
vmull.u8 q6, d16, d24
vmull.u8 q7, d17, d25
vmull.u8 q11, d18, d26
.endm
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail
/* ... continue 'combine_over_ca' replacement */
vrshr.u16 q10, q6, #8
vrshr.u16 q14, q7, #8
vrshr.u16 q15, q11, #8
vraddhn.u16 d16, q10, q6
vraddhn.u16 d17, q14, q7
vraddhn.u16 d18, q15, q11
vqadd.u8 q8, q0, q8
vqadd.u8 d18, d2, d18
/*
* convert the results in d16, d17, d18 to r5g6b5 and store
* them into {d28, d29}
*/
vshll.u8 q14, d18, #8
vshll.u8 q10, d17, #8
vshll.u8 q15, d16, #8
vsri.u16 q14, q10, #5
vsri.u16 q14, q15, #11
.endm
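Per pixel, the head/tail pair above is a fused form of pixman's 'combine_mask_ca' followed by 'combine_over_ca' for a solid source and an 8888 component-alpha mask over an r5g6b5 destination. A rough scalar model is sketched below; the exact rounding differs, since the NEON code uses the usual vmull/vrshr/vraddhn approximation of x/255:

    /* s: solid source channels [B, G, R, A]; m: per-channel mask [B, G, R];
     * d: destination expanded from r5g6b5 to 8-bit [B, G, R], updated in place. */
    static inline void
    over_n_8888_0565_ca_pixel (const uint8_t s[4], const uint8_t m[3], uint8_t d[3])
    {
        for (int c = 0; c < 3; c++)
        {
            unsigned src = (s[c] * m[c] + 127) / 255;                /* 'combine_mask_ca' */
            unsigned msk = (s[3] * m[c] + 127) / 255;
            unsigned out = src + (d[c] * (255 - msk) + 127) / 255;   /* 'over_ca' */
            d[c] = out > 255 ? 255 : out;
        }
    }

The result is then packed back to r5g6b5 by the vshll/vsri sequence at the end of the tail macro.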
.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
fetch_mask_pixblock
vrshr.u16 q10, q6, #8
vrshr.u16 q14, q7, #8
vld1.16 {d4, d5}, [DST_R, :128]!
vrshr.u16 q15, q11, #8
vraddhn.u16 d16, q10, q6
vraddhn.u16 d17, q14, q7
vraddhn.u16 d22, q15, q11
/* process_pixblock_head */
/*
* 'combine_mask_ca' replacement
*
* input: solid src (n) in {d8, d9, d10, d11} [B, G, R, A]
* mask in {d24, d25, d26} [B, G, R]
* output: updated src in {d0, d1, d2 } [B, G, R]
* updated mask in {d24, d25, d26} [B, G, R]
*/
vmull.u8 q1, d25, d9
vqadd.u8 q8, q0, q8
vmull.u8 q0, d24, d8
vqadd.u8 d22, d2, d22
vmull.u8 q6, d26, d10
/*
* convert the result in d16, d17, d22 to r5g6b5 and store
* it into {d28, d29}
*/
vshll.u8 q14, d22, #8
vshll.u8 q10, d17, #8
vshll.u8 q15, d16, #8
vmull.u8 q9, d11, d25
vsri.u16 q14, q10, #5
vmull.u8 q12, d11, d24
vmull.u8 q13, d11, d26
vsri.u16 q14, q15, #11
cache_preload 8, 8
vrshr.u16 q8, q0, #8
vrshr.u16 q10, q1, #8
vrshr.u16 q11, q6, #8
vraddhn.u16 d0, q0, q8
vraddhn.u16 d1, q1, q10
vraddhn.u16 d2, q6, q11
vrshr.u16 q11, q12, #8
vrshr.u16 q8, q9, #8
vrshr.u16 q6, q13, #8
vraddhn.u16 d25, q9, q8
/*
* convert 8 r5g6b5 pixel data from {d4, d5} to planar
* 8-bit format and put data into d16 - blue, d17 - green,
* d18 - red
*/
vshrn.u16 d17, q2, #3
vshrn.u16 d18, q2, #8
vraddhn.u16 d24, q12, q11
vraddhn.u16 d26, q13, q6
vsli.u16 q2, q2, #5
vsri.u8 d18, d18, #5
vsri.u8 d17, d17, #6
/*
* 'combine_over_ca' replacement
*
* output: updated dest in d16 - blue, d17 - green, d18 - red
*/
vmvn.8 q12, q12
vshrn.u16 d16, q2, #2
vmvn.8 d26, d26
vmull.u8 q7, d17, d25
vmull.u8 q6, d16, d24
vmull.u8 q11, d18, d26
vst1.16 {d28, d29}, [DST_W, :128]!
.endm
.macro pixman_composite_over_n_8888_0565_ca_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vpush {d8-d15}
vld1.32 {d11[0]}, [DUMMY]
vdup.8 d8, d11[0]
vdup.8 d9, d11[1]
vdup.8 d10, d11[2]
vdup.8 d11, d11[3]
.endm
.macro pixman_composite_over_n_8888_0565_ca_cleanup
vpop {d8-d15}
.endm
generate_composite_function \
pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_0565_ca_init, \
pixman_composite_over_n_8888_0565_ca_cleanup, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \
pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
/******************************************************************************/
.macro pixman_composite_in_n_8_process_pixblock_head
/* expecting source data in {d0, d1, d2, d3} */
/* and destination data in {d4, d5, d6, d7} */
@@ -2412,21 +2580,19 @@ fname:
*/
.macro bilinear_load_8888 reg1, reg2, tmp
mov TMP2, X, asr #16
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #2
add TMP2, BOTTOM, TMP2, asl #2
vld1.32 {reg1}, [TMP1]
vld1.32 {reg2}, [TMP2]
add TMP1, TOP, TMP1, asl #2
vld1.32 {reg1}, [TMP1], STRIDE
vld1.32 {reg2}, [TMP1]
.endm
.macro bilinear_load_0565 reg1, reg2, tmp
mov TMP2, X, asr #16
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
vld1.32 {reg2[0]}, [TMP1]
vld1.32 {reg2[1]}, [TMP2]
add TMP1, TOP, TMP1, asl #1
vld1.32 {reg2[0]}, [TMP1], STRIDE
vld1.32 {reg2[1]}, [TMP1]
convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
.endm
@@ -2454,18 +2620,16 @@ fname:
.macro bilinear_load_and_vertical_interpolate_two_0565 \
acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
mov TMP2, X, asr #16
add X, X, UX
mov TMP4, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
add TMP3, TOP, TMP4, asl #1
add TMP4, BOTTOM, TMP4, asl #1
vld1.32 {acc2lo[0]}, [TMP1]
vld1.32 {acc2hi[0]}, [TMP3]
vld1.32 {acc2lo[1]}, [TMP2]
vld1.32 {acc2hi[1]}, [TMP4]
add TMP2, TOP, TMP2, asl #1
vld1.32 {acc2lo[0]}, [TMP1], STRIDE
vld1.32 {acc2hi[0]}, [TMP2], STRIDE
vld1.32 {acc2lo[1]}, [TMP1]
vld1.32 {acc2hi[1]}, [TMP2]
convert_0565_to_x888 acc2, reg3, reg2, reg1
vzip.u8 reg1, reg3
vzip.u8 reg2, reg4
@@ -2481,34 +2645,30 @@ fname:
xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
mov TMP2, X, asr #16
add X, X, UX
mov TMP4, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
add TMP3, TOP, TMP4, asl #1
add TMP4, BOTTOM, TMP4, asl #1
vld1.32 {xacc2lo[0]}, [TMP1]
vld1.32 {xacc2hi[0]}, [TMP3]
vld1.32 {xacc2lo[1]}, [TMP2]
vld1.32 {xacc2hi[1]}, [TMP4]
add TMP2, TOP, TMP2, asl #1
vld1.32 {xacc2lo[0]}, [TMP1], STRIDE
vld1.32 {xacc2hi[0]}, [TMP2], STRIDE
vld1.32 {xacc2lo[1]}, [TMP1]
vld1.32 {xacc2hi[1]}, [TMP2]
convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #1
mov TMP2, X, asr #16
add X, X, UX
mov TMP4, X, asr #16
add X, X, UX
add TMP1, TOP, TMP2, asl #1
add TMP2, BOTTOM, TMP2, asl #1
add TMP3, TOP, TMP4, asl #1
add TMP4, BOTTOM, TMP4, asl #1
vld1.32 {yacc2lo[0]}, [TMP1]
add TMP2, TOP, TMP2, asl #1
vld1.32 {yacc2lo[0]}, [TMP1], STRIDE
vzip.u8 xreg1, xreg3
vld1.32 {yacc2hi[0]}, [TMP3]
vld1.32 {yacc2hi[0]}, [TMP2], STRIDE
vzip.u8 xreg2, xreg4
vld1.32 {yacc2lo[1]}, [TMP2]
vld1.32 {yacc2lo[1]}, [TMP1]
vzip.u8 xreg3, xreg4
vld1.32 {yacc2hi[1]}, [TMP4]
vld1.32 {yacc2hi[1]}, [TMP2]
vzip.u8 xreg1, xreg2
convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
vmull.u8 xacc1, xreg1, d28
@@ -2527,9 +2687,9 @@ fname:
.macro bilinear_store_8888 numpix, tmp1, tmp2
.if numpix == 4
vst1.32 {d0, d1}, [OUT]!
vst1.32 {d0, d1}, [OUT, :128]!
.elseif numpix == 2
vst1.32 {d0}, [OUT]!
vst1.32 {d0}, [OUT, :64]!
.elseif numpix == 1
vst1.32 {d0[0]}, [OUT, :32]!
.else
@@ -2544,11 +2704,11 @@ fname:
vuzp.u8 d0, d2
convert_8888_to_0565 d2, d1, d0, q1, tmp1, tmp2
.if numpix == 4
vst1.16 {d2}, [OUT]!
vst1.16 {d2}, [OUT, :64]!
.elseif numpix == 2
vst1.32 {d2[0]}, [OUT]!
vst1.32 {d2[0]}, [OUT, :32]!
.elseif numpix == 1
vst1.16 {d2[0]}, [OUT]!
vst1.16 {d2[0]}, [OUT, :16]!
.else
.error bilinear_store_0565 numpix is unsupported
.endif
@@ -2558,8 +2718,7 @@ fname:
bilinear_load_&src_fmt d0, d1, d2
vmull.u8 q1, d0, d28
vmlal.u8 q1, d1, d29
vshr.u16 d30, d24, #8
/* 4 cycles bubble */
/* 5 cycles bubble */
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
@@ -2574,17 +2733,17 @@ fname:
.macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
bilinear_load_and_vertical_interpolate_two_&src_fmt \
q1, q11, d0, d1, d20, d21, d22, d23
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshrn.u32 d30, q0, #16
vshrn.u32 d31, q10, #16
vmovn.u16 d0, q15
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q10, #16
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vmovn.u16 d0, q0
bilinear_store_&dst_fmt 2, q2, q3
.endm
@@ -2593,8 +2752,7 @@ fname:
q1, q11, d0, d1, d20, d21, d22, d23 \
q3, q9, d4, d5, d16, d17, d18, d19
pld [TMP1, PF_OFFS]
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
sub TMP1, TMP1, STRIDE
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
@@ -2614,18 +2772,69 @@ fname:
vshrn.u32 d1, q10, #16
vshrn.u32 d4, q2, #16
vshrn.u32 d5, q8, #16
vshr.u16 q15, q12, #8
vmovn.u16 d0, q0
vmovn.u16 d1, q2
vadd.u16 q12, q12, q13
bilinear_store_&dst_fmt 4, q2, q3
.endm
.macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head
.else
bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail
.endif
.endm
.macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
bilinear_interpolate_four_pixels src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head
.else
bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail
.else
bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
.endif
.endm
.macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head
.else
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
.endif
.endm
.set BILINEAR_FLAG_UNROLL_4, 0
.set BILINEAR_FLAG_UNROLL_8, 1
.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2
/*
* Main template macro for generating NEON optimized bilinear scanline
* functions.
*
* TODO: use software pipelining and aligned writes to the destination buffer
* in order to improve performance
*
* Bilinear scanline scaler macro template uses the following arguments:
* fname - name of the function to generate
* src_fmt - source color format (8888 or 0565)
@@ -2636,7 +2845,8 @@ fname:
*/
.macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \
bpp_shift, prefetch_distance
src_bpp_shift, dst_bpp_shift, \
prefetch_distance, flags
pixman_asm_function fname
OUT .req r0
@@ -2652,6 +2862,7 @@ pixman_asm_function fname
PF_OFFS .req r7
TMP3 .req r8
TMP4 .req r9
STRIDE .req r2
mov ip, sp
push {r4, r5, r6, r7, r8, r9}
@@ -2659,6 +2870,13 @@ pixman_asm_function fname
ldmia ip, {WB, X, UX, WIDTH}
mul PF_OFFS, PF_OFFS, UX
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
vpush {d8-d15}
.endif
sub STRIDE, BOTTOM, TOP
.unreq BOTTOM
cmp WIDTH, #0
ble 3f
@@ -2667,16 +2885,72 @@ pixman_asm_function fname
vdup.u8 d28, WT
vdup.u8 d29, WB
vadd.u16 d25, d25, d26
vadd.u16 q13, q13, q13
/* ensure good destination alignment */
cmp WIDTH, #1
blt 0f
tst OUT, #(1 << dst_bpp_shift)
beq 0f
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
bilinear_interpolate_last_pixel src_fmt, dst_fmt
sub WIDTH, WIDTH, #1
0:
vadd.u16 q13, q13, q13
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
cmp WIDTH, #2
blt 0f
tst OUT, #(1 << (dst_bpp_shift + 1))
beq 0f
bilinear_interpolate_two_pixels src_fmt, dst_fmt
sub WIDTH, WIDTH, #2
0:
.if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0
/*********** 8 pixels per iteration *****************/
cmp WIDTH, #4
blt 0f
tst OUT, #(1 << (dst_bpp_shift + 2))
beq 0f
bilinear_interpolate_four_pixels src_fmt, dst_fmt
sub WIDTH, WIDTH, #4
0:
subs WIDTH, WIDTH, #8
blt 1f
mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #8
blt 5f
0:
bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #8
bge 0b
5:
bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
1:
tst WIDTH, #4
beq 2f
bilinear_interpolate_four_pixels src_fmt, dst_fmt
2:
.else
/*********** 4 pixels per iteration *****************/
subs WIDTH, WIDTH, #4
blt 1f
mov PF_OFFS, PF_OFFS, asr #(16 - bpp_shift)
mov PF_OFFS, PF_OFFS, asr #(16 - src_bpp_shift)
bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #4
blt 5f
0:
bilinear_interpolate_four_pixels src_fmt, dst_fmt
bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
subs WIDTH, WIDTH, #4
bge 0b
5:
bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
1:
/****************************************************/
.endif
/* handle the remaining trailing pixels */
tst WIDTH, #2
beq 2f
bilinear_interpolate_two_pixels src_fmt, dst_fmt
@@ -2685,12 +2959,14 @@ pixman_asm_function fname
beq 3f
bilinear_interpolate_last_pixel src_fmt, dst_fmt
3:
.if ((flags) & BILINEAR_FLAG_USE_ALL_NEON_REGS) != 0
vpop {d8-d15}
.endif
pop {r4, r5, r6, r7, r8, r9}
bx lr
.unreq OUT
.unreq TOP
.unreq BOTTOM
.unreq WT
.unreq WB
.unreq X
@@ -2701,18 +2977,393 @@ pixman_asm_function fname
.unreq PF_OFFS
.unreq TMP3
.unreq TMP4
.unreq STRIDE
.endfunc
.endm
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, 2, 28
/*****************************************************************************/
.set have_bilinear_interpolate_four_pixels_8888_8888, 1
.macro bilinear_interpolate_four_pixels_8888_8888_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vld1.32 {d22}, [TMP1], STRIDE
vld1.32 {d23}, [TMP1]
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
vmull.u8 q8, d22, d28
vmlal.u8 q8, d23, d29
vld1.32 {d22}, [TMP2], STRIDE
vld1.32 {d23}, [TMP2]
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmull.u8 q9, d22, d28
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vshll.u16 q0, d16, #8
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
.endm
.macro bilinear_interpolate_four_pixels_8888_8888_tail
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #8
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vshr.u16 q15, q12, #8
vshrn.u32 d5, q3, #16
vmovn.u16 d6, q0
vmovn.u16 d7, q2
vadd.u16 q12, q12, q13
vst1.32 {d6, d7}, [OUT, :128]!
.endm
.macro bilinear_interpolate_four_pixels_8888_8888_tail_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d6, q0
vshll.u16 q0, d16, #8
vmovn.u16 d7, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vst1.32 {d6, d7}, [OUT, :128]!
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
.endm
/*****************************************************************************/
.set have_bilinear_interpolate_eight_pixels_8888_0565, 1
.macro bilinear_interpolate_eight_pixels_8888_0565_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vld1.32 {d20}, [TMP1], STRIDE
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vld1.32 {d22}, [TMP2], STRIDE
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vshll.u16 q0, d16, #8
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d8, q0
vshll.u16 q0, d16, #8
vmovn.u16 d9, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
.endm
.macro bilinear_interpolate_eight_pixels_8888_0565_tail
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #8
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vshr.u16 q15, q12, #8
vshrn.u32 d5, q3, #16
vmovn.u16 d10, q0
vmovn.u16 d11, q2
vadd.u16 q12, q12, q13
vuzp.u8 d8, d9
vuzp.u8 d10, d11
vuzp.u8 d9, d11
vuzp.u8 d8, d10
vshll.u8 q6, d9, #8
vshll.u8 q5, d10, #8
vshll.u8 q7, d8, #8
vsri.u16 q5, q6, #5
vsri.u16 q5, q7, #11
vst1.32 {d10, d11}, [OUT, :128]!
.endm
.macro bilinear_interpolate_eight_pixels_8888_0565_tail_head
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #8
vuzp.u8 d8, d9
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d10, q0
vshll.u16 q0, d16, #8
vmovn.u16 d11, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vuzp.u8 d10, d11
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
mov TMP2, X, asr #16
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vuzp.u8 d9, d11
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vuzp.u8 d8, d10
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshll.u8 q6, d9, #8
vshll.u8 q5, d10, #8
vshll.u8 q7, d8, #8
vshrn.u32 d0, q0, #16
vsri.u16 q5, q6, #5
vshrn.u32 d1, q1, #16
vsri.u16 q5, q7, #11
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
mov TMP3, X, asr #16
add X, X, UX
add TMP3, TOP, TMP3, asl #2
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d8, q0
vshll.u16 q0, d16, #8
vmovn.u16 d9, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
vld1.32 {d16}, [TMP4], STRIDE
vadd.u16 q12, q12, q13
vld1.32 {d17}, [TMP4]
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #8
vst1.32 {d10, d11}, [OUT, :128]!
vmlsl.u16 q1, d18, d31
.endm
/*****************************************************************************/
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, 2, 28
pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \
2, 2, 28, BILINEAR_FLAG_UNROLL_4
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, 1, 28
pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \
2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, 1, 28
pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \
1, 2, 28, BILINEAR_FLAG_UNROLL_4
generate_bilinear_scanline_func \
pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \
1, 1, 28, BILINEAR_FLAG_UNROLL_4


@@ -80,6 +80,8 @@ PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8888,
uint8_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_8888_ca,
uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_0565_ca,
uint32_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8,
uint8_t, 1, uint8_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8,
@@ -135,6 +137,23 @@ PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 0565_x888, SRC,
uint16_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 0565_0565, SRC,
uint16_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, neon, 8888_8888, OVER,
uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, neon, 8888_8888, ADD,
uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_8888, SRC,
uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_0565, SRC,
uint32_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 0565_8_x888, SRC,
uint16_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 0565_8_0565, SRC,
uint16_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, OVER,
uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, ADD,
uint32_t, uint32_t)
void
pixman_composite_src_n_8_asm_neon (int32_t w,
@@ -282,6 +301,8 @@ static const pixman_fast_path_t arm_neon_fast_paths[] =
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, neon_composite_over_n_8888_0565_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, neon_composite_over_n_8888_0565_ca),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, neon_composite_over_8888_n_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, neon_composite_over_8888_n_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, r5g6b5, neon_composite_over_8888_n_0565),
@@ -362,6 +383,28 @@ static const pixman_fast_path_t arm_neon_fast_paths[] =
SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_x888),
SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, r5g6b5, neon_0565_0565),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_8_x888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, r5g6b5, neon_0565_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
{ PIXMAN_OP_NONE },
};


@@ -963,15 +963,33 @@ set_lum (uint32_t dest[3], uint32_t src[3], uint32_t sa, uint32_t lum)
if (min < 0)
{
tmp[0] = l + (tmp[0] - l) * l / (l - min);
tmp[1] = l + (tmp[1] - l) * l / (l - min);
tmp[2] = l + (tmp[2] - l) * l / (l - min);
if (l - min == 0.0)
{
tmp[0] = 0;
tmp[1] = 0;
tmp[2] = 0;
}
else
{
tmp[0] = l + (tmp[0] - l) * l / (l - min);
tmp[1] = l + (tmp[1] - l) * l / (l - min);
tmp[2] = l + (tmp[2] - l) * l / (l - min);
}
}
if (max > a)
{
tmp[0] = l + (tmp[0] - l) * (a - l) / (max - l);
tmp[1] = l + (tmp[1] - l) * (a - l) / (max - l);
tmp[2] = l + (tmp[2] - l) * (a - l) / (max - l);
if (max - l == 0.0)
{
tmp[0] = a;
tmp[1] = a;
tmp[2] = a;
}
else
{
tmp[0] = l + (tmp[0] - l) * (a - l) / (max - l);
tmp[1] = l + (tmp[1] - l) * (a - l) / (max - l);
tmp[2] = l + (tmp[2] - l) * (a - l) / (max - l);
}
}
dest[0] = tmp[0] * MASK + 0.5;


@@ -963,15 +963,33 @@ set_lum (uint64_t dest[3], uint64_t src[3], uint64_t sa, uint64_t lum)
if (min < 0)
{
tmp[0] = l + (tmp[0] - l) * l / (l - min);
tmp[1] = l + (tmp[1] - l) * l / (l - min);
tmp[2] = l + (tmp[2] - l) * l / (l - min);
if (l - min == 0.0)
{
tmp[0] = 0;
tmp[1] = 0;
tmp[2] = 0;
}
else
{
tmp[0] = l + (tmp[0] - l) * l / (l - min);
tmp[1] = l + (tmp[1] - l) * l / (l - min);
tmp[2] = l + (tmp[2] - l) * l / (l - min);
}
}
if (max > a)
{
tmp[0] = l + (tmp[0] - l) * (a - l) / (max - l);
tmp[1] = l + (tmp[1] - l) * (a - l) / (max - l);
tmp[2] = l + (tmp[2] - l) * (a - l) / (max - l);
if (max - l == 0.0)
{
tmp[0] = a;
tmp[1] = a;
tmp[2] = a;
}
else
{
tmp[0] = l + (tmp[0] - l) * (a - l) / (max - l);
tmp[1] = l + (tmp[1] - l) * (a - l) / (max - l);
tmp[2] = l + (tmp[2] - l) * (a - l) / (max - l);
}
}
dest[0] = tmp[0] * MASK + 0.5;


@@ -387,6 +387,19 @@ pixman_rasterize_trapezoid (pixman_image_t * image,
}
}
/*
* pixman_composite_trapezoids()
*
* All the trapezoids are conceptually rendered to an infinitely big image.
* The (0, 0) coordinates of this image are then aligned with the (x, y)
* coordinates of the source image, and then both images are aligned with
* the (x, y) coordinates of the destination. Then, in principle, compositing
* of these three images takes place across the entire destination.
*
* FIXME: However, there is currently a bug, where we restrict this compositing
* to the bounding box of the trapezoids. This is incorrect for operators such
* as SRC and IN where blank source pixels do have an effect on the destination.
*/
PIXMAN_EXPORT void
pixman_composite_trapezoids (pixman_op_t op,
pixman_image_t * src,
@@ -419,14 +432,13 @@ pixman_composite_trapezoids (pixman_op_t op,
if (!pixman_trapezoid_valid (trap))
continue;
pixman_rasterize_trapezoid (dst, trap, 0, 0);
pixman_rasterize_trapezoid (dst, trap, x_dst, y_dst);
}
}
else
{
pixman_image_t *tmp;
pixman_box32_t box;
int x_rel, y_rel;
box.x1 = INT32_MAX;
box.y1 = INT32_MAX;
@@ -482,11 +494,10 @@ pixman_composite_trapezoids (pixman_op_t op,
pixman_rasterize_trapezoid (tmp, trap, - box.x1, - box.y1);
}
x_rel = box.x1 + x_src - x_dst;
y_rel = box.y1 + y_src - y_dst;
pixman_image_composite (op, src, tmp, dst,
x_rel, y_rel, 0, 0, box.x1, box.y1,
x_src + box.x1, y_src + box.y1,
0, 0,
x_dst + box.x1, y_dst + box.y1,
box.x2 - box.x1, box.y2 - box.y1);
pixman_image_unref (tmp);


@@ -32,10 +32,10 @@
#endif
#define PIXMAN_VERSION_MAJOR 0
#define PIXMAN_VERSION_MINOR 19
#define PIXMAN_VERSION_MICRO 5
#define PIXMAN_VERSION_MINOR 21
#define PIXMAN_VERSION_MICRO 7
#define PIXMAN_VERSION_STRING "0.19.5"
#define PIXMAN_VERSION_STRING "0.21.7"
#define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \
((major) * 10000) \