Bug 540464. pixman: update to 7862f9b96e8e8456cc60852790c7f244a5e3425e

This is a substantial cleanup of pixman and could break things.
Jeff Muizelaar 2010-01-20 15:08:05 -05:00
Parent ba2a2329c5
Commit 565b6e30ab
43 changed files with 28655 additions and 14915 deletions

View file

@@ -103,21 +103,25 @@ endif
CSRCS = \
pixman-access.c \
pixman-access-accessors.c \
pixman-bits-image.c \
pixman.c \
pixman-combine32.c \
pixman-combine64.c \
pixman-compose.c \
pixman-compose-accessors.c \
pixman-compute-region.c \
pixman-conical-gradient.c \
pixman-cpu.c \
pixman-edge.c \
pixman-edge-accessors.c \
pixman-fast-path.c \
pixman-general.c \
pixman-gradient-walker.c \
pixman-image.c \
pixman-implementation.c \
pixman-linear-gradient.c \
pixman-matrix.c \
pixman-pict.c \
pixman-radial-gradient.c \
pixman-region16.c \
pixman-region32.c \
pixman-source.c \
pixman-transformed.c \
pixman-transformed-accessors.c \
pixman-solid-fill.c \
pixman-trap.c \
pixman-utils.c \
$(NULL)
@@ -138,7 +142,7 @@ DEFINES += -DUSE_VMX
endif
ifdef USE_ARM_SIMD_GCC
CSRCS += pixman-arm-simd.c
CSRCS += pixman-arm-simd.c pixman-arm-simd-asm.c
DEFINES += -DUSE_ARM_SIMD
endif
@@ -150,6 +154,7 @@ endif
ifdef USE_ARM_SIMD_MSVC
ASFILES += pixman-arm-detect-win32.asm pixman-wce-arm-simd.asm
CSRCS += pixman-arm-simd.c
DEFINES += -DUSE_ARM_SIMD
AS_DASH_C_FLAG =
endif

Diff not shown because of its large size.

View file

@@ -0,0 +1,40 @@
#ifdef PIXMAN_FB_ACCESSORS
#define ACCESS(sym) sym##_accessors
#define READ(img, ptr) \
(((bits_image_t *)(img))->read_func ((ptr), sizeof(*(ptr))))
#define WRITE(img, ptr,val) \
(((bits_image_t *)(img))->write_func ((ptr), (val), sizeof (*(ptr))))
#define MEMCPY_WRAPPED(img, dst, src, size) \
do { \
size_t _i; \
uint8_t *_dst = (uint8_t*)(dst), *_src = (uint8_t*)(src); \
for(_i = 0; _i < size; _i++) { \
WRITE((img), _dst +_i, READ((img), _src + _i)); \
} \
} while (0)
#define MEMSET_WRAPPED(img, dst, val, size) \
do { \
size_t _i; \
uint8_t *_dst = (uint8_t*)(dst); \
for(_i = 0; _i < (size_t) size; _i++) { \
WRITE((img), _dst +_i, (val)); \
} \
} while (0)
#else
#define ACCESS(sym) sym
#define READ(img, ptr) (*(ptr))
#define WRITE(img, ptr, val) (*(ptr) = (val))
#define MEMCPY_WRAPPED(img, dst, src, size) \
memcpy(dst, src, size)
#define MEMSET_WRAPPED(img, dst, val, size) \
memset(dst, val, size)
#endif
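
To see what these wrappers buy, here is a minimal, self-contained C sketch of the same indirection; demo_image_t and its callbacks are illustrative stand-ins, not pixman's real bits_image_t, and only mirror the read/write shape used above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for bits_image_t: just the two callbacks. */
typedef struct
{
    uint32_t (*read_func) (const void *src, int size);
    void     (*write_func) (void *dst, uint32_t value, int size);
} demo_image_t;

static uint32_t
demo_read (const void *src, int size)
{
    return (size == 1) ? *(const uint8_t *) src : *(const uint32_t *) src;
}

static void
demo_write (void *dst, uint32_t value, int size)
{
    if (size == 1)
        *(uint8_t *) dst = (uint8_t) value;
    else
        *(uint32_t *) dst = value;
}

int
main (void)
{
    demo_image_t img = { demo_read, demo_write };
    uint8_t      buf[4] = { 0 };

    /* WRITE(img, ptr, val) routes through img.write_func (ptr, val, sizeof *ptr) */
    img.write_func (&buf[0], 0x7f, (int) sizeof buf[0]);
    printf ("%u\n", img.read_func (&buf[0], (int) sizeof buf[0]));
    return 0;
}

In the accessor build every access pays an indirect call, which is why the non-accessor branch of the #ifdef compiles the same macros down to plain dereferences.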

Diff not shown because of its large size.

View file

@@ -0,0 +1,906 @@
/*
* Copyright © 2009 Nokia Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
*/
/*
* This file contains a macro ('generate_composite_function') which can
* construct 2D image processing functions based on a common template.
* Any combination of source, destination and mask images with 8bpp,
* 16bpp, 24bpp or 32bpp color formats is supported.
*
* This macro takes care of:
* - handling of leading and trailing unaligned pixels
* - doing most of the work related to L2 cache preload
* - encouraging the use of software pipelining for better instruction
* scheduling
*
* The user of this macro has to provide some configuration parameters
* (bit depths for the images, prefetch distance, etc.) and a set of
* macros which should implement basic code chunks responsible for
* pixel processing. See the file 'pixman-arm-neon-asm.S' for usage
* examples.
*
* TODO:
* - try the overlapped pixel method (from Ian Rickards) when processing
* exactly two blocks of pixels
* - maybe add an option to do reverse scanline processing
*/
/*
* Bit flags for 'generate_composite_function' macro which are used
* to tune generated functions behavior.
*/
.set FLAG_DST_WRITEONLY, 0
.set FLAG_DST_READWRITE, 1
.set FLAG_DEINTERLEAVE_32BPP, 2
/*
* Offset in the stack where the mask and source pointer/stride can be
* accessed from the 'init' macro. This is useful for special handling
* of a solid mask.
*/
.set ARGS_STACK_OFFSET, 40
/*
* Constants for selecting the preferred prefetch type.
*/
.set PREFETCH_TYPE_NONE, 0 /* No prefetch at all */
.set PREFETCH_TYPE_SIMPLE, 1 /* A simple, fixed-distance-ahead prefetch */
.set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */
/*
* Definitions of supplementary pixld/pixst macros (for partial load/store of
* pixel data).
*/
.macro pixldst1 op, elem_size, reg1, mem_operand, abits
.if abits > 0
op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
.else
op&.&elem_size {d&reg1}, [&mem_operand&]!
.endif
.endm
.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
.if abits > 0
op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
.else
op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
.endif
.endm
.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
.if abits > 0
op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
.else
op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
.endif
.endm
.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
.endm
.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
.endm
.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
.endm
.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
.if numbytes == 32
pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
%(basereg+6), %(basereg+7), mem_operand, abits
.elseif numbytes == 16
pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
.elseif numbytes == 8
pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
.elseif numbytes == 4
.if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
.elseif elem_size == 16
pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
.else
pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
.endif
.elseif numbytes == 2
.if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
.else
pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
.endif
.elseif numbytes == 1
pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
.else
.error "unsupported size: numbytes"
.endif
.endm
.macro pixld numpix, bpp, basereg, mem_operand, abits=0
.if bpp > 0
.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
%(basereg+6), %(basereg+7), mem_operand, abits
.elseif (bpp == 24) && (numpix == 8)
pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
.elseif (bpp == 24) && (numpix == 4)
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
.elseif (bpp == 24) && (numpix == 2)
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
.elseif (bpp == 24) && (numpix == 1)
pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
.else
pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
.endif
.endif
.endm
.macro pixst numpix, bpp, basereg, mem_operand, abits=0
.if bpp > 0
.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
%(basereg+6), %(basereg+7), mem_operand, abits
.elseif (bpp == 24) && (numpix == 8)
pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
.elseif (bpp == 24) && (numpix == 4)
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
.elseif (bpp == 24) && (numpix == 2)
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
.elseif (bpp == 24) && (numpix == 1)
pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
.else
pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
.endif
.endif
.endm
.macro pixld_a numpix, bpp, basereg, mem_operand
.if (bpp * numpix) <= 128
pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
.else
pixld numpix, bpp, basereg, mem_operand, 128
.endif
.endm
.macro pixst_a numpix, bpp, basereg, mem_operand
.if (bpp * numpix) <= 128
pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
.else
pixst numpix, bpp, basereg, mem_operand, 128
.endif
.endm
.macro vuzp8 reg1, reg2
vuzp.8 d&reg1, d&reg2
.endm
.macro vzip8 reg1, reg2
vzip.8 d&reg1, d&reg2
.endm
/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
.macro pixdeinterleave bpp, basereg
.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
vuzp8 %(basereg+0), %(basereg+1)
vuzp8 %(basereg+2), %(basereg+3)
vuzp8 %(basereg+1), %(basereg+3)
vuzp8 %(basereg+0), %(basereg+2)
.endif
.endm
/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
.macro pixinterleave bpp, basereg
.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
vzip8 %(basereg+0), %(basereg+2)
vzip8 %(basereg+1), %(basereg+3)
vzip8 %(basereg+2), %(basereg+3)
vzip8 %(basereg+0), %(basereg+1)
.endif
.endm
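
The vuzp.8/vzip.8 sequences above are the NEON way of converting eight packed pixels between array-of-structures and structure-of-arrays layouts. A rough scalar C equivalent of the deinterleave direction, shown purely for illustration:

#include <stdint.h>

/* Split eight packed 32bpp BGRA pixels into four per-channel byte arrays,
 * so each channel can be processed with byte-wide arithmetic. */
static void
deinterleave_8_pixels (const uint32_t src[8],
                       uint8_t b[8], uint8_t g[8], uint8_t r[8], uint8_t a[8])
{
    int i;

    for (i = 0; i < 8; i++)
    {
        b[i] = src[i] & 0xff;
        g[i] = (src[i] >> 8) & 0xff;
        r[i] = (src[i] >> 16) & 0xff;
        a[i] = (src[i] >> 24) & 0xff;
    }
}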
/*
* This is a macro for implementing cache preload. The main idea is that
* the cache preload logic is mostly independent from the rest of the
* pixel processing code. It starts at the top left pixel, moves forward
* across pixels, and can jump across scanlines. The prefetch distance is
* handled in an 'incremental' way: it starts from 0 and advances to the
* optimal distance over time. After reaching the optimal prefetch distance,
* it is kept constant. There are some checks which prevent prefetching
* unneeded pixel lines below the image (but it can still prefetch a bit
* more data on the right side of the image - not a big issue, and it may
* actually be helpful when rendering text glyphs). An additional trick is
* the use of the LDR instruction for prefetch instead of PLD when moving to
* the next line; the point is that we have a high chance of getting a TLB
* miss in this case, and PLD would be useless.
*
* This sounds like it may introduce a noticeable overhead (when working with
* fully cached data). But in reality, due to having a separate pipeline and
* instruction queue for the NEON unit in the ARM Cortex-A8, normal ARM code
* can execute simultaneously with NEON code and be completely shadowed by it.
* Thus we get no performance overhead at all (*). This looks like a very nice
* feature of the Cortex-A8, if used wisely. We don't have a hardware
* prefetcher, but we can still implement some rather advanced prefetch logic
* in software for almost zero cost!
*
* (*) The overhead of the prefetcher is visible when running some trivial
* pixel processing like a simple copy. Anyway, having prefetch is a must
* when working with graphics data.
*/
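
As a rough illustration of the 'incremental' distance handling described above, here is a hedged C model of the idea; it mirrors the spirit of the PF_X/PF_CTL bookkeeping, not the exact register-level behavior, and all names are invented for the sketch.

#include <stddef.h>
#include <stdint.h>

typedef struct
{
    int x;           /* prefetch position within the current scanline */
    int boosts_left; /* how many distance boosts are still allowed    */
    int width;       /* scanline width in pixels                      */
} prefetch_model_t;

static void
advance_prefetch (prefetch_model_t *pf, int block_size, int boost,
                  const uint8_t *line, int bytes_per_pixel)
{
    pf->x += block_size;          /* keep pace with pixel processing  */
    if (pf->boosts_left > 0)
    {
        pf->x += boost;           /* grow the distance over time      */
        pf->boosts_left--;
    }
    if (pf->x >= pf->width)
        pf->x -= pf->width;       /* wrapped over to the next line    */

    __builtin_prefetch (line + (size_t) pf->x * bytes_per_pixel);
}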
.macro PF a, x:vararg
.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
a x
.endif
.endm
.macro cache_preload std_increment, boost_increment
.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
.if regs_shortage
PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
.endif
.if std_increment != 0
PF add PF_X, PF_X, #std_increment
.endif
PF tst PF_CTL, #0xF
PF addne PF_X, PF_X, #boost_increment
PF subne PF_CTL, PF_CTL, #1
PF cmp PF_X, ORIG_W
.if src_bpp_shift >= 0
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
.endif
.if dst_r_bpp != 0
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
.endif
.if mask_bpp_shift >= 0
PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
.endif
PF subge PF_X, PF_X, ORIG_W
PF subges PF_CTL, PF_CTL, #0x10
.if src_bpp_shift >= 0
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
.endif
.if dst_r_bpp != 0
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
.endif
.if mask_bpp_shift >= 0
PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
.endif
.endif
.endm
.macro cache_preload_simple
.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
.if src_bpp > 0
pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
.endif
.if dst_r_bpp > 0
pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
.endif
.if mask_bpp > 0
pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
.endif
.endif
.endm
/*
* Macro which is used to process leading pixels until the destination
* pointer is properly aligned (at a 16-byte boundary). When the destination
* buffer uses a 16bpp format, this is unnecessary, or even pointless.
*/
.macro ensure_destination_ptr_alignment process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
.if dst_w_bpp != 24
tst DST_R, #0xF
beq 2f
.irp lowbit, 1, 2, 4, 8, 16
local skip1
.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
.if lowbit < 16 /* we don't need more than 16-byte alignment */
tst DST_R, #lowbit
beq 1f
.endif
pixld (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
pixld (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
.if dst_r_bpp > 0
pixld_a (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
.else
add DST_R, DST_R, #lowbit
.endif
PF add PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
sub W, W, #(lowbit * 8 / dst_w_bpp)
1:
.endif
.endr
pixdeinterleave src_bpp, src_basereg
pixdeinterleave mask_bpp, mask_basereg
pixdeinterleave dst_r_bpp, dst_r_basereg
process_pixblock_head
cache_preload 0, pixblock_size
cache_preload_simple
process_pixblock_tail
pixinterleave dst_w_bpp, dst_w_basereg
.irp lowbit, 1, 2, 4, 8, 16
.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
.if lowbit < 16 /* we don't need more than 16-byte alignment */
tst DST_W, #lowbit
beq 1f
.endif
pixst_a (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
1:
.endif
.endr
.endif
2:
.endm
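
The arithmetic behind the .irp ladder above fits in a few lines of C; this sketch (illustrative, with an invented helper name) computes how many leading pixels have to be consumed before the destination pointer reaches a 16-byte boundary.

#include <stdint.h>

/* Assumes bytes_per_pixel is 1, 2 or 4, so it divides the misalignment
 * evenly; 24bpp destinations are excluded by the macro above as well. */
static int
leading_pixels_to_align (uintptr_t dst, int bytes_per_pixel)
{
    uintptr_t misalign = dst & 15;

    return misalign ? (int) ((16 - misalign) / bytes_per_pixel) : 0;
}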
/*
* Special code for processing up to (pixblock_size - 1) remaining
* trailing pixels. As SIMD processing performs an operation on
* pixblock_size pixels, anything smaller than this has to be loaded
* and stored in a special way. Loading and storing of pixel data is
* performed in such a way that we fill some 'slots' in the NEON
* registers (some slots naturally stay unused), then perform the
* compositing operation as usual. In the end, the data is taken from
* these 'slots' and saved to memory.
*
* cache_preload_flag - allows prefetch to be suppressed if
*                      set to 0
* dst_aligned_flag   - selects whether the destination buffer
*                      is aligned
*/
.macro process_trailing_pixels cache_preload_flag, \
dst_aligned_flag, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
tst W, #(pixblock_size - 1)
beq 2f
.irp chunk_size, 16, 8, 4, 2, 1
.if pixblock_size > chunk_size
tst W, #chunk_size
beq 1f
pixld chunk_size, src_bpp, src_basereg, SRC
pixld chunk_size, mask_bpp, mask_basereg, MASK
.if dst_aligned_flag != 0
pixld_a chunk_size, dst_r_bpp, dst_r_basereg, DST_R
.else
pixld chunk_size, dst_r_bpp, dst_r_basereg, DST_R
.endif
.if cache_preload_flag != 0
PF add PF_X, PF_X, #chunk_size
.endif
1:
.endif
.endr
pixdeinterleave src_bpp, src_basereg
pixdeinterleave mask_bpp, mask_basereg
pixdeinterleave dst_r_bpp, dst_r_basereg
process_pixblock_head
.if cache_preload_flag != 0
cache_preload 0, pixblock_size
cache_preload_simple
.endif
process_pixblock_tail
pixinterleave dst_w_bpp, dst_w_basereg
.irp chunk_size, 16, 8, 4, 2, 1
.if pixblock_size > chunk_size
tst W, #chunk_size
beq 1f
.if dst_aligned_flag != 0
pixst_a chunk_size, dst_w_bpp, dst_w_basereg, DST_W
.else
pixst chunk_size, dst_w_bpp, dst_w_basereg, DST_W
.endif
1:
.endif
.endr
2:
.endm
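
In other words, the remainder is decomposed into power-of-two chunks by testing the bits of the remaining width; a compact C rendering of that decomposition (names invented for the sketch):

/* For example, w == 7 selects chunks of 4, 2 and 1 pixels in turn. */
static void
process_trailing (int w, int pixblock_size, void (*process_chunk) (int))
{
    int chunk;

    for (chunk = 16; chunk >= 1; chunk >>= 1)
    {
        if (pixblock_size > chunk && (w & chunk))
            process_chunk (chunk);
    }
}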
/*
* Macro which performs all the needed operations to switch to the next
* scanline and start the next loop iteration unless all the scanlines
* are already processed.
*/
.macro advance_to_next_scanline start_of_loop_label
.if regs_shortage
ldrd W, [sp] /* load W and H (width and height) from stack */
.else
mov W, ORIG_W
.endif
add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
.if src_bpp != 0
add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
.endif
.if mask_bpp != 0
add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
.endif
.if (dst_w_bpp != 24)
sub DST_W, DST_W, W, lsl #dst_bpp_shift
.endif
.if (src_bpp != 24) && (src_bpp != 0)
sub SRC, SRC, W, lsl #src_bpp_shift
.endif
.if (mask_bpp != 24) && (mask_bpp != 0)
sub MASK, MASK, W, lsl #mask_bpp_shift
.endif
subs H, H, #1
mov DST_R, DST_W
.if regs_shortage
str H, [sp, #4] /* save updated height to stack */
.endif
bge start_of_loop_label
.endm
/*
* Registers are allocated in the following way by default:
* d0, d1, d2, d3 - reserved for loading source pixel data
* d4, d5, d6, d7 - reserved for loading destination pixel data
* d24, d25, d26, d27 - reserved for loading mask pixel data
* d28, d29, d30, d31 - final destination pixel data for writeback to memory
*/
.macro generate_composite_function fname, \
src_bpp_, \
mask_bpp_, \
dst_w_bpp_, \
flags, \
pixblock_size_, \
prefetch_distance, \
init, \
cleanup, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head, \
dst_w_basereg_ = 28, \
dst_r_basereg_ = 4, \
src_basereg_ = 0, \
mask_basereg_ = 24
.func fname
.global fname
/* For ELF format also set function visibility to hidden */
#ifdef __ELF__
.hidden fname
.type fname, %function
#endif
fname:
push {r4-r12, lr} /* save all registers */
/*
* Select prefetch type for this function. If prefetch distance is
* set to 0 or one of the color formats is 24bpp, SIMPLE prefetch
* has to be used instead of ADVANCED.
*/
.set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
.if prefetch_distance == 0
.set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
.set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
.endif
/*
* Make some macro arguments globally visible and accessible
* from other macros
*/
.set src_bpp, src_bpp_
.set mask_bpp, mask_bpp_
.set dst_w_bpp, dst_w_bpp_
.set pixblock_size, pixblock_size_
.set dst_w_basereg, dst_w_basereg_
.set dst_r_basereg, dst_r_basereg_
.set src_basereg, src_basereg_
.set mask_basereg, mask_basereg_
/*
* Assign symbolic names to registers
*/
W .req r0 /* width (is updated during processing) */
H .req r1 /* height (is updated during processing) */
DST_W .req r2 /* destination buffer pointer for writes */
DST_STRIDE .req r3 /* destination image stride */
SRC .req r4 /* source buffer pointer */
SRC_STRIDE .req r5 /* source image stride */
DST_R .req r6 /* destination buffer pointer for reads */
MASK .req r7 /* mask pointer */
MASK_STRIDE .req r8 /* mask stride */
PF_CTL .req r9 /* combined lines counter and prefetch */
/* distance increment counter */
PF_X .req r10 /* pixel index in a scanline for current */
/* prefetch position */
PF_SRC .req r11 /* pointer to source scanline start */
/* for prefetch purposes */
PF_DST .req r12 /* pointer to destination scanline start */
/* for prefetch purposes */
PF_MASK .req r14 /* pointer to mask scanline start */
/* for prefetch purposes */
/*
* Check whether we have enough registers for all the local variables.
* If we don't have enough registers, the original width and height are
* kept on top of the stack (and the 'regs_shortage' variable is set to
* indicate this for the rest of the code). Even if there are enough
* registers, the allocation scheme may be a bit different depending on
* whether the source or mask is unused.
*/
.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
ORIG_W .req r10 /* saved original width */
DUMMY .req r12 /* temporary register */
.set regs_shortage, 0
.elseif mask_bpp == 0
ORIG_W .req r7 /* saved original width */
DUMMY .req r8 /* temporary register */
.set regs_shortage, 0
.elseif src_bpp == 0
ORIG_W .req r4 /* saved original width */
DUMMY .req r5 /* temporary register */
.set regs_shortage, 0
.else
ORIG_W .req r1 /* saved original width */
DUMMY .req r1 /* temporary register */
.set regs_shortage, 1
.endif
.set mask_bpp_shift, -1
.if src_bpp == 32
.set src_bpp_shift, 2
.elseif src_bpp == 24
.set src_bpp_shift, 0
.elseif src_bpp == 16
.set src_bpp_shift, 1
.elseif src_bpp == 8
.set src_bpp_shift, 0
.elseif src_bpp == 0
.set src_bpp_shift, -1
.else
.error "requested src bpp (src_bpp) is not supported"
.endif
.if mask_bpp == 32
.set mask_bpp_shift, 2
.elseif mask_bpp == 24
.set mask_bpp_shift, 0
.elseif mask_bpp == 8
.set mask_bpp_shift, 0
.elseif mask_bpp == 0
.set mask_bpp_shift, -1
.else
.error "requested mask bpp (mask_bpp) is not supported"
.endif
.if dst_w_bpp == 32
.set dst_bpp_shift, 2
.elseif dst_w_bpp == 24
.set dst_bpp_shift, 0
.elseif dst_w_bpp == 16
.set dst_bpp_shift, 1
.elseif dst_w_bpp == 8
.set dst_bpp_shift, 0
.else
.error "requested dst bpp (dst_w_bpp) is not supported"
.endif
.if (((flags) & FLAG_DST_READWRITE) != 0)
.set dst_r_bpp, dst_w_bpp
.else
.set dst_r_bpp, 0
.endif
.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
.set DEINTERLEAVE_32BPP_ENABLED, 1
.else
.set DEINTERLEAVE_32BPP_ENABLED, 0
.endif
.if prefetch_distance < 0 || prefetch_distance > 15
.error "invalid prefetch distance (prefetch_distance)"
.endif
.if src_bpp > 0
ldr SRC, [sp, #40]
.endif
.if mask_bpp > 0
ldr MASK, [sp, #48]
.endif
PF mov PF_X, #0
.if src_bpp > 0
ldr SRC_STRIDE, [sp, #44]
.endif
.if mask_bpp > 0
ldr MASK_STRIDE, [sp, #52]
.endif
mov DST_R, DST_W
.if src_bpp == 24
sub SRC_STRIDE, SRC_STRIDE, W
sub SRC_STRIDE, SRC_STRIDE, W, lsl #1
.endif
.if mask_bpp == 24
sub MASK_STRIDE, MASK_STRIDE, W
sub MASK_STRIDE, MASK_STRIDE, W, lsl #1
.endif
.if dst_w_bpp == 24
sub DST_STRIDE, DST_STRIDE, W
sub DST_STRIDE, DST_STRIDE, W, lsl #1
.endif
/*
* Setup advanced prefetcher initial state
*/
PF mov PF_SRC, SRC
PF mov PF_DST, DST_R
PF mov PF_MASK, MASK
/* PF_CTL = prefetch_distance | ((h - 1) << 4) */
PF mov PF_CTL, H, lsl #4
PF add PF_CTL, #(prefetch_distance - 0x10)
init
.if regs_shortage
push {r0, r1}
.endif
subs H, H, #1
.if regs_shortage
str H, [sp, #4] /* save updated height to stack */
.else
mov ORIG_W, W
.endif
blt 9f
cmp W, #(pixblock_size * 2)
blt 8f
/*
* This is the start of the pipelined loop, which is optimized for
* long scanlines
*/
0:
ensure_destination_ptr_alignment process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
/* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
pixld_a pixblock_size, dst_r_bpp, \
(dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
pixld pixblock_size, src_bpp, \
(src_basereg - pixblock_size * src_bpp / 64), SRC
pixld pixblock_size, mask_bpp, \
(mask_basereg - pixblock_size * mask_bpp / 64), MASK
PF add PF_X, PF_X, #pixblock_size
process_pixblock_head
cache_preload 0, pixblock_size
cache_preload_simple
subs W, W, #(pixblock_size * 2)
blt 2f
1:
process_pixblock_tail_head
cache_preload_simple
subs W, W, #pixblock_size
bge 1b
2:
process_pixblock_tail
pixst_a pixblock_size, dst_w_bpp, \
(dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
/* Process the remaining trailing pixels in the scanline */
process_trailing_pixels 1, 1, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
advance_to_next_scanline 0b
.if regs_shortage
pop {r0, r1}
.endif
cleanup
pop {r4-r12, pc} /* exit */
/*
* This is the start of the loop, designed to process images with small width
* (less than pixblock_size * 2 pixels). In this case neither pipelining
* nor prefetch is used.
*/
8:
/* Process exactly pixblock_size pixels if needed */
tst W, #pixblock_size
beq 1f
pixld pixblock_size, dst_r_bpp, \
(dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
pixld pixblock_size, src_bpp, \
(src_basereg - pixblock_size * src_bpp / 64), SRC
pixld pixblock_size, mask_bpp, \
(mask_basereg - pixblock_size * mask_bpp / 64), MASK
process_pixblock_head
process_pixblock_tail
pixst pixblock_size, dst_w_bpp, \
(dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
1:
/* Process the remaining trailing pixels in the scanline */
process_trailing_pixels 0, 0, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
advance_to_next_scanline 8b
9:
.if regs_shortage
pop {r0, r1}
.endif
cleanup
pop {r4-r12, pc} /* exit */
.unreq SRC
.unreq MASK
.unreq DST_R
.unreq DST_W
.unreq ORIG_W
.unreq W
.unreq H
.unreq SRC_STRIDE
.unreq DST_STRIDE
.unreq MASK_STRIDE
.unreq PF_CTL
.unreq PF_X
.unreq PF_SRC
.unreq PF_DST
.unreq PF_MASK
.unreq DUMMY
.endfunc
.endm
/*
* A simplified variant of the function generation template for single
* scanline processing (for implementing pixman combine functions)
*/
.macro generate_composite_function_single_scanline fname, \
src_bpp_, \
mask_bpp_, \
dst_w_bpp_, \
flags, \
pixblock_size_, \
init, \
cleanup, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head, \
dst_w_basereg_ = 28, \
dst_r_basereg_ = 4, \
src_basereg_ = 0, \
mask_basereg_ = 24
.func fname
.global fname
/* For ELF format also set function visibility to hidden */
#ifdef __ELF__
.hidden fname
.type fname, %function
#endif
fname:
.set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
/*
* Make some macro arguments globally visible and accessible
* from other macros
*/
.set src_bpp, src_bpp_
.set mask_bpp, mask_bpp_
.set dst_w_bpp, dst_w_bpp_
.set pixblock_size, pixblock_size_
.set dst_w_basereg, dst_w_basereg_
.set dst_r_basereg, dst_r_basereg_
.set src_basereg, src_basereg_
.set mask_basereg, mask_basereg_
/*
* Assign symbolic names to registers
*/
W .req r0 /* width (is updated during processing) */
DST_W .req r1 /* destination buffer pointer for writes */
SRC .req r2 /* source buffer pointer */
DST_R .req ip /* destination buffer pointer for reads */
MASK .req r3 /* mask pointer */
.if (((flags) & FLAG_DST_READWRITE) != 0)
.set dst_r_bpp, dst_w_bpp
.else
.set dst_r_bpp, 0
.endif
.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
.set DEINTERLEAVE_32BPP_ENABLED, 1
.else
.set DEINTERLEAVE_32BPP_ENABLED, 0
.endif
init
mov DST_R, DST_W
cmp W, #pixblock_size
blt 8f
ensure_destination_ptr_alignment process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
subs W, W, #pixblock_size
blt 7f
/* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
pixld_a pixblock_size, dst_r_bpp, \
(dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
pixld pixblock_size, src_bpp, \
(src_basereg - pixblock_size * src_bpp / 64), SRC
pixld pixblock_size, mask_bpp, \
(mask_basereg - pixblock_size * mask_bpp / 64), MASK
process_pixblock_head
subs W, W, #pixblock_size
blt 2f
1:
process_pixblock_tail_head
subs W, W, #pixblock_size
bge 1b
2:
process_pixblock_tail
pixst_a pixblock_size, dst_w_bpp, \
(dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
7:
/* Process the remaining trailing pixels in the scanline (dst aligned) */
process_trailing_pixels 0, 1, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
cleanup
bx lr /* exit */
8:
/* Process the remaining trailing pixels in the scanline (dst unaligned) */
process_trailing_pixels 0, 0, \
process_pixblock_head, \
process_pixblock_tail, \
process_pixblock_tail_head
cleanup
bx lr /* exit */
.unreq SRC
.unreq MASK
.unreq DST_R
.unreq DST_W
.unreq W
.endfunc
.endm
.macro default_init
.endm
.macro default_cleanup
.endm

Diff not shown because of its large size.

View file

@@ -0,0 +1,485 @@
/*
* Copyright © 2008 Mozilla Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Mozilla Corporation not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Mozilla Corporation makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Author: Jeff Muizelaar (jeff@infidigm.net)
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
static void
arm_composite_add_8000_8000 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
uint8_t *dst_line, *dst;
uint8_t *src_line, *src;
int dst_stride, src_stride;
uint16_t w;
uint8_t s, d;
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
while (height--)
{
dst = dst_line;
dst_line += dst_stride;
src = src_line;
src_line += src_stride;
w = width;
/* ensure both src and dst are properly aligned before doing 32-bit reads;
* we'll stay in this loop if src and dst have differing alignments
*/
while (w && (((unsigned long)dst & 3) || ((unsigned long)src & 3)))
{
s = *src;
d = *dst;
asm ("uqadd8 %0, %1, %2" : "+r" (d) : "r" (s));
*dst = d;
dst++;
src++;
w--;
}
while (w >= 4)
{
asm ("uqadd8 %0, %1, %2"
: "=r" (*(uint32_t*)dst)
: "r" (*(uint32_t*)src), "r" (*(uint32_t*)dst));
dst += 4;
src += 4;
w -= 4;
}
while (w)
{
s = *src;
d = *dst;
asm ("uqadd8 %0, %1, %2" : "+r" (d) : "r" (s));
*dst = d;
dst++;
src++;
w--;
}
}
}
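
For readers without the ARM manual at hand: UQADD8 is an unsigned saturating add on each of the four bytes packed in a 32-bit word. A portable C sketch of the same operation, illustrative only:

#include <stdint.h>

static uint32_t
uqadd8_c (uint32_t a, uint32_t b)
{
    uint32_t result = 0;
    int      i;

    for (i = 0; i < 4; i++)
    {
        uint32_t s = ((a >> (i * 8)) & 0xff) + ((b >> (i * 8)) & 0xff);

        if (s > 0xff)
            s = 0xff;    /* clamp each byte instead of wrapping */
        result |= s << (i * 8);
    }
    return result;
}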
static void
arm_composite_over_8888_8888 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
uint32_t *dst_line, *dst;
uint32_t *src_line, *src;
int dst_stride, src_stride;
uint16_t w;
uint32_t component_half = 0x800080;
uint32_t upper_component_mask = 0xff00ff00;
uint32_t alpha_mask = 0xff;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
while (height--)
{
dst = dst_line;
dst_line += dst_stride;
src = src_line;
src_line += src_stride;
w = width;
/* #define inner_branch */
asm volatile (
"cmp %[w], #0\n\t"
"beq 2f\n\t"
"1:\n\t"
/* load src */
"ldr r5, [%[src]], #4\n\t"
#ifdef inner_branch
/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
* The 0x0 case also allows us to avoid doing an unnecessary data
* write, which is more valuable, so we only check for that
*/
"cmp r5, #0\n\t"
"beq 3f\n\t"
/* = 255 - alpha */
"sub r8, %[alpha_mask], r5, lsr #24\n\t"
"ldr r4, [%[dest]] \n\t"
#else
"ldr r4, [%[dest]] \n\t"
/* = 255 - alpha */
"sub r8, %[alpha_mask], r5, lsr #24\n\t"
#endif
"uxtb16 r6, r4\n\t"
"uxtb16 r7, r4, ror #8\n\t"
/* multiply by 257 and divide by 65536 */
"mla r6, r6, r8, %[component_half]\n\t"
"mla r7, r7, r8, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
/* recombine the 0xff00ff00 bytes of r6 and r7 */
"and r7, r7, %[upper_component_mask]\n\t"
"uxtab16 r6, r7, r6, ror #8\n\t"
"uqadd8 r5, r6, r5\n\t"
#ifdef inner_branch
"3:\n\t"
#endif
"str r5, [%[dest]], #4\n\t"
/* increment counter and jmp to top */
"subs %[w], %[w], #1\n\t"
"bne 1b\n\t"
"2:\n\t"
: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
: [component_half] "r" (component_half), [upper_component_mask] "r" (upper_component_mask),
[alpha_mask] "r" (alpha_mask)
: "r4", "r5", "r6", "r7", "r8", "cc", "memory"
);
}
}
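
The assembly above is the premultiplied OVER operator: dest = src + dest * (255 - src_alpha) / 255 per channel, with the mla/uxtab16 pair implementing the usual rounded division by 255 (t = x * a + 0x80, then (t + (t >> 8)) >> 8; the same identity as the IntMult macro further down). A scalar C sketch of the per-pixel math, for illustration:

#include <stdint.h>

static uint32_t
over_8888_c (uint32_t src, uint32_t dest)
{
    uint32_t ialpha = 255 - (src >> 24);
    uint32_t result = 0;
    int      i;

    for (i = 0; i < 4; i++)
    {
        uint32_t d = (dest >> (i * 8)) & 0xff;
        uint32_t t = d * ialpha + 0x80;       /* rounded divide by 255 */
        uint32_t c = ((src >> (i * 8)) & 0xff) + ((t + (t >> 8)) >> 8);

        if (c > 0xff)
            c = 0xff;                         /* UQADD8 saturates here */
        result |= c << (i * 8);
    }
    return result;
}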
static void
arm_composite_over_8888_n_8888 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
uint32_t *dst_line, *dst;
uint32_t *src_line, *src;
uint32_t mask;
int dst_stride, src_stride;
uint16_t w;
uint32_t component_half = 0x800080;
uint32_t alpha_mask = 0xff;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
mask = _pixman_image_get_solid (mask_image, PIXMAN_a8r8g8b8);
mask = (mask) >> 24;
while (height--)
{
dst = dst_line;
dst_line += dst_stride;
src = src_line;
src_line += src_stride;
w = width;
/* #define inner_branch */
asm volatile (
"cmp %[w], #0\n\t"
"beq 2f\n\t"
"1:\n\t"
/* load src */
"ldr r5, [%[src]], #4\n\t"
#ifdef inner_branch
/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
* The 0x0 case also allows us to avoid doing an unnecessary data
* write, which is more valuable, so we only check for that
*/
"cmp r5, #0\n\t"
"beq 3f\n\t"
#endif
"ldr r4, [%[dest]] \n\t"
"uxtb16 r6, r5\n\t"
"uxtb16 r7, r5, ror #8\n\t"
/* multiply by mask alpha then by 257 and divide by 65536 */
"mla r6, r6, %[mask_alpha], %[component_half]\n\t"
"mla r7, r7, %[mask_alpha], %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r5, r6, r7, lsl #8\n\t"
"uxtb16 r6, r4\n\t"
"uxtb16 r7, r4, ror #8\n\t"
/* 255 - alpha */
"sub r8, %[alpha_mask], r5, lsr #24\n\t"
/* multiply by alpha (r8) then by 257 and divide by 65536 */
"mla r6, r6, r8, %[component_half]\n\t"
"mla r7, r7, r8, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r6, r6, r7, lsl #8\n\t"
"uqadd8 r5, r6, r5\n\t"
#ifdef inner_branch
"3:\n\t"
#endif
"str r5, [%[dest]], #4\n\t"
/* increment counter and jmp to top */
"subs %[w], %[w], #1\n\t"
"bne 1b\n\t"
"2:\n\t"
: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
: [component_half] "r" (component_half), [mask_alpha] "r" (mask),
[alpha_mask] "r" (alpha_mask)
: "r4", "r5", "r6", "r7", "r8", "r9", "cc", "memory"
);
}
}
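
The only new step relative to the previous function is that every source channel is first scaled by the constant mask alpha before the same OVER arithmetic runs. A scalar sketch of that scaling step (names invented for the sketch):

#include <stdint.h>

static uint32_t
scale_by_mask_alpha (uint32_t src, uint8_t mask_alpha)
{
    uint32_t result = 0;
    int      i;

    for (i = 0; i < 4; i++)
    {
        uint32_t s = (src >> (i * 8)) & 0xff;
        uint32_t t = s * mask_alpha + 0x80;   /* rounded divide by 255 */

        result |= (((t + (t >> 8)) >> 8) & 0xff) << (i * 8);
    }
    return result;
}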
static void
arm_composite_over_n_8_8888 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
uint32_t src, srca;
uint32_t *dst_line, *dst;
uint8_t *mask_line, *mask;
int dst_stride, mask_stride;
uint16_t w;
src = _pixman_image_get_solid (src_image, dst_image->bits.format);
/* bail out if fully transparent */
srca = src >> 24;
if (src == 0)
return;
uint32_t component_mask = 0xff00ff;
uint32_t component_half = 0x800080;
uint32_t src_hi = (src >> 8) & component_mask;
uint32_t src_lo = src & component_mask;
PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
while (height--)
{
dst = dst_line;
dst_line += dst_stride;
mask = mask_line;
mask_line += mask_stride;
w = width;
/* #define inner_branch */
asm volatile (
"cmp %[w], #0\n\t"
"beq 2f\n\t"
"1:\n\t"
/* load mask */
"ldrb r5, [%[mask]], #1\n\t"
#ifdef inner_branch
/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
* The 0x0 case also allows us to avoid doing an unnecessary data
* write, which is more valuable, so we only check for that
*/
"cmp r5, #0\n\t"
"beq 3f\n\t"
#endif
"ldr r4, [%[dest]] \n\t"
/* multiply by mask (r5) then by 257 and divide by 65536 */
"mla r6, %[src_lo], r5, %[component_half]\n\t"
"mla r7, %[src_hi], r5, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r5, r6, r7, lsl #8\n\t"
"uxtb16 r6, r4\n\t"
"uxtb16 r7, r4, ror #8\n\t"
/* we could simplify this to use 'sub' if we were
* willing to give up a register for alpha_mask
*/
"mvn r8, r5\n\t"
"mov r8, r8, lsr #24\n\t"
/* multiply by alpha (r8) then by 257 and divide by 65536 */
"mla r6, r6, r8, %[component_half]\n\t"
"mla r7, r7, r8, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r6, r6, r7, lsl #8\n\t"
"uqadd8 r5, r6, r5\n\t"
#ifdef inner_branch
"3:\n\t"
#endif
"str r5, [%[dest]], #4\n\t"
/* increment counter and jmp to top */
"subs %[w], %[w], #1\n\t"
"bne 1b\n\t"
"2:\n\t"
: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src), [mask] "+r" (mask)
: [component_half] "r" (component_half),
[src_hi] "r" (src_hi), [src_lo] "r" (src_lo)
: "r4", "r5", "r6", "r7", "r8", "cc", "memory");
}
}
static const pixman_fast_path_t arm_simd_fast_path_array[] =
{
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888 },
{ PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, arm_composite_add_8000_8000 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_NONE },
};
const pixman_fast_path_t *const arm_simd_fast_paths = arm_simd_fast_path_array;
static void
arm_simd_composite (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
if (_pixman_run_fast_path (arm_simd_fast_paths, imp,
op, src, mask, dest,
src_x, src_y,
mask_x, mask_y,
dest_x, dest_y,
width, height))
{
return;
}
_pixman_implementation_composite (imp->delegate, op,
src, mask, dest,
src_x, src_y,
mask_x, mask_y,
dest_x, dest_y,
width, height);
}
pixman_implementation_t *
_pixman_implementation_create_arm_simd (void)
{
pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
pixman_implementation_t *imp = _pixman_implementation_create (general);
imp->composite = arm_simd_composite;
return imp;
}
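
The dispatch pattern here is worth spelling out: each composite request is matched against a table of (op, src, mask, dest) signatures, and anything without a fast path is handed to the delegate built by _pixman_implementation_create. A simplified, self-contained C sketch of that lookup (hypothetical types, not pixman's real ones):

#include <stddef.h>

typedef struct
{
    int  op, src_format, mask_format, dest_format;
    void (*func) (void);
} demo_fast_path_t;

static int
demo_run_fast_path (const demo_fast_path_t *table,
                    int op, int src, int mask, int dest)
{
    const demo_fast_path_t *p;

    for (p = table; p->func != NULL; p++)
    {
        if (p->op == op && p->src_format == src &&
            p->mask_format == mask && p->dest_format == dest)
        {
            p->func ();
            return 1;   /* handled by a fast path */
        }
    }
    return 0;           /* caller should use its delegate */
}

The delegate chain means each implementation only has to handle the cases it actually accelerates and can stay oblivious to everything else.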

View file

@@ -0,0 +1,104 @@
/*
* Copyright © 2008 Mozilla Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Mozilla Corporation not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Mozilla Corporation makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Author: Jeff Muizelaar (jeff@infidigm.net)
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
void
arm_composite_add_8000_8000 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
void
arm_composite_over_8888_8888 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
void
arm_composite_over_8888_n_8888 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
void
arm_composite_over_n_8_8888 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
void
arm_composite_src_8888_0565 (pixman_implementation_t * impl,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);

View file

@@ -1,6 +1,5 @@
/*
* Copyright © 2008 Mozilla Corporation
* Copyright © 2008 Nokia Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
@@ -22,665 +21,79 @@
* SOFTWARE.
*
* Author: Jeff Muizelaar (jeff@infidigm.net)
* Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-arm-simd.h"
#include "pixman-private.h"
#include "pixman-arm-simd-asm.h"
void
fbCompositeSrcAdd_8000x8000arm (pixman_op_t op,
pixman_image_t * pSrc,
pixman_image_t * pMask,
pixman_image_t * pDst,
int16_t xSrc,
int16_t ySrc,
int16_t xMask,
int16_t yMask,
int16_t xDst,
int16_t yDst,
uint16_t width,
uint16_t height)
static const pixman_fast_path_t arm_simd_fast_path_array[] =
{
uint8_t *dstLine, *dst;
uint8_t *srcLine, *src;
int dstStride, srcStride;
uint16_t w;
uint8_t s, d;
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, arm_composite_over_8888_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888 },
{ PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid, PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888 },
fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
{ PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, arm_composite_add_8000_8000 },
while (height--)
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888 },
{ PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, arm_composite_src_8888_0565 },
{ PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_b5g6r5, arm_composite_src_8888_0565 },
{ PIXMAN_OP_NONE },
};
const pixman_fast_path_t *const arm_simd_fast_paths = arm_simd_fast_path_array;
static void
arm_simd_composite (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
if (_pixman_run_fast_path (arm_simd_fast_paths, imp,
op, src, mask, dest,
src_x, src_y,
mask_x, mask_y,
dest_x, dest_y,
width, height))
{
dst = dstLine;
dstLine += dstStride;
src = srcLine;
srcLine += srcStride;
w = width;
/* ensure both src and dst are properly aligned before doing 32-bit reads;
* we'll stay in this loop if src and dst have differing alignments */
while (w && (((unsigned long)dst & 3) || ((unsigned long)src & 3)))
{
s = *src;
d = *dst;
asm("uqadd8 %0, %1, %2" : "+r"(d) : "r"(s));
*dst = d;
dst++;
src++;
w--;
}
while (w >= 4)
{
asm("uqadd8 %0, %1, %2" : "=r"(*(uint32_t*)dst) : "r"(*(uint32_t*)src), "r"(*(uint32_t*)dst));
dst += 4;
src += 4;
w -= 4;
}
while (w)
{
s = *src;
d = *dst;
asm("uqadd8 %0, %1, %2" : "+r"(d) : "r"(s));
*dst = d;
dst++;
src++;
w--;
}
}
}
void
fbCompositeSrc_8888x8888arm (pixman_op_t op,
pixman_image_t * pSrc,
pixman_image_t * pMask,
pixman_image_t * pDst,
int16_t xSrc,
int16_t ySrc,
int16_t xMask,
int16_t yMask,
int16_t xDst,
int16_t yDst,
uint16_t width,
uint16_t height)
{
uint32_t *dstLine, *dst;
uint32_t *srcLine, *src;
int dstStride, srcStride;
uint16_t w;
uint32_t component_half = 0x800080;
uint32_t upper_component_mask = 0xff00ff00;
uint32_t alpha_mask = 0xff;
fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
while (height--)
{
dst = dstLine;
dstLine += dstStride;
src = srcLine;
srcLine += srcStride;
w = width;
//#define inner_branch
asm volatile (
"cmp %[w], #0\n\t"
"beq 2f\n\t"
"1:\n\t"
/* load src */
"ldr r5, [%[src]], #4\n\t"
#ifdef inner_branch
/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
* The 0x0 case also allows us to avoid doing an unnecessary data
* write, which is more valuable, so we only check for that */
"cmp r5, #0\n\t"
"beq 3f\n\t"
/* = 255 - alpha */
"sub r8, %[alpha_mask], r5, lsr #24\n\t"
"ldr r4, [%[dest]] \n\t"
#else
"ldr r4, [%[dest]] \n\t"
/* = 255 - alpha */
"sub r8, %[alpha_mask], r5, lsr #24\n\t"
#endif
"uxtb16 r6, r4\n\t"
"uxtb16 r7, r4, ror #8\n\t"
/* multiply by 257 and divide by 65536 */
"mla r6, r6, r8, %[component_half]\n\t"
"mla r7, r7, r8, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
/* recombine the 0xff00ff00 bytes of r6 and r7 */
"and r7, r7, %[upper_component_mask]\n\t"
"uxtab16 r6, r7, r6, ror #8\n\t"
"uqadd8 r5, r6, r5\n\t"
#ifdef inner_branch
"3:\n\t"
#endif
"str r5, [%[dest]], #4\n\t"
/* increment counter and jmp to top */
"subs %[w], %[w], #1\n\t"
"bne 1b\n\t"
"2:\n\t"
: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
: [component_half] "r" (component_half), [upper_component_mask] "r" (upper_component_mask),
[alpha_mask] "r" (alpha_mask)
: "r4", "r5", "r6", "r7", "r8", "cc", "memory"
);
}
}
void
fbCompositeSrc_8888x8x8888arm (pixman_op_t op,
pixman_image_t * pSrc,
pixman_image_t * pMask,
pixman_image_t * pDst,
int16_t xSrc,
int16_t ySrc,
int16_t xMask,
int16_t yMask,
int16_t xDst,
int16_t yDst,
uint16_t width,
uint16_t height)
{
uint32_t *dstLine, *dst;
uint32_t *srcLine, *src;
uint32_t mask;
int dstStride, srcStride;
uint16_t w;
uint32_t component_half = 0x800080;
uint32_t alpha_mask = 0xff;
fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
fbComposeGetSolid (pMask, mask, pDst->bits.format);
mask = (mask) >> 24;
while (height--)
{
dst = dstLine;
dstLine += dstStride;
src = srcLine;
srcLine += srcStride;
w = width;
//#define inner_branch
asm volatile (
"cmp %[w], #0\n\t"
"beq 2f\n\t"
"1:\n\t"
/* load src */
"ldr r5, [%[src]], #4\n\t"
#ifdef inner_branch
/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
* The 0x0 case also allows us to avoid doing an unnecessary data
* write, which is more valuable, so we only check for that */
"cmp r5, #0\n\t"
"beq 3f\n\t"
#endif
"ldr r4, [%[dest]] \n\t"
"uxtb16 r6, r5\n\t"
"uxtb16 r7, r5, ror #8\n\t"
/* multiply by mask alpha then by 257 and divide by 65536 */
"mla r6, r6, %[mask_alpha], %[component_half]\n\t"
"mla r7, r7, %[mask_alpha], %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r5, r6, r7, lsl #8\n\t"
"uxtb16 r6, r4\n\t"
"uxtb16 r7, r4, ror #8\n\t"
/* 255 - alpha */
"sub r8, %[alpha_mask], r5, lsr #24\n\t"
/* multiply by alpha (r8) then by 257 and divide by 65536 */
"mla r6, r6, r8, %[component_half]\n\t"
"mla r7, r7, r8, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r6, r6, r7, lsl #8\n\t"
"uqadd8 r5, r6, r5\n\t"
#ifdef inner_branch
"3:\n\t"
#endif
"str r5, [%[dest]], #4\n\t"
/* increment counter and jmp to top */
"subs %[w], %[w], #1\n\t"
"bne 1b\n\t"
"2:\n\t"
: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
: [component_half] "r" (component_half), [mask_alpha] "r" (mask),
[alpha_mask] "r" (alpha_mask)
: "r4", "r5", "r6", "r7", "r8", "r9", "cc", "memory"
);
}
}
void
fbCompositeSolidMask_nx8x8888arm (pixman_op_t op,
pixman_image_t * pSrc,
pixman_image_t * pMask,
pixman_image_t * pDst,
int16_t xSrc,
int16_t ySrc,
int16_t xMask,
int16_t yMask,
int16_t xDst,
int16_t yDst,
uint16_t width,
uint16_t height)
{
uint32_t src, srca;
uint32_t *dstLine, *dst;
uint8_t *maskLine, *mask;
int dstStride, maskStride;
uint16_t w;
fbComposeGetSolid(pSrc, src, pDst->bits.format);
srca = src >> 24;
if (src == 0)
return;
uint32_t component_mask = 0xff00ff;
uint32_t component_half = 0x800080;
uint32_t src_hi = (src >> 8) & component_mask;
uint32_t src_lo = src & component_mask;
fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
while (height--)
{
dst = dstLine;
dstLine += dstStride;
mask = maskLine;
maskLine += maskStride;
w = width;
//#define inner_branch
asm volatile (
"cmp %[w], #0\n\t"
"beq 2f\n\t"
"1:\n\t"
/* load mask */
"ldrb r5, [%[mask]], #1\n\t"
#ifdef inner_branch
/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
* The 0x0 case also allows us to avoid doing an unnecessary data
* write, which is more valuable, so we only check for that */
"cmp r5, #0\n\t"
"beq 3f\n\t"
#endif
"ldr r4, [%[dest]] \n\t"
/* multiply by mask (r5) then by 257 and divide by 65536 */
"mla r6, %[src_lo], r5, %[component_half]\n\t"
"mla r7, %[src_hi], r5, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r5, r6, r7, lsl #8\n\t"
"uxtb16 r6, r4\n\t"
"uxtb16 r7, r4, ror #8\n\t"
/* we could simplify this to use 'sub' if we were
* willing to give up a register for alpha_mask */
"mvn r8, r5\n\t"
"mov r8, r8, lsr #24\n\t"
/* multiply by alpha (r8) then by 257 and divide by 65536 */
"mla r6, r6, r8, %[component_half]\n\t"
"mla r7, r7, r8, %[component_half]\n\t"
"uxtab16 r6, r6, r6, ror #8\n\t"
"uxtab16 r7, r7, r7, ror #8\n\t"
"uxtb16 r6, r6, ror #8\n\t"
"uxtb16 r7, r7, ror #8\n\t"
/* recombine */
"orr r6, r6, r7, lsl #8\n\t"
"uqadd8 r5, r6, r5\n\t"
#ifdef inner_branch
"3:\n\t"
#endif
"str r5, [%[dest]], #4\n\t"
/* increment counter and jmp to top */
"subs %[w], %[w], #1\n\t"
"bne 1b\n\t"
"2:\n\t"
: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src), [mask] "+r" (mask)
: [component_half] "r" (component_half),
[src_hi] "r" (src_hi), [src_lo] "r" (src_lo)
: "r4", "r5", "r6", "r7", "r8", "cc", "memory"
);
}
_pixman_implementation_composite (imp->delegate, op,
src, mask, dest,
src_x, src_y,
mask_x, mask_y,
dest_x, dest_y,
width, height);
}
/**
* Conversion x8r8g8b8 -> r5g6b5
*
* TODO: optimize more, eliminate stalls, try to use burst writes (4 words,
* aligned at a 16-byte boundary)
*/
static inline void fbComposite_x8r8g8b8_src_r5g6b5_internal_mixed_armv6_c(
uint16_t *dst, uint32_t *src, int w, int dst_stride,
int src_stride, int h)
pixman_implementation_t *
_pixman_implementation_create_arm_simd (void)
{
uint32_t a, x, y, c1F001F = 0x1F001F;
int backup_w = w;
while (h--)
{
w = backup_w;
if (w > 0 && (uintptr_t)dst & 2)
{
x = *src++;
pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
pixman_implementation_t *imp = _pixman_implementation_create (general);
a = (x >> 3) & c1F001F;
x &= 0xFC00;
a |= a >> 5;
a |= x >> 5;
*dst++ = a;
w--;
}
asm volatile(
"subs %[w], %[w], #2\n"
"blt 2f\n"
"1:\n"
"ldr %[x], [%[src]], #4\n"
"ldr %[y], [%[src]], #4\n"
"subs %[w], %[w], #2\n"
"and %[a], %[c1F001F], %[x], lsr #3\n"
"and %[x], %[x], #0xFC00\n\n"
"orr %[a], %[a], %[a], lsr #5\n"
"orr %[x], %[a], %[x], lsr #5\n"
"and %[a], %[c1F001F], %[y], lsr #3\n"
"and %[y], %[y], #0xFC00\n\n"
"orr %[a], %[a], %[a], lsr #5\n"
"orr %[y], %[a], %[y], lsr #5\n"
"pkhbt %[x], %[x], %[y], lsl #16\n"
"str %[x], [%[dst]], #4\n"
"bge 1b\n"
"2:\n"
: [c1F001F] "+&r" (c1F001F), [src] "+&r" (src), [dst] "+&r" (dst), [a] "=&r" (a),
[x] "=&r" (x), [y] "=&r" (y), [w] "+&r" (w)
);
if (w & 1)
{
x = *src++;
a = (x >> 3) & c1F001F;
x = x & 0xFC00;
a |= a >> 5;
a |= x >> 5;
*dst++ = a;
}
src += src_stride - backup_w;
dst += dst_stride - backup_w;
}
}
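
The bit manipulation in the loop above packs the top 5/6/5 bits of red, green and blue into one 16-bit pixel; a scalar reference conversion, shown here purely for illustration:

#include <stdint.h>

/* Scalar reference for the x8r8g8b8 -> r5g6b5 conversion: the inline
 * assembly computes the same result two pixels at a time. */
static uint16_t
convert_x8r8g8b8_to_r5g6b5 (uint32_t s)
{
    return (uint16_t) (((s >> 3) & 0x001f) |   /* blue:  bits 3-7   -> 0-4   */
                       ((s >> 5) & 0x07e0) |   /* green: bits 10-15 -> 5-10  */
                       ((s >> 8) & 0xf800));   /* red:   bits 19-23 -> 11-15 */
}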
/**
* Conversion x8r8g8b8 -> r5g6b5
*
* Note: 'w' must be >= 7
*/
static void __attribute__((naked)) fbComposite_x8r8g8b8_src_r5g6b5_internal_armv6(
uint16_t *dst, uint32_t *src, int w, int dst_stride,
int src_stride, int h)
{
asm volatile(
/* define supplementary macros */
".macro cvt8888to565 PIX\n"
"and A, C1F001F, \\PIX, lsr #3\n"
"and \\PIX, \\PIX, #0xFC00\n\n"
"orr A, A, A, lsr #5\n"
"orr \\PIX, A, \\PIX, lsr #5\n"
".endm\n"
".macro combine_pixels_pair PIX1, PIX2\n"
"pkhbt \\PIX1, \\PIX1, \\PIX2, lsl #16\n" /* Note: assume little endian byte order */
".endm\n"
/* function entry, save all registers (10 words) to stack */
"stmdb sp!, {r4-r11, ip, lr}\n"
/* define some aliases */
"DST .req r0\n"
"SRC .req r1\n"
"W .req r2\n"
"H .req r3\n"
"TMP1 .req r4\n"
"TMP2 .req r5\n"
"TMP3 .req r6\n"
"TMP4 .req r7\n"
"TMP5 .req r8\n"
"TMP6 .req r9\n"
"TMP7 .req r10\n"
"TMP8 .req r11\n"
"C1F001F .req ip\n"
"A .req lr\n"
"ldr TMP1, [sp, #(10*4+0)]\n" /* load src_stride */
"ldr C1F001F, =0x1F001F\n"
"sub r3, r3, W\n"
"str r3, [sp, #(10*4+0)]\n" /* store (dst_stride-w) */
"ldr r3, [sp, #(10*4+4)]\n" /* load h */
"sub TMP1, TMP1, W\n"
"str TMP1, [sp, #(10*4+4)]\n" /* store (src_stride-w) */
"str W, [sp, #(8*4)]\n" /* saved ip = W */
"0:\n"
"subs H, H, #1\n"
"blt 6f\n"
"1:\n"
/* align DST at 4 byte boundary */
"tst DST, #2\n"
"beq 2f\n"
"ldr TMP1, [SRC], #4\n"
"sub W, W, #1\n"
"cvt8888to565 TMP1\n"
"strh TMP1, [DST], #2\n"
"2:"
/* align DST at 8 byte boundary */
"tst DST, #4\n"
"beq 2f\n"
"ldmia SRC!, {TMP1, TMP2}\n"
"sub W, W, #2\n"
"cvt8888to565 TMP1\n"
"cvt8888to565 TMP2\n"
"combine_pixels_pair TMP1, TMP2\n"
"str TMP1, [DST], #4\n"
"2:"
/* align DST at 16 byte boundary */
"tst DST, #8\n"
"beq 2f\n"
"ldmia SRC!, {TMP1, TMP2, TMP3, TMP4}\n"
"sub W, W, #4\n"
"cvt8888to565 TMP1\n"
"cvt8888to565 TMP2\n"
"cvt8888to565 TMP3\n"
"cvt8888to565 TMP4\n"
"combine_pixels_pair TMP1, TMP2\n"
"combine_pixels_pair TMP3, TMP4\n"
"stmia DST!, {TMP1, TMP3}\n"
"2:"
/* inner loop, process 8 pixels per iteration */
"subs W, W, #8\n"
"blt 4f\n"
"3:\n"
"ldmia SRC!, {TMP1, TMP2, TMP3, TMP4, TMP5, TMP6, TMP7, TMP8}\n"
"subs W, W, #8\n"
"cvt8888to565 TMP1\n"
"cvt8888to565 TMP2\n"
"cvt8888to565 TMP3\n"
"cvt8888to565 TMP4\n"
"cvt8888to565 TMP5\n"
"cvt8888to565 TMP6\n"
"cvt8888to565 TMP7\n"
"cvt8888to565 TMP8\n"
"combine_pixels_pair TMP1, TMP2\n"
"combine_pixels_pair TMP3, TMP4\n"
"combine_pixels_pair TMP5, TMP6\n"
"combine_pixels_pair TMP7, TMP8\n"
"stmia DST!, {TMP1, TMP3, TMP5, TMP7}\n"
"bge 3b\n"
"4:\n"
/* process the remaining pixels */
"tst W, #4\n"
"beq 4f\n"
"ldmia SRC!, {TMP1, TMP2, TMP3, TMP4}\n"
"cvt8888to565 TMP1\n"
"cvt8888to565 TMP2\n"
"cvt8888to565 TMP3\n"
"cvt8888to565 TMP4\n"
"combine_pixels_pair TMP1, TMP2\n"
"combine_pixels_pair TMP3, TMP4\n"
"stmia DST!, {TMP1, TMP3}\n"
"4:\n"
"tst W, #2\n"
"beq 4f\n"
"ldmia SRC!, {TMP1, TMP2}\n"
"cvt8888to565 TMP1\n"
"cvt8888to565 TMP2\n"
"combine_pixels_pair TMP1, TMP2\n"
"str TMP1, [DST], #4\n"
"4:\n"
"tst W, #1\n"
"beq 4f\n"
"ldr TMP1, [SRC], #4\n"
"cvt8888to565 TMP1\n"
"strh TMP1, [DST], #2\n"
"4:\n"
"ldr TMP1, [sp, #(10*4+0)]\n" /* (dst_stride-w) */
"ldr TMP2, [sp, #(10*4+4)]\n" /* (src_stride-w) */
"ldr W, [sp, #(8*4)]\n"
"subs H, H, #1\n"
"add DST, DST, TMP1, lsl #1\n"
"add SRC, SRC, TMP2, lsl #2\n"
"bge 1b\n"
"6:\n"
"ldmia sp!, {r4-r11, ip, pc}\n" /* restore all registers and return */
".ltorg\n"
".unreq DST\n"
".unreq SRC\n"
".unreq W\n"
".unreq H\n"
".unreq TMP1\n"
".unreq TMP2\n"
".unreq TMP3\n"
".unreq TMP4\n"
".unreq TMP5\n"
".unreq TMP6\n"
".unreq TMP7\n"
".unreq TMP8\n"
".unreq C1F001F\n"
".unreq A\n"
".purgem cvt8888to565\n"
".purgem combine_pixels_pair\n"
);
}
void
fbCompositeSrc_x888x0565arm (pixman_op_t op,
pixman_image_t * pSrc,
pixman_image_t * pMask,
pixman_image_t * pDst,
int16_t xSrc,
int16_t ySrc,
int16_t xMask,
int16_t yMask,
int16_t xDst,
int16_t yDst,
uint16_t width,
uint16_t height)
{
uint16_t *dstLine, *dst;
uint32_t *srcLine, *src;
int dstStride, srcStride;
uint16_t w, h;
fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
dst = dstLine;
src = srcLine;
h = height;
w = width;
if (w < 7)
fbComposite_x8r8g8b8_src_r5g6b5_internal_mixed_armv6_c(dst, src, w, dstStride, srcStride, h);
else
fbComposite_x8r8g8b8_src_r5g6b5_internal_armv6(dst, src, w, dstStride, srcStride, h);
}
pixman_implementation_t *
_pixman_implementation_create_arm_simd (void)
{
pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
pixman_implementation_t *imp = _pixman_implementation_create (general);
imp->composite = arm_simd_composite;
return imp;
}

Diff not shown because of its large size. Load diff

Diff not shown because of its large size. Load diff

View file

@@ -1,7 +1,7 @@
/* WARNING: This file is generated by combine.pl from combine.inc.
Please edit one of those files rather than this one. */
#line 1 "combine.inc"
#line 1 "pixman-combine.c.template"
#define COMPONENT_SIZE 8
#define MASK 0xff
@@ -19,199 +19,215 @@
#define RB_ONE_HALF 0x800080
#define RB_MASK_PLUS_ONE 0x10000100
#define Alpha(x) ((x) >> A_SHIFT)
#define ALPHA_8(x) ((x) >> A_SHIFT)
#define RED_8(x) (((x) >> R_SHIFT) & MASK)
#define GREEN_8(x) (((x) >> G_SHIFT) & MASK)
#define BLUE_8(x) ((x) & MASK)
/*
* Helper macros.
*/
#define IntMult(a,b,t) ( (t) = (a) * (b) + ONE_HALF, ( ( ( (t)>>G_SHIFT ) + (t) )>>G_SHIFT ) )
#define IntDiv(a,b) (((uint16_t) (a) * MASK) / (b))
#define MUL_UN8(a, b, t) \
((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))
#define GetComp(v,i) ((uint16_t) (uint8_t) ((v) >> i))
#define DIV_UN8(a, b) \
(((uint16_t) (a) * MASK) / (b))
#define Add(x,y,i,t) ((t) = GetComp(x,i) + GetComp(y,i), \
(uint32_t) ((uint8_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))
#define ADD_UN8(x, y, t) \
((t) = x + y, \
(uint32_t) (uint8_t) ((t) | (0 - ((t) >> G_SHIFT))))
#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (IntMult(GetComp(y,i),ay,(u)) + \
IntMult(GetComp(x,i),ax,(v))), \
(uint32_t) ((uint8_t) ((t) | \
(0 - ((t) >> G_SHIFT)))) << (i))
#define DIV_ONE_UN8(x) \
(((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
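/* A minimal usage sketch (illustrative, not from the diff): MUL_UN8
 * computes a rounded (a * b) / 255 without a division. */
static inline uint8_t
example_scale_un8 (uint8_t chan, uint8_t alpha)
{
    uint16_t t;

    /* e.g. chan = 0x80, alpha = 0x80 yields 0x40 (128 * 128 / 255, rounded) */
    return MUL_UN8 (chan, alpha, t);
}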
/*
The methods below use some tricks to be able to do two color
components at the same time.
*/
* The methods below use some tricks to be able to do two color
* components at the same time.
*/
/*
x_c = (x_c * a) / 255
*/
#define FbByteMul(x, a) do { \
uint32_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
* x_c = (x_c * a) / 255
*/
#define UN8x4_MUL_UN8(x, a) \
do \
{ \
uint32_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
\
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
x &= RB_MASK << COMPONENT_SIZE; \
x += t; \
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
x &= RB_MASK << COMPONENT_SIZE; \
x += t; \
} while (0)
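/* Sketch of the packed trick (hypothetical helper): UN8x4_MUL_UN8 scales
 * red and blue in one 32-bit multiply and alpha and green in another,
 * with per-component rounding and no overflow between components. */
static inline uint32_t
example_un8x4_mul_un8 (uint32_t pixel, uint8_t a)
{
    /* e.g. pixel = 0xffffffff, a = 0x80 yields 0x80808080 */
    UN8x4_MUL_UN8 (pixel, a);
    return pixel;
}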
/*
x_c = (x_c * a) / 255 + y
*/
#define FbByteMulAdd(x, a, y) do { \
/* multiply and divide: trunc((i + 128)*257/65536) */ \
uint32_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
* x_c = (x_c * a) / 255 + y_c
*/
#define UN8x4_MUL_UN8_ADD_UN8x4(x, a, y) \
do \
{ \
/* multiply and divide: trunc((i + 128)*257/65536) */ \
uint32_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
\
/* add */ \
t += y & RB_MASK; \
/* add */ \
t += y & RB_MASK; \
\
/* saturate */ \
t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
t &= RB_MASK; \
/* saturate */ \
t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
t &= RB_MASK; \
\
/* multiply and divide */ \
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
x &= RB_MASK; \
/* multiply and divide */ \
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
x &= RB_MASK; \
\
/* add */ \
x += (y >> COMPONENT_SIZE) & RB_MASK; \
/* add */ \
x += (y >> COMPONENT_SIZE) & RB_MASK; \
\
/* saturate */ \
x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
x &= RB_MASK; \
/* saturate */ \
x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
x &= RB_MASK; \
\
/* recombine */ \
x <<= COMPONENT_SIZE; \
x += t; \
/* recombine */ \
x <<= COMPONENT_SIZE; \
x += t; \
} while (0)
/*
x_c = (x_c * a + y_c * b) / 255
*/
#define FbByteAddMul(x, a, y, b) do { \
uint32_t t; \
uint32_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
* x_c = (x_c * a + y_c * b) / 255
*/
#define UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8(x, a, y, b) \
do \
{ \
uint32_t t; \
uint32_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
t = (x & G_MASK) * a + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
t = (x & G_MASK) * a + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
\
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
\
r = ((x >> R_SHIFT) & MASK) * a + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
r = ((x >> R_SHIFT) & MASK) * a + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
} while (0)
/*
x_c = (x_c * a_c) / 255
*/
#define FbByteMulC(x, a) do { \
uint32_t t; \
uint32_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
* x_c = (x_c * a_c) / 255
*/
#define UN8x4_MUL_UN8x4(x, a) \
do \
{ \
uint32_t t; \
uint32_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
\
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = t + ((t >> G_SHIFT) & RB_MASK); \
x = r | (t & AG_MASK); \
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = t + ((t >> G_SHIFT) & RB_MASK); \
x = r | (t & AG_MASK); \
} while (0)
/*
x_c = (x_c * a) / 255 + y
*/
#define FbByteMulAddC(x, a, y) do { \
uint32_t t; \
uint32_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
r += y & RB_MASK; \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
* x_c = (x_c * a_c) / 255 + y_c
*/
#define UN8x4_MUL_UN8x4_ADD_UN8x4(x, a, y) \
do \
{ \
uint32_t t; \
uint32_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
r += y & RB_MASK; \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
\
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
t &= RB_MASK; \
t += (y >> G_SHIFT) & RB_MASK; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
x = r | (t << G_SHIFT); \
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
t &= RB_MASK; \
t += (y >> G_SHIFT) & RB_MASK; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
x = r | (t << G_SHIFT); \
} while (0)
/*
x_c = (x_c * a_c + y_c * b) / 255
*/
#define FbByteAddMulC(x, a, y, b) do { \
uint32_t t; \
uint32_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
(y >> A_SHIFT) * b; \
r += (r >> G_SHIFT) + ONE_HALF; \
r >>= G_SHIFT; \
\
t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
\
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
\
r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
* x_c = (x_c * a_c + y_c * b) / 255
*/
#define UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8(x, a, y, b) \
do \
{ \
uint32_t t; \
uint32_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
(y >> A_SHIFT) * b; \
r += (r >> G_SHIFT) + ONE_HALF; \
r >>= G_SHIFT; \
\
t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
\
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
\
r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
} while (0)
/*
x_c = min(x_c + y_c, 255)
*/
#define FbByteAdd(x, y) do { \
uint32_t t; \
uint32_t r = (x & RB_MASK) + (y & RB_MASK); \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
\
t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
r |= (t & RB_MASK) << G_SHIFT; \
x = r; \
x_c = min(x_c + y_c, 255)
*/
#define UN8x4_ADD_UN8x4(x, y) \
do \
{ \
uint32_t t; \
uint32_t r = (x & RB_MASK) + (y & RB_MASK); \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
\
t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
r |= (t & RB_MASK) << G_SHIFT; \
x = r; \
} while (0)

Diff not shown because of its large size. Load diff

View file

@@ -1,7 +1,7 @@
/* WARNING: This file is generated by combine.pl from combine.inc.
Please edit one of those files rather than this one. */
#line 1 "combine.inc"
#line 1 "pixman-combine.c.template"
#define COMPONENT_SIZE 16
#define MASK 0xffffULL
@@ -19,199 +19,215 @@
#define RB_ONE_HALF 0x800000008000ULL
#define RB_MASK_PLUS_ONE 0x10000000010000ULL
#define Alpha(x) ((x) >> A_SHIFT)
#define ALPHA_16(x) ((x) >> A_SHIFT)
#define RED_16(x) (((x) >> R_SHIFT) & MASK)
#define GREEN_16(x) (((x) >> G_SHIFT) & MASK)
#define BLUE_16(x) ((x) & MASK)
/*
* Helper macros.
*/
#define IntMult(a,b,t) ( (t) = (a) * (b) + ONE_HALF, ( ( ( (t)>>G_SHIFT ) + (t) )>>G_SHIFT ) )
#define IntDiv(a,b) (((uint32_t) (a) * MASK) / (b))
#define MUL_UN16(a, b, t) \
((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))
#define GetComp(v,i) ((uint32_t) (uint16_t) ((v) >> i))
#define DIV_UN16(a, b) \
(((uint32_t) (a) * MASK) / (b))
#define Add(x,y,i,t) ((t) = GetComp(x,i) + GetComp(y,i), \
(uint64_t) ((uint16_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))
#define ADD_UN16(x, y, t) \
((t) = x + y, \
(uint64_t) (uint16_t) ((t) | (0 - ((t) >> G_SHIFT))))
#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (IntMult(GetComp(y,i),ay,(u)) + \
IntMult(GetComp(x,i),ax,(v))), \
(uint64_t) ((uint16_t) ((t) | \
(0 - ((t) >> G_SHIFT)))) << (i))
#define DIV_ONE_UN16(x) \
(((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
/*
The methods below use some tricks to be able to do two color
components at the same time.
*/
* The methods below use some tricks to be able to do two color
* components at the same time.
*/
/*
x_c = (x_c * a) / 255
*/
#define FbByteMul(x, a) do { \
uint64_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
* x_c = (x_c * a) / 255
*/
#define UN16x4_MUL_UN16(x, a) \
do \
{ \
uint64_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
\
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
x &= RB_MASK << COMPONENT_SIZE; \
x += t; \
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)); \
x &= RB_MASK << COMPONENT_SIZE; \
x += t; \
} while (0)
/*
x_c = (x_c * a) / 255 + y
*/
#define FbByteMulAdd(x, a, y) do { \
/* multiply and divide: trunc((i + 128)*257/65536) */ \
uint64_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
* x_c = (x_c * a) / 255 + y_c
*/
#define UN16x4_MUL_UN16_ADD_UN16x4(x, a, y) \
do \
{ \
/* multiply and divide: trunc((i + 128)*257/65536) */ \
uint64_t t = ((x & RB_MASK) * a) + RB_ONE_HALF; \
t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
t &= RB_MASK; \
\
/* add */ \
t += y & RB_MASK; \
/* add */ \
t += y & RB_MASK; \
\
/* saturate */ \
t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
t &= RB_MASK; \
/* saturate */ \
t |= RB_MASK_PLUS_ONE - ((t >> COMPONENT_SIZE) & RB_MASK); \
t &= RB_MASK; \
\
/* multiply and divide */ \
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
x &= RB_MASK; \
/* multiply and divide */ \
x = (((x >> COMPONENT_SIZE) & RB_MASK) * a) + RB_ONE_HALF; \
x = (x + ((x >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE; \
x &= RB_MASK; \
\
/* add */ \
x += (y >> COMPONENT_SIZE) & RB_MASK; \
/* add */ \
x += (y >> COMPONENT_SIZE) & RB_MASK; \
\
/* saturate */ \
x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
x &= RB_MASK; \
/* saturate */ \
x |= RB_MASK_PLUS_ONE - ((x >> COMPONENT_SIZE) & RB_MASK); \
x &= RB_MASK; \
\
/* recombine */ \
x <<= COMPONENT_SIZE; \
x += t; \
/* recombine */ \
x <<= COMPONENT_SIZE; \
x += t; \
} while (0)
/*
x_c = (x_c * a + y_c * b) / 255
*/
#define FbByteAddMul(x, a, y, b) do { \
uint64_t t; \
uint64_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
* x_c = (x_c * a + y_c * b) / 255
*/
#define UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16(x, a, y, b) \
do \
{ \
uint64_t t; \
uint64_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
t = (x & G_MASK) * a + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
t = (x & G_MASK) * a + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
\
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
\
r = ((x >> R_SHIFT) & MASK) * a + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
r = ((x >> R_SHIFT) & MASK) * a + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
x = (x & MASK) * a + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
} while (0)
/*
x_c = (x_c * a_c) / 255
*/
#define FbByteMulC(x, a) do { \
uint64_t t; \
uint64_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
* x_c = (x_c * a_c) / 255
*/
#define UN16x4_MUL_UN16x4(x, a) \
do \
{ \
uint64_t t; \
uint64_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
\
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = t + ((t >> G_SHIFT) & RB_MASK); \
x = r | (t & AG_MASK); \
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = t + ((t >> G_SHIFT) & RB_MASK); \
x = r | (t & AG_MASK); \
} while (0)
/*
x_c = (x_c * a) / 255 + y
*/
#define FbByteMulAddC(x, a, y) do { \
uint64_t t; \
uint64_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
r += y & RB_MASK; \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
* x_c = (x_c * a_c) / 255 + y_c
*/
#define UN16x4_MUL_UN16x4_ADD_UN16x4(x, a, y) \
do \
{ \
uint64_t t; \
uint64_t r = (x & MASK) * (a & MASK); \
r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
r += RB_ONE_HALF; \
r = (r + ((r >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
r &= RB_MASK; \
r += y & RB_MASK; \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
\
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
t &= RB_MASK; \
t += (y >> G_SHIFT) & RB_MASK; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
x = r | (t << G_SHIFT); \
x >>= G_SHIFT; \
t = (x & MASK) * ((a >> G_SHIFT) & MASK); \
t |= (x & R_MASK) * (a >> A_SHIFT); \
t += RB_ONE_HALF; \
t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
t &= RB_MASK; \
t += (y >> G_SHIFT) & RB_MASK; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
x = r | (t << G_SHIFT); \
} while (0)
/*
x_c = (x_c * a_c + y_c * b) / 255
*/
#define FbByteAddMulC(x, a, y, b) do { \
uint64_t t; \
uint64_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
(y >> A_SHIFT) * b; \
r += (r >> G_SHIFT) + ONE_HALF; \
r >>= G_SHIFT; \
\
t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
\
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
\
r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
* x_c = (x_c * a_c + y_c * b) / 255
*/
#define UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16(x, a, y, b) \
do \
{ \
uint64_t t; \
uint64_t r = (x >> A_SHIFT) * (a >> A_SHIFT) + \
(y >> A_SHIFT) * b; \
r += (r >> G_SHIFT) + ONE_HALF; \
r >>= G_SHIFT; \
\
t = (x & G_MASK) * ((a >> G_SHIFT) & MASK) + (y & G_MASK) * b; \
t += (t >> G_SHIFT) + (ONE_HALF << G_SHIFT); \
t >>= R_SHIFT; \
\
t |= r << R_SHIFT; \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
t &= RB_MASK; \
t <<= G_SHIFT; \
\
r = ((x >> R_SHIFT) & MASK) * ((a >> R_SHIFT) & MASK) + \
((y >> R_SHIFT) & MASK) * b + ONE_HALF; \
r += (r >> G_SHIFT); \
r >>= G_SHIFT; \
\
x = (x & MASK) * (a & MASK) + (y & MASK) * b + ONE_HALF; \
x += (x >> G_SHIFT); \
x >>= G_SHIFT; \
x |= r << R_SHIFT; \
x |= RB_MASK_PLUS_ONE - ((x >> G_SHIFT) & RB_MASK); \
x &= RB_MASK; \
x |= t; \
} while (0)
/*
x_c = min(x_c + y_c, 255)
*/
#define FbByteAdd(x, y) do { \
uint64_t t; \
uint64_t r = (x & RB_MASK) + (y & RB_MASK); \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
\
t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
r |= (t & RB_MASK) << G_SHIFT; \
x = r; \
x_c = min(x_c + y_c, 255)
*/
#define UN16x4_ADD_UN16x4(x, y) \
do \
{ \
uint64_t t; \
uint64_t r = (x & RB_MASK) + (y & RB_MASK); \
r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK); \
r &= RB_MASK; \
\
t = ((x >> G_SHIFT) & RB_MASK) + ((y >> G_SHIFT) & RB_MASK); \
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
r |= (t & RB_MASK) << G_SHIFT; \
x = r; \
} while (0)

View file

@@ -0,0 +1,71 @@
/* Pixman uses some non-standard compiler features. This file ensures
* they exist
*
* The features are:
*
* FUNC           must be defined to expand to the current function
* PIXMAN_EXPORT  should be defined to whatever is required to
*                export functions from a shared library
* limits         limits for various types must be defined
* inline         must be defined
* force_inline   must be defined
*/
#if defined (__GNUC__)
# define FUNC ((const char*) (__PRETTY_FUNCTION__))
#elif defined (__sun) || (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
# define FUNC ((const char*) (__func__))
#else
# define FUNC ((const char*) ("???"))
#endif
#ifndef INT16_MIN
# define INT16_MIN (-32767-1)
#endif
#ifndef INT16_MAX
# define INT16_MAX (32767)
#endif
#ifndef INT32_MIN
# define INT32_MIN (-2147483647-1)
#endif
#ifndef INT32_MAX
# define INT32_MAX (2147483647)
#endif
#ifndef UINT32_MIN
# define UINT32_MIN (0)
#endif
#ifndef UINT32_MAX
# define UINT32_MAX (4294967295U)
#endif
#ifndef M_PI
# define M_PI 3.14159265358979323846
#endif
#ifdef _MSC_VER
/* 'inline' is available only in C++ in MSVC */
# define inline __inline
# define force_inline __forceinline
#elif defined __GNUC__ || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define inline __inline__
# define force_inline __inline__ __attribute__ ((__always_inline__))
#else
# ifndef force_inline
# define force_inline inline
# endif
#endif
/* GCC visibility */
#if defined(__GNUC__) && __GNUC__ >= 4
# define PIXMAN_EXPORT __attribute__ ((visibility("default")))
/* Sun Studio 8 visibility */
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550)
# define PIXMAN_EXPORT __global
#else
# define PIXMAN_EXPORT
#endif
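/* Usage sketch (illustrative; the names are hypothetical): an exported
 * entry point and a forced-inline helper would be written as follows. */
static force_inline int
example_helper (int x)
{
    return x + 1;
}

PIXMAN_EXPORT int
example_entry_point (int x)
{
    return example_helper (x);
}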

View file

@@ -0,0 +1,184 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include <math.h>
#include "pixman-private.h"
static void
conical_gradient_get_scanline_32 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask,
uint32_t mask_bits)
{
source_image_t *source = (source_image_t *)image;
gradient_t *gradient = (gradient_t *)source;
conical_gradient_t *conical = (conical_gradient_t *)image;
uint32_t *end = buffer + width;
pixman_gradient_walker_t walker;
pixman_bool_t affine = TRUE;
double cx = 1.;
double cy = 0.;
double cz = 0.;
double rx = x + 0.5;
double ry = y + 0.5;
double rz = 1.;
double a = (conical->angle * M_PI) / (180. * 65536);
_pixman_gradient_walker_init (&walker, gradient, source->common.repeat);
if (source->common.transform)
{
pixman_vector_t v;
/* reference point is the center of the pixel */
v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
v.vector[2] = pixman_fixed_1;
if (!pixman_transform_point_3d (source->common.transform, &v))
return;
cx = source->common.transform->matrix[0][0] / 65536.;
cy = source->common.transform->matrix[1][0] / 65536.;
cz = source->common.transform->matrix[2][0] / 65536.;
rx = v.vector[0] / 65536.;
ry = v.vector[1] / 65536.;
rz = v.vector[2] / 65536.;
affine =
source->common.transform->matrix[2][0] == 0 &&
v.vector[2] == pixman_fixed_1;
}
if (affine)
{
rx -= conical->center.x / 65536.;
ry -= conical->center.y / 65536.;
while (buffer < end)
{
double angle;
if (!mask || *mask++ & mask_bits)
{
pixman_fixed_48_16_t t;
angle = atan2 (ry, rx) + a;
t = (pixman_fixed_48_16_t) (angle * (65536. / (2 * M_PI)));
*buffer = _pixman_gradient_walker_pixel (&walker, t);
}
++buffer;
rx += cx;
ry += cy;
}
}
else
{
while (buffer < end)
{
double x, y;
double angle;
if (!mask || *mask++ & mask_bits)
{
pixman_fixed_48_16_t t;
if (rz != 0)
{
x = rx / rz;
y = ry / rz;
}
else
{
x = y = 0.;
}
x -= conical->center.x / 65536.;
y -= conical->center.y / 65536.;
angle = atan2 (y, x) + a;
t = (pixman_fixed_48_16_t) (angle * (65536. / (2 * M_PI)));
*buffer = _pixman_gradient_walker_pixel (&walker, t);
}
++buffer;
rx += cx;
ry += cy;
rz += cz;
}
}
}
static void
conical_gradient_property_changed (pixman_image_t *image)
{
image->common.get_scanline_32 = conical_gradient_get_scanline_32;
image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_conical_gradient (pixman_point_fixed_t * center,
pixman_fixed_t angle,
const pixman_gradient_stop_t *stops,
int n_stops)
{
pixman_image_t *image = _pixman_image_allocate ();
conical_gradient_t *conical;
if (!image)
return NULL;
conical = &image->conical;
if (!_pixman_init_gradient (&conical->common, stops, n_stops))
{
free (image);
return NULL;
}
image->type = CONICAL;
conical->center = *center;
conical->angle = angle;
image->common.property_changed = conical_gradient_property_changed;
return image;
}
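/* Sketch of the angle -> gradient-position mapping used above (assumed
 * helper, restating the loop body): one full turn of atan2() corresponds
 * to one unit in 16.16 fixed point, which is what the gradient walker
 * consumes. */
static inline pixman_fixed_48_16_t
example_angle_to_t (double dx, double dy, double angle_offset)
{
    double angle = atan2 (dy, dx) + angle_offset;    /* radians */

    return (pixman_fixed_48_16_t) (angle * (65536. / (2 * M_PI)));
}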

View file

@@ -0,0 +1,585 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <string.h>
#if defined(USE_ARM_SIMD) && defined(_MSC_VER)
/* Needed for EXCEPTION_ILLEGAL_INSTRUCTION */
#include <windows.h>
#endif
#include "pixman-private.h"
#ifdef USE_VMX
/* The CPU detection code needs to be in a file not compiled with
* "-maltivec -mabi=altivec", as gcc would try to save vector register
* across function calls causing SIGILL on cpus without Altivec/vmx.
*/
static pixman_bool_t initialized = FALSE;
static volatile pixman_bool_t have_vmx = TRUE;
#ifdef __APPLE__
#include <sys/sysctl.h>
static pixman_bool_t
pixman_have_vmx (void)
{
if (!initialized)
{
size_t length = sizeof(have_vmx);
int error =
sysctlbyname ("hw.optional.altivec", &have_vmx, &length, NULL, 0);
if (error)
have_vmx = FALSE;
initialized = TRUE;
}
return have_vmx;
}
#elif defined (__linux__)
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <linux/auxvec.h>
#include <asm/cputable.h>
static pixman_bool_t
pixman_have_vmx (void)
{
if (!initialized)
{
char fname[64];
unsigned long buf[64];
ssize_t count = 0;
pid_t pid;
int fd, i;
pid = getpid ();
snprintf (fname, sizeof(fname) - 1, "/proc/%d/auxv", pid);
fd = open (fname, O_RDONLY);
if (fd >= 0)
{
for (i = 0; i <= (count / sizeof(unsigned long)); i += 2)
{
/* Read more if buf is empty... */
if (i == (count / sizeof(unsigned long)))
{
count = read (fd, buf, sizeof(buf));
if (count <= 0)
break;
i = 0;
}
if (buf[i] == AT_HWCAP)
{
have_vmx = !!(buf[i + 1] & PPC_FEATURE_HAS_ALTIVEC);
initialized = TRUE;
break;
}
else if (buf[i] == AT_NULL)
{
break;
}
}
close (fd);
}
}
if (!initialized)
{
/* Something went wrong. Assume 'no' rather than playing
fragile tricks with catching SIGILL. */
have_vmx = FALSE;
initialized = TRUE;
}
return have_vmx;
}
#else /* !__APPLE__ && !__linux__ */
#include <signal.h>
#include <setjmp.h>
static jmp_buf jump_env;
static void
vmx_test (int sig,
siginfo_t *si,
void * unused)
{
longjmp (jump_env, 1);
}
static pixman_bool_t
pixman_have_vmx (void)
{
struct sigaction sa, osa;
int jmp_result;
if (!initialized)
{
sa.sa_flags = SA_SIGINFO;
sigemptyset (&sa.sa_mask);
sa.sa_sigaction = vmx_test;
sigaction (SIGILL, &sa, &osa);
jmp_result = setjmp (jump_env);
if (jmp_result == 0)
{
asm volatile ( "vor 0, 0, 0" );
}
sigaction (SIGILL, &osa, NULL);
have_vmx = (jmp_result == 0);
initialized = TRUE;
}
return have_vmx;
}
#endif /* __APPLE__ */
#endif /* USE_VMX */
#if defined(USE_ARM_SIMD) || defined(USE_ARM_NEON)
#if defined(_MSC_VER)
#if defined(USE_ARM_SIMD)
extern int pixman_msvc_try_arm_simd_op ();
pixman_bool_t
pixman_have_arm_simd (void)
{
static pixman_bool_t initialized = FALSE;
static pixman_bool_t have_arm_simd = FALSE;
if (!initialized)
{
__try {
pixman_msvc_try_arm_simd_op ();
have_arm_simd = TRUE;
} __except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION) {
have_arm_simd = FALSE;
}
initialized = TRUE;
}
return have_arm_simd;
}
#endif /* USE_ARM_SIMD */
#if defined(USE_ARM_NEON)
extern int pixman_msvc_try_arm_neon_op ();
pixman_bool_t
pixman_have_arm_neon (void)
{
static pixman_bool_t initialized = FALSE;
static pixman_bool_t have_arm_neon = FALSE;
if (!initialized)
{
__try
{
pixman_msvc_try_arm_neon_op ();
have_arm_neon = TRUE;
}
__except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION)
{
have_arm_neon = FALSE;
}
initialized = TRUE;
}
return have_arm_neon;
}
#endif /* USE_ARM_NEON */
#else /* linux ELF */
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <elf.h>
static pixman_bool_t arm_has_v7 = FALSE;
static pixman_bool_t arm_has_v6 = FALSE;
static pixman_bool_t arm_has_vfp = FALSE;
static pixman_bool_t arm_has_neon = FALSE;
static pixman_bool_t arm_has_iwmmxt = FALSE;
static pixman_bool_t arm_tests_initialized = FALSE;
static void
pixman_arm_read_auxv ()
{
int fd;
Elf32_auxv_t aux;
fd = open ("/proc/self/auxv", O_RDONLY);
if (fd >= 0)
{
while (read (fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t))
{
if (aux.a_type == AT_HWCAP)
{
uint32_t hwcap = aux.a_un.a_val;
if (getenv ("ARM_FORCE_HWCAP"))
hwcap = strtoul (getenv ("ARM_FORCE_HWCAP"), NULL, 0);
/* hardcode these values to avoid depending on specific
* versions of the hwcap header, e.g. HWCAP_NEON
*/
arm_has_vfp = (hwcap & 64) != 0;
arm_has_iwmmxt = (hwcap & 512) != 0;
/* this flag is only present on kernel 2.6.29 */
arm_has_neon = (hwcap & 4096) != 0;
}
else if (aux.a_type == AT_PLATFORM)
{
const char *plat = (const char*) aux.a_un.a_val;
if (getenv ("ARM_FORCE_PLATFORM"))
plat = getenv ("ARM_FORCE_PLATFORM");
if (strncmp (plat, "v7l", 3) == 0)
{
arm_has_v7 = TRUE;
arm_has_v6 = TRUE;
}
else if (strncmp (plat, "v6l", 3) == 0)
{
arm_has_v6 = TRUE;
}
}
}
close (fd);
/* if we don't have 2.6.29, we have to do this hack; set
* the env var to trust HWCAP.
*/
if (!getenv ("ARM_TRUST_HWCAP") && arm_has_v7)
arm_has_neon = TRUE;
}
arm_tests_initialized = TRUE;
}
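/* For reference (bit values as assumed by the code above; the names are
 * illustrative and intentionally not taken from a kernel header):
 *
 *   64   == HWCAP_VFP     -> arm_has_vfp
 *   512  == HWCAP_IWMMXT  -> arm_has_iwmmxt
 *   4096 == HWCAP_NEON    -> arm_has_neon (flag exists only on >= 2.6.29)
 */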
#if defined(USE_ARM_SIMD)
pixman_bool_t
pixman_have_arm_simd (void)
{
if (!arm_tests_initialized)
pixman_arm_read_auxv ();
return arm_has_v6;
}
#endif /* USE_ARM_SIMD */
#if defined(USE_ARM_NEON)
pixman_bool_t
pixman_have_arm_neon (void)
{
if (!arm_tests_initialized)
pixman_arm_read_auxv ();
return arm_has_neon;
}
#endif /* USE_ARM_NEON */
#endif /* linux */
#endif /* USE_ARM_SIMD || USE_ARM_NEON */
#ifdef USE_MMX
/* The CPU detection code needs to be in a file not compiled with
* "-mmmx -msse", as gcc would generate CMOV instructions otherwise
* that would lead to SIGILL instructions on old CPUs that don't have
* it.
*/
#if !defined(__amd64__) && !defined(__x86_64__) && !defined(_M_AMD64)
#ifdef HAVE_GETISAX
#include <sys/auxv.h>
#endif
typedef enum
{
NO_FEATURES = 0,
MMX = 0x1,
MMX_EXTENSIONS = 0x2,
SSE = 0x6,
SSE2 = 0x8,
CMOV = 0x10
} cpu_features_t;
static unsigned int
detect_cpu_features (void)
{
unsigned int features = 0;
unsigned int result = 0;
#ifdef HAVE_GETISAX
if (getisax (&result, 1))
{
if (result & AV_386_CMOV)
features |= CMOV;
if (result & AV_386_MMX)
features |= MMX;
if (result & AV_386_AMD_MMX)
features |= MMX_EXTENSIONS;
if (result & AV_386_SSE)
features |= SSE;
if (result & AV_386_SSE2)
features |= SSE2;
}
#else
char vendor[13];
#ifdef _MSC_VER
int vendor0 = 0, vendor1, vendor2;
#endif
vendor[0] = 0;
vendor[12] = 0;
#ifdef __GNUC__
/* see p. 118 of amd64 instruction set manual Vol3 */
/* We need to be careful about the handling of %ebx and
* %esp here. We can't declare either one as clobbered
* since they are special registers (%ebx is the "PIC
* register" holding an offset to global data, %esp the
* stack pointer), so we need to make sure they have their
* original values when we access the output operands.
*/
__asm__ (
"pushf\n"
"pop %%eax\n"
"mov %%eax, %%ecx\n"
"xor $0x00200000, %%eax\n"
"push %%eax\n"
"popf\n"
"pushf\n"
"pop %%eax\n"
"mov $0x0, %%edx\n"
"xor %%ecx, %%eax\n"
"jz 1f\n"
"mov $0x00000000, %%eax\n"
"push %%ebx\n"
"cpuid\n"
"mov %%ebx, %%eax\n"
"pop %%ebx\n"
"mov %%eax, %1\n"
"mov %%edx, %2\n"
"mov %%ecx, %3\n"
"mov $0x00000001, %%eax\n"
"push %%ebx\n"
"cpuid\n"
"pop %%ebx\n"
"1:\n"
"mov %%edx, %0\n"
: "=r" (result),
"=m" (vendor[0]),
"=m" (vendor[4]),
"=m" (vendor[8])
:
: "%eax", "%ecx", "%edx"
);
#elif defined (_MSC_VER)
_asm {
pushfd
pop eax
mov ecx, eax
xor eax, 00200000h
push eax
popfd
pushfd
pop eax
mov edx, 0
xor eax, ecx
jz nocpuid
mov eax, 0
push ebx
cpuid
mov eax, ebx
pop ebx
mov vendor0, eax
mov vendor1, edx
mov vendor2, ecx
mov eax, 1
push ebx
cpuid
pop ebx
nocpuid:
mov result, edx
}
memmove (vendor + 0, &vendor0, 4);
memmove (vendor + 4, &vendor1, 4);
memmove (vendor + 8, &vendor2, 4);
#else
# error unsupported compiler
#endif
features = 0;
if (result)
{
/* result now contains the standard feature bits */
if (result & (1 << 15))
features |= CMOV;
if (result & (1 << 23))
features |= MMX;
if (result & (1 << 25))
features |= SSE;
if (result & (1 << 26))
features |= SSE2;
if ((features & MMX) && !(features & SSE) &&
(strcmp (vendor, "AuthenticAMD") == 0 ||
strcmp (vendor, "Geode by NSC") == 0))
{
/* check for AMD MMX extensions */
#ifdef __GNUC__
__asm__ (
" push %%ebx\n"
" mov $0x80000000, %%eax\n"
" cpuid\n"
" xor %%edx, %%edx\n"
" cmp $0x1, %%eax\n"
" jge 2f\n"
" mov $0x80000001, %%eax\n"
" cpuid\n"
"2:\n"
" pop %%ebx\n"
" mov %%edx, %0\n"
: "=r" (result)
:
: "%eax", "%ecx", "%edx"
);
#elif defined _MSC_VER
_asm {
push ebx
mov eax, 80000000h
cpuid
xor edx, edx
cmp eax, 1
jge notamd
mov eax, 80000001h
cpuid
notamd:
pop ebx
mov result, edx
}
#endif
if (result & (1 << 22))
features |= MMX_EXTENSIONS;
}
}
#endif /* HAVE_GETISAX */
return features;
}
static pixman_bool_t
pixman_have_mmx (void)
{
static pixman_bool_t initialized = FALSE;
static pixman_bool_t mmx_present;
if (!initialized)
{
unsigned int features = detect_cpu_features ();
mmx_present = (features & (MMX | MMX_EXTENSIONS)) == (MMX | MMX_EXTENSIONS);
initialized = TRUE;
}
return mmx_present;
}
#ifdef USE_SSE2
static pixman_bool_t
pixman_have_sse2 (void)
{
static pixman_bool_t initialized = FALSE;
static pixman_bool_t sse2_present;
if (!initialized)
{
unsigned int features = detect_cpu_features ();
sse2_present = (features & (MMX | MMX_EXTENSIONS | SSE | SSE2)) == (MMX | MMX_EXTENSIONS | SSE | SSE2);
initialized = TRUE;
}
return sse2_present;
}
#endif
#else /* __amd64__ */
#ifdef USE_MMX
#define pixman_have_mmx() TRUE
#endif
#ifdef USE_SSE2
#define pixman_have_sse2() TRUE
#endif
#endif /* __amd64__ */
#endif
pixman_implementation_t *
_pixman_choose_implementation (void)
{
#ifdef USE_SSE2
if (pixman_have_sse2 ())
return _pixman_implementation_create_sse2 ();
#endif
#ifdef USE_MMX
if (pixman_have_mmx ())
return _pixman_implementation_create_mmx ();
#endif
#ifdef USE_ARM_NEON
if (pixman_have_arm_neon ())
return _pixman_implementation_create_arm_neon ();
#endif
#ifdef USE_ARM_SIMD
if (pixman_have_arm_simd ())
return _pixman_implementation_create_arm_simd ();
#endif
#ifdef USE_VMX
if (pixman_have_vmx ())
return _pixman_implementation_create_vmx ();
#endif
return _pixman_implementation_create_fast_path ();
}

View file

@@ -20,11 +20,11 @@
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef rasterizeSpan
#ifndef rasterize_span
#endif
static void
rasterizeEdges (pixman_image_t *image,
RASTERIZE_EDGES (pixman_image_t *image,
pixman_edge_t *l,
pixman_edge_t *r,
pixman_fixed_t t,
@@ -50,7 +50,7 @@ rasterizeEdges (pixman_image_t *image,
#if N_BITS == 1
/* For the non-antialiased case, round the coordinates up, in effect
* sampling the center of the pixel. (The AA case does a similar
* adjustment in RenderSamplesX) */
* adjustment in RENDER_SAMPLES_X) */
lx += X_FRAC_FIRST(1);
rx += X_FRAC_FIRST(1);
#endif
@@ -78,53 +78,85 @@ rasterizeEdges (pixman_image_t *image,
#if N_BITS == 1
{
#ifdef WORDS_BIGENDIAN
# define SCREEN_SHIFT_LEFT(x,n) ((x) << (n))
# define SCREEN_SHIFT_RIGHT(x,n) ((x) >> (n))
#else
# define SCREEN_SHIFT_LEFT(x,n) ((x) >> (n))
# define SCREEN_SHIFT_RIGHT(x,n) ((x) << (n))
#endif
#define LEFT_MASK(x) \
(((x) & 0x1f) ? \
SCREEN_SHIFT_RIGHT (0xffffffff, (x) & 0x1f) : 0)
#define RIGHT_MASK(x) \
(((32 - (x)) & 0x1f) ? \
SCREEN_SHIFT_LEFT (0xffffffff, (32 - (x)) & 0x1f) : 0)
#define MASK_BITS(x,w,l,n,r) { \
n = (w); \
r = RIGHT_MASK ((x) + n); \
l = LEFT_MASK (x); \
if (l) { \
n -= 32 - ((x) & 0x1f); \
if (n < 0) { \
n = 0; \
l &= r; \
r = 0; \
} \
} \
n >>= 5; \
}
uint32_t *a = line;
uint32_t startmask;
uint32_t endmask;
int nmiddle;
int width = rxi - lxi;
int x = lxi;
a += x >> 5;
x &= 0x1f;
MASK_BITS (x, width, startmask, nmiddle, endmask);
a += x >> FB_SHIFT;
x &= FB_MASK;
FbMaskBits (x, width, startmask, nmiddle, endmask);
if (startmask) {
WRITE(image, a, READ(image, a) | startmask);
a++;
}
while (nmiddle--)
WRITE(image, a++, FB_ALLONES);
if (endmask)
WRITE(image, a, READ(image, a) | endmask);
if (startmask) {
WRITE(image, a, READ(image, a) | startmask);
a++;
}
while (nmiddle--)
WRITE(image, a++, 0xffffffff);
if (endmask)
WRITE(image, a, READ(image, a) | endmask);
}
#else
{
DefineAlpha(line,lxi);
DEFINE_ALPHA(line,lxi);
int lxs;
int rxs;
/* Sample coverage for edge pixels */
lxs = RenderSamplesX (lx, N_BITS);
rxs = RenderSamplesX (rx, N_BITS);
lxs = RENDER_SAMPLES_X (lx, N_BITS);
rxs = RENDER_SAMPLES_X (rx, N_BITS);
/* Add coverage across row */
if (lxi == rxi)
{
AddAlpha (rxs - lxs);
ADD_ALPHA (rxs - lxs);
}
else
{
int xi;
AddAlpha (N_X_FRAC(N_BITS) - lxs);
StepAlpha;
ADD_ALPHA (N_X_FRAC(N_BITS) - lxs);
STEP_ALPHA;
for (xi = lxi + 1; xi < rxi; xi++)
{
AddAlpha (N_X_FRAC(N_BITS));
StepAlpha;
ADD_ALPHA (N_X_FRAC(N_BITS));
STEP_ALPHA;
}
AddAlpha (rxs);
ADD_ALPHA (rxs);
}
}
#endif
@@ -136,19 +168,19 @@ rasterizeEdges (pixman_image_t *image,
#if N_BITS > 1
if (pixman_fixed_frac (y) != Y_FRAC_LAST(N_BITS))
{
RenderEdgeStepSmall (l);
RenderEdgeStepSmall (r);
RENDER_EDGE_STEP_SMALL (l);
RENDER_EDGE_STEP_SMALL (r);
y += STEP_Y_SMALL(N_BITS);
}
else
#endif
{
RenderEdgeStepBig (l);
RenderEdgeStepBig (r);
RENDER_EDGE_STEP_BIG (l);
RENDER_EDGE_STEP_BIG (r);
y += STEP_Y_BIG(N_BITS);
line += stride;
}
}
}
#undef rasterizeSpan
#undef rasterize_span

View file

@@ -27,6 +27,35 @@
#include <string.h>
#include "pixman-private.h"
#include "pixman-accessor.h"
/*
* Step across a small sample grid gap
*/
#define RENDER_EDGE_STEP_SMALL(edge) \
{ \
edge->x += edge->stepx_small; \
edge->e += edge->dx_small; \
if (edge->e > 0) \
{ \
edge->e -= edge->dy; \
edge->x += edge->signdx; \
} \
}
/*
* Step across a large sample grid gap
*/
#define RENDER_EDGE_STEP_BIG(edge) \
{ \
edge->x += edge->stepx_big; \
edge->e += edge->dx_big; \
if (edge->e > 0) \
{ \
edge->e -= edge->dy; \
edge->x += edge->signdx; \
} \
}
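/* Both macros above are one step of an integer DDA (a sketch restating
 * them; the function is hypothetical): x advances by a fixed step while
 * the error term accumulates the remainder, and when it turns positive
 * the edge is nudged one extra unit in the direction of signdx. */
static inline void
example_edge_step (pixman_edge_t *edge, pixman_fixed_t stepx, pixman_fixed_t dx)
{
    edge->x += stepx;
    edge->e += dx;

    if (edge->e > 0)
    {
        edge->e -= edge->dy;
        edge->x += edge->signdx;
    }
}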
#ifdef PIXMAN_FB_ACCESSORS
#define PIXMAN_RASTERIZE_EDGES pixman_rasterize_edges_accessors
@@ -38,36 +67,38 @@
* 4 bit alpha
*/
#define N_BITS 4
#define rasterizeEdges fbRasterizeEdges4
#define N_BITS 4
#define RASTERIZE_EDGES rasterize_edges_4
#if BITMAP_BIT_ORDER == LSBFirst
#define Shift4(o) ((o) << 2)
#ifndef WORDS_BIG_ENDIAN
#define SHIFT_4(o) ((o) << 2)
#else
#define Shift4(o) ((1-(o)) << 2)
#define SHIFT_4(o) ((1 - (o)) << 2)
#endif
#define Get4(x,o) (((x) >> Shift4(o)) & 0xf)
#define Put4(x,o,v) (((x) & ~(0xf << Shift4(o))) | (((v) & 0xf) << Shift4(o)))
#define GET_4(x, o) (((x) >> SHIFT_4 (o)) & 0xf)
#define PUT_4(x, o, v) \
(((x) & ~(0xf << SHIFT_4 (o))) | (((v) & 0xf) << SHIFT_4 (o)))
#define DefineAlpha(line,x) \
uint8_t *__ap = (uint8_t *) line + ((x) >> 1); \
int __ao = (x) & 1
#define DEFINE_ALPHA(line, x) \
uint8_t *__ap = (uint8_t *) line + ((x) >> 1); \
int __ao = (x) & 1
#define StepAlpha ((__ap += __ao), (__ao ^= 1))
#define STEP_ALPHA ((__ap += __ao), (__ao ^= 1))
#define AddAlpha(a) { \
uint8_t __o = READ(image, __ap); \
uint8_t __a = (a) + Get4(__o, __ao); \
WRITE(image, __ap, Put4 (__o, __ao, __a | (0 - ((__a) >> 4)))); \
#define ADD_ALPHA(a) \
{ \
uint8_t __o = READ (image, __ap); \
uint8_t __a = (a) + GET_4 (__o, __ao); \
WRITE (image, __ap, PUT_4 (__o, __ao, __a | (0 - ((__a) >> 4)))); \
}
#include "pixman-edge-imp.h"
#undef AddAlpha
#undef StepAlpha
#undef DefineAlpha
#undef rasterizeEdges
#undef ADD_ALPHA
#undef STEP_ALPHA
#undef DEFINE_ALPHA
#undef RASTERIZE_EDGES
#undef N_BITS
@@ -76,35 +107,38 @@
*/
#define N_BITS 1
#define rasterizeEdges fbRasterizeEdges1
#define RASTERIZE_EDGES rasterize_edges_1
#include "pixman-edge-imp.h"
#undef rasterizeEdges
#undef RASTERIZE_EDGES
#undef N_BITS
/*
* 8 bit alpha
*/
static inline uint8_t
static force_inline uint8_t
clip255 (int x)
{
if (x > 255) return 255;
if (x > 255)
return 255;
return x;
}
#define add_saturate_8(buf,val,length) \
do { \
int i__ = (length); \
uint8_t *buf__ = (buf); \
int val__ = (val); \
\
while (i__--) \
{ \
WRITE(image, (buf__), clip255 (READ(image, (buf__)) + (val__))); \
(buf__)++; \
} \
#define ADD_SATURATE_8(buf, val, length) \
do \
{ \
int i__ = (length); \
uint8_t *buf__ = (buf); \
int val__ = (val); \
\
while (i__--) \
{ \
WRITE (image, (buf__), clip255 (READ (image, (buf__)) + (val__))); \
(buf__)++; \
} \
} while (0)
/*
@@ -119,13 +153,13 @@ clip255 (int x)
* fill_start fill_end
*/
static void
fbRasterizeEdges8 (pixman_image_t *image,
pixman_edge_t *l,
pixman_edge_t *r,
pixman_fixed_t t,
pixman_fixed_t b)
rasterize_edges_8 (pixman_image_t *image,
pixman_edge_t * l,
pixman_edge_t * r,
pixman_fixed_t t,
pixman_fixed_t b)
{
pixman_fixed_t y = t;
pixman_fixed_t y = t;
uint32_t *line;
int fill_start = -1, fill_end = -1;
int fill_size = 0;
@@ -138,153 +172,165 @@ fbRasterizeEdges8 (pixman_image_t *image,
for (;;)
{
uint8_t *ap = (uint8_t *) line;
pixman_fixed_t lx, rx;
int lxi, rxi;
pixman_fixed_t lx, rx;
int lxi, rxi;
/* clip X */
lx = l->x;
if (lx < 0)
/* clip X */
lx = l->x;
if (lx < 0)
lx = 0;
rx = r->x;
if (pixman_fixed_to_int (rx) >= width)
rx = r->x;
if (pixman_fixed_to_int (rx) >= width)
{
/* Use the last pixel of the scanline, covered 100%.
* We can't use the first pixel following the scanline,
* because accessing it could result in a buffer overrun.
*/
rx = pixman_int_to_fixed (width) - 1;
}
/* Skip empty (or backwards) sections */
if (rx > lx)
{
/* Skip empty (or backwards) sections */
if (rx > lx)
{
int lxs, rxs;
/* Find pixel bounds for span. */
lxi = pixman_fixed_to_int (lx);
rxi = pixman_fixed_to_int (rx);
/* Find pixel bounds for span. */
lxi = pixman_fixed_to_int (lx);
rxi = pixman_fixed_to_int (rx);
/* Sample coverage for edge pixels */
lxs = RenderSamplesX (lx, 8);
rxs = RenderSamplesX (rx, 8);
lxs = RENDER_SAMPLES_X (lx, 8);
rxs = RENDER_SAMPLES_X (rx, 8);
/* Add coverage across row */
if (lxi == rxi)
{
WRITE(image, ap +lxi, clip255 (READ(image, ap + lxi) + rxs - lxs));
if (lxi == rxi)
{
WRITE (image, ap + lxi,
clip255 (READ (image, ap + lxi) + rxs - lxs));
}
else
{
WRITE(image, ap + lxi, clip255 (READ(image, ap + lxi) + N_X_FRAC(8) - lxs));
else
{
WRITE (image, ap + lxi,
clip255 (READ (image, ap + lxi) + N_X_FRAC (8) - lxs));
/* Move forward so that lxi/rxi is the pixel span */
lxi++;
/* Move forward so that lxi/rxi is the pixel span */
lxi++;
/* Don't bother trying to optimize the fill unless
/* Don't bother trying to optimize the fill unless
* the span is longer than 4 pixels. */
if (rxi - lxi > 4)
{
if (fill_start < 0)
{
fill_start = lxi;
fill_end = rxi;
fill_size++;
if (rxi - lxi > 4)
{
if (fill_start < 0)
{
fill_start = lxi;
fill_end = rxi;
fill_size++;
}
else
{
if (lxi >= fill_end || rxi < fill_start)
{
/* We're beyond what we saved, just fill it */
add_saturate_8 (ap + fill_start,
fill_size * N_X_FRAC(8),
fill_end - fill_start);
fill_start = lxi;
fill_end = rxi;
fill_size = 1;
else
{
if (lxi >= fill_end || rxi < fill_start)
{
/* We're beyond what we saved, just fill it */
ADD_SATURATE_8 (ap + fill_start,
fill_size * N_X_FRAC (8),
fill_end - fill_start);
fill_start = lxi;
fill_end = rxi;
fill_size = 1;
}
else
{
/* Update fill_start */
if (lxi > fill_start)
{
add_saturate_8 (ap + fill_start,
fill_size * N_X_FRAC(8),
lxi - fill_start);
fill_start = lxi;
else
{
/* Update fill_start */
if (lxi > fill_start)
{
ADD_SATURATE_8 (ap + fill_start,
fill_size * N_X_FRAC (8),
lxi - fill_start);
fill_start = lxi;
}
else if (lxi < fill_start)
{
add_saturate_8 (ap + lxi, N_X_FRAC(8),
fill_start - lxi);
else if (lxi < fill_start)
{
ADD_SATURATE_8 (ap + lxi, N_X_FRAC (8),
fill_start - lxi);
}
/* Update fill_end */
if (rxi < fill_end)
{
add_saturate_8 (ap + rxi,
fill_size * N_X_FRAC(8),
fill_end - rxi);
fill_end = rxi;
/* Update fill_end */
if (rxi < fill_end)
{
ADD_SATURATE_8 (ap + rxi,
fill_size * N_X_FRAC (8),
fill_end - rxi);
fill_end = rxi;
}
else if (fill_end < rxi)
{
add_saturate_8 (ap + fill_end,
N_X_FRAC(8),
rxi - fill_end);
else if (fill_end < rxi)
{
ADD_SATURATE_8 (ap + fill_end,
N_X_FRAC (8),
rxi - fill_end);
}
fill_size++;
fill_size++;
}
}
}
else
{
add_saturate_8 (ap + lxi, N_X_FRAC(8), rxi - lxi);
else
{
ADD_SATURATE_8 (ap + lxi, N_X_FRAC (8), rxi - lxi);
}
WRITE(image, ap + rxi, clip255 (READ(image, ap + rxi) + rxs));
WRITE (image, ap + rxi, clip255 (READ (image, ap + rxi) + rxs));
}
}
if (y == b) {
if (y == b)
{
/* We're done, make sure we clean up any remaining fill. */
if (fill_start != fill_end) {
if (fill_size == N_Y_FRAC(8))
{
MEMSET_WRAPPED (image, ap + fill_start, 0xff, fill_end - fill_start);
}
else
{
add_saturate_8 (ap + fill_start, fill_size * N_X_FRAC(8),
fill_end - fill_start);
}
}
break;
}
if (pixman_fixed_frac (y) != Y_FRAC_LAST(8))
{
RenderEdgeStepSmall (l);
RenderEdgeStepSmall (r);
y += STEP_Y_SMALL(8);
}
else
{
RenderEdgeStepBig (l);
RenderEdgeStepBig (r);
y += STEP_Y_BIG(8);
if (fill_start != fill_end)
{
if (fill_size == N_Y_FRAC(8))
{
MEMSET_WRAPPED (image, ap + fill_start, 0xff, fill_end - fill_start);
if (fill_size == N_Y_FRAC (8))
{
MEMSET_WRAPPED (image, ap + fill_start,
0xff, fill_end - fill_start);
}
else
{
add_saturate_8 (ap + fill_start, fill_size * N_X_FRAC(8),
fill_end - fill_start);
else
{
ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8),
fill_end - fill_start);
}
}
break;
}
if (pixman_fixed_frac (y) != Y_FRAC_LAST (8))
{
RENDER_EDGE_STEP_SMALL (l);
RENDER_EDGE_STEP_SMALL (r);
y += STEP_Y_SMALL (8);
}
else
{
RENDER_EDGE_STEP_BIG (l);
RENDER_EDGE_STEP_BIG (r);
y += STEP_Y_BIG (8);
if (fill_start != fill_end)
{
if (fill_size == N_Y_FRAC (8))
{
MEMSET_WRAPPED (image, ap + fill_start,
0xff, fill_end - fill_start);
}
else
{
ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8),
fill_end - fill_start);
}
fill_start = fill_end = -1;
fill_size = 0;
}
line += stride;
}
line += stride;
}
}
}
@@ -294,22 +340,27 @@ static
#endif
void
PIXMAN_RASTERIZE_EDGES (pixman_image_t *image,
pixman_edge_t *l,
pixman_edge_t *r,
pixman_fixed_t t,
pixman_fixed_t b)
pixman_edge_t * l,
pixman_edge_t * r,
pixman_fixed_t t,
pixman_fixed_t b)
{
switch (PIXMAN_FORMAT_BPP (image->bits.format))
{
case 1:
fbRasterizeEdges1 (image, l, r, t, b);
rasterize_edges_1 (image, l, r, t, b);
break;
case 4:
fbRasterizeEdges4 (image, l, r, t, b);
rasterize_edges_4 (image, l, r, t, b);
break;
case 8:
fbRasterizeEdges8 (image, l, r, t, b);
rasterize_edges_8 (image, l, r, t, b);
break;
default:
break;
}
}
@@ -317,12 +368,14 @@ PIXMAN_RASTERIZE_EDGES (pixman_image_t *image,
PIXMAN_EXPORT void
pixman_rasterize_edges (pixman_image_t *image,
pixman_edge_t *l,
pixman_edge_t *r,
pixman_fixed_t t,
pixman_fixed_t b)
pixman_edge_t * l,
pixman_edge_t * r,
pixman_fixed_t t,
pixman_fixed_t b)
{
if (image->common.read_func || image->common.write_func)
return_if_fail (image->type == BITS);
if (image->bits.read_func || image->bits.write_func)
pixman_rasterize_edges_accessors (image, l, r, t, b);
else
pixman_rasterize_edges_no_accessors (image, l, r, t, b);

Diff not shown because of its large size. Load diff

View file

@@ -0,0 +1,336 @@
/*
* Copyright © 2009 Red Hat, Inc.
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
* 2008 Aaron Plattner, NVIDIA Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Red Hat not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Red Hat makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <stdio.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
#define SCANLINE_BUFFER_LENGTH 8192
static void
general_composite_rect (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
uint8_t stack_scanline_buffer[SCANLINE_BUFFER_LENGTH * 3];
const pixman_format_code_t src_format =
src->type == BITS ? src->bits.format : 0;
const pixman_format_code_t mask_format =
mask && mask->type == BITS ? mask->bits.format : 0;
const pixman_format_code_t dest_format =
dest->type == BITS ? dest->bits.format : 0;
const int src_wide = PIXMAN_FORMAT_IS_WIDE (src_format);
const int mask_wide = mask && PIXMAN_FORMAT_IS_WIDE (mask_format);
const int dest_wide = PIXMAN_FORMAT_IS_WIDE (dest_format);
const int wide = src_wide || mask_wide || dest_wide;
const int Bpp = wide ? 8 : 4;
uint8_t *scanline_buffer = stack_scanline_buffer;
uint8_t *src_buffer, *mask_buffer, *dest_buffer;
fetch_scanline_t fetch_src = NULL, fetch_mask = NULL, fetch_dest = NULL;
pixman_combine_32_func_t compose;
store_scanline_t store;
source_image_class_t src_class, mask_class;
pixman_bool_t component_alpha;
uint32_t *bits;
int32_t stride;
int i;
if (width * Bpp > SCANLINE_BUFFER_LENGTH)
{
scanline_buffer = pixman_malloc_abc (width, 3, Bpp);
if (!scanline_buffer)
return;
}
src_buffer = scanline_buffer;
mask_buffer = src_buffer + width * Bpp;
dest_buffer = mask_buffer + width * Bpp;
src_class = _pixman_image_classify (src,
src_x, src_y,
width, height);
mask_class = SOURCE_IMAGE_CLASS_UNKNOWN;
if (mask)
{
mask_class = _pixman_image_classify (mask,
src_x, src_y,
width, height);
}
if (op == PIXMAN_OP_CLEAR)
fetch_src = NULL;
else if (wide)
fetch_src = _pixman_image_get_scanline_64;
else
fetch_src = _pixman_image_get_scanline_32;
if (!mask || op == PIXMAN_OP_CLEAR)
fetch_mask = NULL;
else if (wide)
fetch_mask = _pixman_image_get_scanline_64;
else
fetch_mask = _pixman_image_get_scanline_32;
if (op == PIXMAN_OP_CLEAR || op == PIXMAN_OP_SRC)
fetch_dest = NULL;
else if (wide)
fetch_dest = _pixman_image_get_scanline_64;
else
fetch_dest = _pixman_image_get_scanline_32;
if (wide)
store = _pixman_image_store_scanline_64;
else
store = _pixman_image_store_scanline_32;
/* Skip the store step and composite directly into the
* destination if the output format of the compose func matches
* the destination format.
*
* If the destination format is a8r8g8b8 then we can always do
* this. If it is x8r8g8b8, then we can only do it if the
* operator doesn't make use of destination alpha.
*/
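/* For example, OVER and ADD read destination color but never
* destination alpha, so for x8r8g8b8 the combined a8r8g8b8 result can
* be written straight into the destination bits; the undefined x
* channel is simply never consulted.
*/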
if ((dest->bits.format == PIXMAN_a8r8g8b8) ||
(dest->bits.format == PIXMAN_x8r8g8b8 &&
(op == PIXMAN_OP_OVER ||
op == PIXMAN_OP_ADD ||
op == PIXMAN_OP_SRC ||
op == PIXMAN_OP_CLEAR ||
op == PIXMAN_OP_IN_REVERSE ||
op == PIXMAN_OP_OUT_REVERSE ||
op == PIXMAN_OP_DST)))
{
if (!wide &&
!dest->common.alpha_map &&
!dest->bits.write_func)
{
store = NULL;
}
}
if (!store)
{
bits = dest->bits.bits;
stride = dest->bits.rowstride;
}
else
{
bits = NULL;
stride = 0;
}
component_alpha =
fetch_src &&
fetch_mask &&
mask &&
mask->common.type == BITS &&
mask->common.component_alpha &&
PIXMAN_FORMAT_RGB (mask->bits.format);
if (wide)
{
if (component_alpha)
compose = (pixman_combine_32_func_t)_pixman_implementation_combine_64_ca;
else
compose = (pixman_combine_32_func_t)_pixman_implementation_combine_64;
}
else
{
if (component_alpha)
compose = _pixman_implementation_combine_32_ca;
else
compose = _pixman_implementation_combine_32;
}
if (!compose)
return;
if (!fetch_mask)
mask_buffer = NULL;
for (i = 0; i < height; ++i)
{
/* fill first half of scanline with source */
if (fetch_src)
{
if (fetch_mask)
{
/* fetch mask before source so that fetching of
source can be optimized */
fetch_mask (mask, mask_x, mask_y + i,
width, (void *)mask_buffer, 0, 0);
if (mask_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
fetch_mask = NULL;
}
if (src_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
{
fetch_src (src, src_x, src_y + i,
width, (void *)src_buffer, 0, 0);
fetch_src = NULL;
}
else
{
fetch_src (src, src_x, src_y + i,
width, (void *)src_buffer, (void *)mask_buffer,
0xffffffff);
}
}
else if (fetch_mask)
{
fetch_mask (mask, mask_x, mask_y + i,
width, (void *)mask_buffer, 0, 0);
}
if (store)
{
/* fill dest into second half of scanline */
if (fetch_dest)
{
fetch_dest (dest, dest_x, dest_y + i,
width, (void *)dest_buffer, 0, 0);
}
/* blend */
compose (imp->toplevel, op,
(void *)dest_buffer,
(void *)src_buffer,
(void *)mask_buffer,
width);
/* write back */
store (&(dest->bits), dest_x, dest_y + i, width,
(void *)dest_buffer);
}
else
{
/* blend */
compose (imp->toplevel, op,
bits + (dest_y + i) * stride + dest_x,
(void *)src_buffer, (void *)mask_buffer, width);
}
}
if (scanline_buffer != stack_scanline_buffer)
free (scanline_buffer);
}
static void
general_composite (pixman_implementation_t * imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
_pixman_walk_composite_region (imp, op, src, mask, dest, src_x, src_y,
mask_x, mask_y, dest_x, dest_y,
width, height,
general_composite_rect);
}
static pixman_bool_t
general_blt (pixman_implementation_t *imp,
uint32_t * src_bits,
uint32_t * dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height)
{
/* We can't blit unless we have sse2 or mmx */
return FALSE;
}
static pixman_bool_t
general_fill (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor)
{
return FALSE;
}
pixman_implementation_t *
_pixman_implementation_create_general (void)
{
pixman_implementation_t *imp = _pixman_implementation_create (NULL);
_pixman_setup_combiner_functions_32 (imp);
_pixman_setup_combiner_functions_64 (imp);
imp->composite = general_composite;
imp->blt = general_blt;
imp->fill = general_fill;
return imp;
}
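A hedged sketch of how this fits together: the general implementation terminates every delegate chain, so a build with no CPU-specific fast paths would bottom out like this (the wrapper function is illustrative, not part of this commit):

static pixman_implementation_t *imp;

static pixman_implementation_t *
get_fallback_implementation (void)
{
    /* general_blt and general_fill just return FALSE (nothing to
     * accelerate), and general_composite walks the composite region
     * one rectangle at a time via general_composite_rect. */
    if (!imp)
        imp = _pixman_implementation_create_general ();
    return imp;
}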

View file

@@ -0,0 +1,254 @@
/*
*
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
void
_pixman_gradient_walker_init (pixman_gradient_walker_t *walker,
gradient_t * gradient,
unsigned int spread)
{
walker->num_stops = gradient->n_stops;
walker->stops = gradient->stops;
walker->left_x = 0;
walker->right_x = 0x10000;
walker->stepper = 0;
walker->left_ag = 0;
walker->left_rb = 0;
walker->right_ag = 0;
walker->right_rb = 0;
walker->spread = spread;
walker->need_reset = TRUE;
}
void
_pixman_gradient_walker_reset (pixman_gradient_walker_t *walker,
pixman_fixed_32_32_t pos)
{
int32_t x, left_x, right_x;
pixman_color_t *left_c, *right_c;
int n, count = walker->num_stops;
pixman_gradient_stop_t * stops = walker->stops;
static const pixman_color_t transparent_black = { 0, 0, 0, 0 };
switch (walker->spread)
{
case PIXMAN_REPEAT_NORMAL:
x = (int32_t)pos & 0xFFFF;
for (n = 0; n < count; n++)
if (x < stops[n].x)
break;
if (n == 0)
{
left_x = stops[count - 1].x - 0x10000;
left_c = &stops[count - 1].color;
}
else
{
left_x = stops[n - 1].x;
left_c = &stops[n - 1].color;
}
if (n == count)
{
right_x = stops[0].x + 0x10000;
right_c = &stops[0].color;
}
else
{
right_x = stops[n].x;
right_c = &stops[n].color;
}
left_x += (pos - x);
right_x += (pos - x);
break;
case PIXMAN_REPEAT_PAD:
for (n = 0; n < count; n++)
if (pos < stops[n].x)
break;
if (n == 0)
{
left_x = INT32_MIN;
left_c = &stops[0].color;
}
else
{
left_x = stops[n - 1].x;
left_c = &stops[n - 1].color;
}
if (n == count)
{
right_x = INT32_MAX;
right_c = &stops[n - 1].color;
}
else
{
right_x = stops[n].x;
right_c = &stops[n].color;
}
break;
case PIXMAN_REPEAT_REFLECT:
x = (int32_t)pos & 0xFFFF;
if ((int32_t)pos & 0x10000)
x = 0x10000 - x;
for (n = 0; n < count; n++)
if (x < stops[n].x)
break;
if (n == 0)
{
left_x = -stops[0].x;
left_c = &stops[0].color;
}
else
{
left_x = stops[n - 1].x;
left_c = &stops[n - 1].color;
}
if (n == count)
{
right_x = 0x20000 - stops[n - 1].x;
right_c = &stops[n - 1].color;
}
else
{
right_x = stops[n].x;
right_c = &stops[n].color;
}
if ((int32_t)pos & 0x10000)
{
pixman_color_t *tmp_c;
int32_t tmp_x;
tmp_x = 0x10000 - right_x;
right_x = 0x10000 - left_x;
left_x = tmp_x;
tmp_c = right_c;
right_c = left_c;
left_c = tmp_c;
x = 0x10000 - x;
}
left_x += (pos - x);
right_x += (pos - x);
break;
default: /* REPEAT_NONE */
for (n = 0; n < count; n++)
if (pos < stops[n].x)
break;
if (n == 0)
{
left_x = INT32_MIN;
right_x = stops[0].x;
left_c = right_c = (pixman_color_t*) &transparent_black;
}
else if (n == count)
{
left_x = stops[n - 1].x;
right_x = INT32_MAX;
left_c = right_c = (pixman_color_t*) &transparent_black;
}
else
{
left_x = stops[n - 1].x;
right_x = stops[n].x;
left_c = &stops[n - 1].color;
right_c = &stops[n].color;
}
}
walker->left_x = left_x;
walker->right_x = right_x;
walker->left_ag = ((left_c->alpha >> 8) << 16) | (left_c->green >> 8);
walker->left_rb = ((left_c->red & 0xff00) << 8) | (left_c->blue >> 8);
walker->right_ag = ((right_c->alpha >> 8) << 16) | (right_c->green >> 8);
walker->right_rb = ((right_c->red & 0xff00) << 8) | (right_c->blue >> 8);
if (walker->left_x == walker->right_x ||
( walker->left_ag == walker->right_ag &&
walker->left_rb == walker->right_rb ) )
{
walker->stepper = 0;
}
else
{
int32_t width = right_x - left_x;
walker->stepper = ((1 << 24) + width / 2) / width;
}
walker->need_reset = FALSE;
}
#define PIXMAN_GRADIENT_WALKER_NEED_RESET(w, x) \
( (w)->need_reset || (x) < (w)->left_x || (x) >= (w)->right_x)
/* the following assumes that PIXMAN_GRADIENT_WALKER_NEED_RESET(w,x) is FALSE */
uint32_t
_pixman_gradient_walker_pixel (pixman_gradient_walker_t *walker,
pixman_fixed_32_32_t x)
{
int dist, idist;
uint32_t t1, t2, a, color;
if (PIXMAN_GRADIENT_WALKER_NEED_RESET (walker, x))
_pixman_gradient_walker_reset (walker, x);
dist = ((int)(x - walker->left_x) * walker->stepper) >> 16;
idist = 256 - dist;
/* combined INTERPOLATE and premultiply */
t1 = walker->left_rb * idist + walker->right_rb * dist;
t1 = (t1 >> 8) & 0xff00ff;
t2 = walker->left_ag * idist + walker->right_ag * dist;
t2 &= 0xff00ff00;
color = t2 & 0xff000000;
a = t2 >> 24;
t1 = t1 * a + 0x800080;
t1 = (t1 + ((t1 >> 8) & 0xff00ff)) >> 8;
t2 = (t2 >> 8) * a + 0x800080;
t2 = (t2 + ((t2 >> 8) & 0xff00ff));
return (color | (t1 & 0xff00ff) | (t2 & 0xff00));
}
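A minimal usage sketch, mirroring the loop in the linear-gradient scanline fetcher elsewhere in this commit; `gradient`, `buffer`, `width`, and the fixed-point start `t0` and per-pixel step `inc` are assumed to be set up by the caller:

pixman_gradient_walker_t walker;
pixman_fixed_48_16_t t = t0;
uint32_t *end = buffer + width;

_pixman_gradient_walker_init (&walker, gradient, PIXMAN_REPEAT_PAD);

while (buffer < end)
{
    /* Lazily re-resolves the surrounding pair of stops whenever t
     * leaves [left_x, right_x), then interpolates and premultiplies,
     * returning a packed a8r8g8b8 pixel. */
    *buffer++ = _pixman_gradient_walker_pixel (&walker, t);
    t += inc;
}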

View file

@@ -27,26 +27,18 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
#define Alpha(x) ((x) >> 24)
static void
init_source_image (source_image_t *image)
{
image->class = SOURCE_IMAGE_CLASS_UNKNOWN;
}
static pixman_bool_t
init_gradient (gradient_t *gradient,
const pixman_gradient_stop_t *stops,
int n_stops)
pixman_bool_t
_pixman_init_gradient (gradient_t * gradient,
const pixman_gradient_stop_t *stops,
int n_stops)
{
return_val_if_fail (n_stops > 0, FALSE);
init_source_image (&gradient->common);
gradient->stops = pixman_malloc_ab (n_stops, sizeof (pixman_gradient_stop_t));
if (!gradient->stops)
return FALSE;
@@ -56,24 +48,52 @@ init_gradient (gradient_t *gradient,
gradient->n_stops = n_stops;
gradient->stop_range = 0xffff;
gradient->color_table = NULL;
gradient->color_table_size = 0;
gradient->common.class = SOURCE_IMAGE_CLASS_UNKNOWN;
return TRUE;
}
static uint32_t
color_to_uint32 (const pixman_color_t *color)
/*
* By default, just evaluate the image at 32bpp and expand. Individual image
* types can plug in a better scanline getter if they want to. For example
* we could produce smoother gradients by evaluating them at higher color
* depth, but that's a project for the future.
*/
void
_pixman_image_get_scanline_generic_64 (pixman_image_t * image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t * mask,
uint32_t mask_bits)
{
return
(color->alpha >> 8 << 24) |
(color->red >> 8 << 16) |
(color->green & 0xff00) |
(color->blue >> 8);
uint32_t *mask8 = NULL;
/* Contract the mask image, if one exists, so that the 32-bit fetch
* function can use it.
*/
if (mask)
{
mask8 = pixman_malloc_ab (width, sizeof(uint32_t));
if (!mask8)
return;
pixman_contract (mask8, (uint64_t *)mask, width);
}
/* Fetch the source image into the first half of buffer. */
_pixman_image_get_scanline_32 (image, x, y, width, (uint32_t*)buffer, mask8,
mask_bits);
/* Expand from 32bpp to 64bpp in place. */
pixman_expand ((uint64_t *)buffer, buffer, PIXMAN_a8r8g8b8, width);
free (mask8);
}
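/* For reference, the 8 -> 16 bits/channel widening done by
* pixman_expand() is bit replication; per channel it is equivalent to
* this illustrative sketch (not part of this file):
*/
static inline uint16_t
expand_channel_sketch (uint8_t v)
{
    /* 0x00 -> 0x0000, 0x80 -> 0x8080, 0xff -> 0xffff */
    return ((uint16_t) v << 8) | v;
}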
static pixman_image_t *
allocate_image (void)
pixman_image_t *
_pixman_image_allocate (void)
{
pixman_image_t *image = malloc (sizeof (pixman_image_t));
@@ -81,10 +101,10 @@ allocate_image (void)
{
image_common_t *common = &image->common;
pixman_region32_init (&common->full_region);
pixman_region32_init (&common->clip_region);
common->src_clip = &common->full_region;
common->has_client_clip = FALSE;
common->have_clip_region = FALSE;
common->clip_sources = FALSE;
common->transform = NULL;
common->repeat = PIXMAN_REPEAT_NONE;
common->filter = PIXMAN_FILTER_NEAREST;
@@ -93,13 +113,63 @@ allocate_image (void)
common->alpha_map = NULL;
common->component_alpha = FALSE;
common->ref_count = 1;
common->read_func = NULL;
common->write_func = NULL;
common->classify = NULL;
common->client_clip = FALSE;
common->destroy_func = NULL;
common->destroy_data = NULL;
common->need_workaround = FALSE;
common->dirty = TRUE;
}
return image;
}
source_image_class_t
_pixman_image_classify (pixman_image_t *image,
int x,
int y,
int width,
int height)
{
if (image->common.classify)
return image->common.classify (image, x, y, width, height);
else
return SOURCE_IMAGE_CLASS_UNKNOWN;
}
void
_pixman_image_get_scanline_32 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask,
uint32_t mask_bits)
{
image->common.get_scanline_32 (image, x, y, width, buffer, mask, mask_bits);
}
/* Even though the type of buffer is uint32_t *, the function actually expects
* a uint64_t *buffer.
*/
void
_pixman_image_get_scanline_64 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *unused,
uint32_t unused2)
{
image->common.get_scanline_64 (image, x, y, width, buffer, unused, unused2);
}
static void
image_property_changed (pixman_image_t *image)
{
image->common.dirty = TRUE;
}
/* Ref Counting */
PIXMAN_EXPORT pixman_image_t *
pixman_image_ref (pixman_image_t *image)
@@ -119,8 +189,10 @@ pixman_image_unref (pixman_image_t *image)
if (common->ref_count == 0)
{
if (image->common.destroy_func)
image->common.destroy_func (image, image->common.destroy_data);
pixman_region32_fini (&common->clip_region);
pixman_region32_fini (&common->full_region);
if (common->transform)
free (common->transform);
@@ -131,21 +203,14 @@ pixman_image_unref (pixman_image_t *image)
if (common->alpha_map)
pixman_image_unref ((pixman_image_t *)common->alpha_map);
#if 0
if (image->type == BITS && image->bits.indexed)
free (image->bits.indexed);
#endif
#if 0
memset (image, 0xaa, sizeof (pixman_image_t));
#endif
if (image->type == LINEAR || image->type == RADIAL || image->type == CONICAL)
if (image->type == LINEAR ||
image->type == RADIAL ||
image->type == CONICAL)
{
if (image->gradient.stops)
free (image->gradient.stops);
}
if (image->type == BITS && image->bits.free_me)
free (image->bits.free_me);
@@ -157,327 +222,146 @@ pixman_image_unref (pixman_image_t *image)
return FALSE;
}
/* Constructors */
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_solid_fill (pixman_color_t *color)
PIXMAN_EXPORT void
pixman_image_set_destroy_function (pixman_image_t * image,
pixman_image_destroy_func_t func,
void * data)
{
pixman_image_t *img = allocate_image();
if (!img)
return NULL;
init_source_image (&img->solid.common);
img->type = SOLID;
img->solid.color = color_to_uint32 (color);
return img;
image->common.destroy_func = func;
image->common.destroy_data = data;
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_linear_gradient (pixman_point_fixed_t *p1,
pixman_point_fixed_t *p2,
const pixman_gradient_stop_t *stops,
int n_stops)
void
_pixman_image_reset_clip_region (pixman_image_t *image)
{
pixman_image_t *image;
linear_gradient_t *linear;
return_val_if_fail (n_stops >= 2, NULL);
image = allocate_image();
if (!image)
return NULL;
linear = &image->linear;
if (!init_gradient (&linear->common, stops, n_stops))
{
free (image);
return NULL;
}
linear->p1 = *p1;
linear->p2 = *p2;
image->type = LINEAR;
return image;
image->common.have_clip_region = FALSE;
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_radial_gradient (pixman_point_fixed_t *inner,
pixman_point_fixed_t *outer,
pixman_fixed_t inner_radius,
pixman_fixed_t outer_radius,
const pixman_gradient_stop_t *stops,
int n_stops)
void
_pixman_image_validate (pixman_image_t *image)
{
pixman_image_t *image;
radial_gradient_t *radial;
return_val_if_fail (n_stops >= 2, NULL);
image = allocate_image();
if (!image)
return NULL;
radial = &image->radial;
if (!init_gradient (&radial->common, stops, n_stops))
if (image->common.dirty)
{
free (image);
return NULL;
image->common.property_changed (image);
image->common.dirty = FALSE;
}
image->type = RADIAL;
radial->c1.x = inner->x;
radial->c1.y = inner->y;
radial->c1.radius = inner_radius;
radial->c2.x = outer->x;
radial->c2.y = outer->y;
radial->c2.radius = outer_radius;
radial->cdx = pixman_fixed_to_double (radial->c2.x - radial->c1.x);
radial->cdy = pixman_fixed_to_double (radial->c2.y - radial->c1.y);
radial->dr = pixman_fixed_to_double (radial->c2.radius - radial->c1.radius);
radial->A = (radial->cdx * radial->cdx
+ radial->cdy * radial->cdy
- radial->dr * radial->dr);
return image;
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_conical_gradient (pixman_point_fixed_t *center,
pixman_fixed_t angle,
const pixman_gradient_stop_t *stops,
int n_stops)
{
pixman_image_t *image = allocate_image();
conical_gradient_t *conical;
if (!image)
return NULL;
conical = &image->conical;
if (!init_gradient (&conical->common, stops, n_stops))
{
free (image);
return NULL;
}
image->type = CONICAL;
conical->center = *center;
conical->angle = angle;
return image;
}
static uint32_t *
create_bits (pixman_format_code_t format,
int width,
int height,
int *rowstride_bytes)
{
int stride;
int buf_size;
int bpp;
/* what follows is a long-winded way, avoiding any possibility of integer
* overflows, of saying:
* stride = ((width * bpp + FB_MASK) >> FB_SHIFT) * sizeof (uint32_t);
*/
bpp = PIXMAN_FORMAT_BPP (format);
if (pixman_multiply_overflows_int (width, bpp))
return NULL;
stride = width * bpp;
if (pixman_addition_overflows_int (stride, FB_MASK))
return NULL;
stride += FB_MASK;
stride >>= FB_SHIFT;
#if FB_SHIFT < 2
if (pixman_multiply_overflows_int (stride, sizeof (uint32_t)))
return NULL;
#endif
stride *= sizeof (uint32_t);
if (pixman_multiply_overflows_int (height, stride))
return NULL;
buf_size = height * stride;
if (rowstride_bytes)
*rowstride_bytes = stride;
return calloc (buf_size, 1);
}
static void
reset_clip_region (pixman_image_t *image)
{
pixman_region32_fini (&image->common.clip_region);
if (image->type == BITS)
{
pixman_region32_init_rect (&image->common.clip_region, 0, 0,
image->bits.width, image->bits.height);
}
else
{
pixman_region32_init (&image->common.clip_region);
}
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_bits (pixman_format_code_t format,
int width,
int height,
uint32_t *bits,
int rowstride_bytes)
{
pixman_image_t *image;
uint32_t *free_me = NULL;
/* must be a whole number of uint32_t's
*/
return_val_if_fail (bits == NULL ||
(rowstride_bytes % sizeof (uint32_t)) == 0, NULL);
if (!bits && width && height)
{
free_me = bits = create_bits (format, width, height, &rowstride_bytes);
if (!bits)
return NULL;
}
image = allocate_image();
if (!image) {
if (free_me)
free (free_me);
return NULL;
}
image->type = BITS;
image->bits.format = format;
image->bits.width = width;
image->bits.height = height;
image->bits.bits = bits;
image->bits.free_me = free_me;
image->bits.rowstride = rowstride_bytes / (int) sizeof (uint32_t); /* we store it in number
* of uint32_t's
*/
image->bits.indexed = NULL;
pixman_region32_fini (&image->common.full_region);
pixman_region32_init_rect (&image->common.full_region, 0, 0,
image->bits.width, image->bits.height);
reset_clip_region (image);
return image;
}
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_clip_region32 (pixman_image_t *image,
pixman_region32_t *region)
pixman_image_set_clip_region32 (pixman_image_t * image,
pixman_region32_t *region)
{
image_common_t *common = (image_common_t *)image;
pixman_bool_t result;
if (region)
{
return pixman_region32_copy (&common->clip_region, region);
if ((result = pixman_region32_copy (&common->clip_region, region)))
image->common.have_clip_region = TRUE;
}
else
{
reset_clip_region (image);
_pixman_image_reset_clip_region (image);
return TRUE;
result = TRUE;
}
}
image_property_changed (image);
return result;
}
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_clip_region (pixman_image_t *image,
pixman_region16_t *region)
pixman_image_set_clip_region (pixman_image_t * image,
pixman_region16_t *region)
{
image_common_t *common = (image_common_t *)image;
pixman_bool_t result;
if (region)
{
return pixman_region32_copy_from_region16 (&common->clip_region, region);
if ((result = pixman_region32_copy_from_region16 (&common->clip_region, region)))
image->common.have_clip_region = TRUE;
}
else
{
reset_clip_region (image);
_pixman_image_reset_clip_region (image);
return TRUE;
result = TRUE;
}
image_property_changed (image);
return result;
}
/* Sets whether the clip region includes a clip region set by the client
*/
PIXMAN_EXPORT void
pixman_image_set_has_client_clip (pixman_image_t *image,
pixman_bool_t client_clip)
pixman_bool_t client_clip)
{
image->common.has_client_clip = client_clip;
image->common.client_clip = client_clip;
}
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_transform (pixman_image_t *image,
const pixman_transform_t *transform)
pixman_image_set_transform (pixman_image_t * image,
const pixman_transform_t *transform)
{
static const pixman_transform_t id =
{
{ { pixman_fixed_1, 0, 0 },
{ 0, pixman_fixed_1, 0 },
{ 0, 0, pixman_fixed_1 }
}
{ 0, 0, pixman_fixed_1 } }
};
image_common_t *common = (image_common_t *)image;
pixman_bool_t result;
if (common->transform == transform)
return TRUE;
if (memcmp (&id, transform, sizeof (pixman_transform_t)) == 0)
{
free(common->transform);
free (common->transform);
common->transform = NULL;
return TRUE;
result = TRUE;
goto out;
}
if (common->transform == NULL)
common->transform = malloc (sizeof (pixman_transform_t));
if (common->transform == NULL)
return FALSE;
{
result = FALSE;
memcpy(common->transform, transform, sizeof(pixman_transform_t));
goto out;
}
return TRUE;
memcpy (common->transform, transform, sizeof(pixman_transform_t));
result = TRUE;
out:
image_property_changed (image);
return result;
}
PIXMAN_EXPORT void
pixman_image_set_repeat (pixman_image_t *image,
pixman_repeat_t repeat)
pixman_image_set_repeat (pixman_image_t *image,
pixman_repeat_t repeat)
{
image->common.repeat = repeat;
image_property_changed (image);
}
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_filter (pixman_image_t *image,
pixman_filter_t filter,
const pixman_fixed_t *params,
int n_params)
pixman_image_set_filter (pixman_image_t * image,
pixman_filter_t filter,
const pixman_fixed_t *params,
int n_params)
{
image_common_t *common = (image_common_t *)image;
pixman_fixed_t *new_params;
@@ -493,7 +377,7 @@ pixman_image_set_filter (pixman_image_t *image,
return FALSE;
memcpy (new_params,
params, n_params * sizeof (pixman_fixed_t));
params, n_params * sizeof (pixman_fixed_t));
}
common->filter = filter;
@@ -503,19 +387,18 @@ pixman_image_set_filter (pixman_image_t *image,
common->filter_params = new_params;
common->n_filter_params = n_params;
image_property_changed (image);
return TRUE;
}
PIXMAN_EXPORT void
pixman_image_set_source_clipping (pixman_image_t *image,
pixman_bool_t source_clipping)
pixman_image_set_source_clipping (pixman_image_t *image,
pixman_bool_t clip_sources)
{
image_common_t *common = &image->common;
image->common.clip_sources = clip_sources;
if (source_clipping)
common->src_clip = &common->clip_region;
else
common->src_clip = &common->full_region;
image_property_changed (image);
}
/* Unlike all the other property setters, this function does not
@@ -523,19 +406,21 @@ pixman_image_set_source_clipping (pixman_image_t *image,
* way, way too expensive.
*/
PIXMAN_EXPORT void
pixman_image_set_indexed (pixman_image_t *image,
const pixman_indexed_t *indexed)
pixman_image_set_indexed (pixman_image_t * image,
const pixman_indexed_t *indexed)
{
bits_image_t *bits = (bits_image_t *)image;
bits->indexed = indexed;
image_property_changed (image);
}
PIXMAN_EXPORT void
pixman_image_set_alpha_map (pixman_image_t *image,
pixman_image_t *alpha_map,
int16_t x,
int16_t y)
pixman_image_t *alpha_map,
int16_t x,
int16_t y)
{
image_common_t *common = (image_common_t *)image;
@@ -552,32 +437,41 @@ pixman_image_set_alpha_map (pixman_image_t *image,
common->alpha_map = NULL;
}
common->alpha_origin.x = x;
common->alpha_origin.y = y;
common->alpha_origin_x = x;
common->alpha_origin_y = y;
image_property_changed (image);
}
PIXMAN_EXPORT void
pixman_image_set_component_alpha (pixman_image_t *image,
pixman_bool_t component_alpha)
pixman_image_set_component_alpha (pixman_image_t *image,
pixman_bool_t component_alpha)
{
image->common.component_alpha = component_alpha;
image_property_changed (image);
}
PIXMAN_EXPORT pixman_bool_t
pixman_image_get_component_alpha (pixman_image_t *image)
pixman_image_get_component_alpha (pixman_image_t *image)
{
return image->common.component_alpha;
}
PIXMAN_EXPORT void
pixman_image_set_accessors (pixman_image_t *image,
pixman_read_memory_func_t read_func,
pixman_write_memory_func_t write_func)
pixman_image_set_accessors (pixman_image_t * image,
pixman_read_memory_func_t read_func,
pixman_write_memory_func_t write_func)
{
return_if_fail (image != NULL);
image->common.read_func = read_func;
image->common.write_func = write_func;
if (image->type == BITS)
{
image->bits.read_func = read_func;
image->bits.write_func = write_func;
image_property_changed (image);
}
}
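A hedged sketch of a matching accessor pair; as the READ()/WRITE() wrapper macros suggest, each callback receives a pointer into the image bits and the pixel size in bytes (1, 2 or 4). The names here are illustrative:

static uint32_t
plain_read (const void *src, int size)
{
    switch (size)
    {
    case 1:  return *(const uint8_t *) src;
    case 2:  return *(const uint16_t *) src;
    default: return *(const uint32_t *) src;
    }
}

static void
plain_write (void *dst, uint32_t value, int size)
{
    switch (size)
    {
    case 1:  *(uint8_t *) dst = value;  break;
    case 2:  *(uint16_t *) dst = value; break;
    default: *(uint32_t *) dst = value; break;
    }
}

/* pixman_image_set_accessors (image, plain_read, plain_write); */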
PIXMAN_EXPORT uint32_t *
@@ -625,209 +519,96 @@ pixman_image_get_depth (pixman_image_t *image)
return 0;
}
static pixman_bool_t
color_to_pixel (pixman_color_t *color,
uint32_t *pixel,
pixman_format_code_t format)
{
uint32_t c = color_to_uint32 (color);
if (!(format == PIXMAN_a8r8g8b8 ||
format == PIXMAN_x8r8g8b8 ||
format == PIXMAN_a8b8g8r8 ||
format == PIXMAN_x8b8g8r8 ||
format == PIXMAN_r5g6b5 ||
format == PIXMAN_b5g6r5 ||
format == PIXMAN_a8))
{
return FALSE;
}
if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_ABGR)
{
c = ((c & 0xff000000) >> 0) |
((c & 0x00ff0000) >> 16) |
((c & 0x0000ff00) >> 0) |
((c & 0x000000ff) << 16);
}
if (format == PIXMAN_a8)
c = c >> 24;
else if (format == PIXMAN_r5g6b5 ||
format == PIXMAN_b5g6r5)
c = cvt8888to0565 (c);
#if 0
printf ("color: %x %x %x %x\n", color->alpha, color->red, color->green, color->blue);
printf ("pixel: %x\n", c);
#endif
*pixel = c;
return TRUE;
}
PIXMAN_EXPORT pixman_bool_t
pixman_image_fill_rectangles (pixman_op_t op,
pixman_image_t *dest,
pixman_color_t *color,
int n_rects,
const pixman_rectangle16_t *rects)
{
pixman_image_t *solid;
pixman_color_t c;
int i;
if (color->alpha == 0xffff)
{
if (op == PIXMAN_OP_OVER)
op = PIXMAN_OP_SRC;
}
if (op == PIXMAN_OP_CLEAR)
{
c.red = 0;
c.green = 0;
c.blue = 0;
c.alpha = 0;
color = &c;
op = PIXMAN_OP_SRC;
}
if (op == PIXMAN_OP_SRC)
{
uint32_t pixel;
if (color_to_pixel (color, &pixel, dest->bits.format))
{
for (i = 0; i < n_rects; ++i)
{
pixman_region32_t fill_region;
int n_boxes, j;
pixman_box32_t *boxes;
pixman_region32_init_rect (&fill_region, rects[i].x, rects[i].y, rects[i].width, rects[i].height);
pixman_region32_intersect (&fill_region, &fill_region, &dest->common.clip_region);
boxes = pixman_region32_rectangles (&fill_region, &n_boxes);
for (j = 0; j < n_boxes; ++j)
{
const pixman_box32_t *box = &(boxes[j]);
pixman_fill (dest->bits.bits, dest->bits.rowstride, PIXMAN_FORMAT_BPP (dest->bits.format),
box->x1, box->y1, box->x2 - box->x1, box->y2 - box->y1,
pixel);
}
pixman_region32_fini (&fill_region);
}
return TRUE;
}
}
solid = pixman_image_create_solid_fill (color);
if (!solid)
return FALSE;
for (i = 0; i < n_rects; ++i)
{
const pixman_rectangle16_t *rect = &(rects[i]);
pixman_image_composite (op, solid, NULL, dest,
0, 0, 0, 0,
rect->x, rect->y,
rect->width, rect->height);
}
pixman_image_unref (solid);
return TRUE;
}
pixman_bool_t
pixman_image_can_get_solid (pixman_image_t *image)
_pixman_image_is_solid (pixman_image_t *image)
{
if (image->type == SOLID)
return TRUE;
if (image->type != BITS ||
image->bits.width != 1 ||
image->bits.height != 1)
if (image->type != BITS ||
image->bits.width != 1 ||
image->bits.height != 1)
{
return FALSE;
}
if (image->common.repeat != PIXMAN_REPEAT_NORMAL)
if (image->common.repeat == PIXMAN_REPEAT_NONE)
return FALSE;
switch (image->bits.format)
return TRUE;
}
uint32_t
_pixman_image_get_solid (pixman_image_t * image,
pixman_format_code_t format)
{
uint32_t result;
_pixman_image_get_scanline_32 (image, 0, 0, 1, &result, NULL, 0);
/* If necessary, convert RGB <--> BGR. */
if (PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB)
{
case PIXMAN_a8r8g8b8:
case PIXMAN_x8r8g8b8:
case PIXMAN_a8b8g8r8:
case PIXMAN_x8b8g8r8:
case PIXMAN_r8g8b8:
case PIXMAN_b8g8r8:
case PIXMAN_r5g6b5:
case PIXMAN_b5g6r5:
return TRUE;
default:
return FALSE;
result = (((result & 0xff000000) >> 0) |
((result & 0x00ff0000) >> 16) |
((result & 0x0000ff00) >> 0) |
((result & 0x000000ff) << 16));
}
return result;
}
pixman_bool_t
pixman_image_is_opaque(pixman_image_t *image)
_pixman_image_is_opaque (pixman_image_t *image)
{
int i = 0;
int gradientNumberOfColors = 0;
int i;
if(image->common.alpha_map)
return FALSE;
if (image->common.alpha_map)
return FALSE;
switch(image->type)
switch (image->type)
{
case BITS:
if(PIXMAN_FORMAT_A(image->bits.format))
return FALSE;
break;
if (image->common.repeat == PIXMAN_REPEAT_NONE)
return FALSE;
if (PIXMAN_FORMAT_A (image->bits.format))
return FALSE;
break;
case LINEAR:
case CONICAL:
case RADIAL:
gradientNumberOfColors = image->gradient.n_stops;
i=0;
while(i<gradientNumberOfColors)
{
if(image->gradient.stops[i].color.alpha != 0xffff)
return FALSE;
i++;
}
break;
if (image->common.repeat == PIXMAN_REPEAT_NONE)
return FALSE;
for (i = 0; i < image->gradient.n_stops; ++i)
{
if (image->gradient.stops[i].color.alpha != 0xffff)
return FALSE;
}
break;
case CONICAL:
/* Conical gradients always have a transparent border */
return FALSE;
break;
case SOLID:
if(Alpha(image->solid.color) != 0xff)
return FALSE;
if (ALPHA_8 (image->solid.color) != 0xff)
return FALSE;
break;
default:
return FALSE;
break;
}
/* Convolution filters can introduce translucency if the sum of the weights
is lower than 1. */
/* Convolution filters can introduce translucency if the sum of the
* weights is lower than 1.
*/
if (image->common.filter == PIXMAN_FILTER_CONVOLUTION)
return FALSE;
return FALSE;
if (image->common.repeat == PIXMAN_REPEAT_NONE)
{
if (image->common.filter != PIXMAN_FILTER_NEAREST)
return FALSE;
if (image->common.transform)
return FALSE;
/* Gradients do not necessarily cover the entire compositing area */
if (image->type == LINEAR || image->type == CONICAL || image->type == RADIAL)
return FALSE;
}
return TRUE;
return TRUE;
}

View file

@@ -0,0 +1,268 @@
/*
* Copyright © 2009 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Red Hat not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Red Hat makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include "pixman-private.h"
static void
delegate_composite (pixman_implementation_t * imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
_pixman_implementation_composite (imp->delegate,
op,
src, mask, dest,
src_x, src_y,
mask_x, mask_y,
dest_x, dest_y,
width, height);
}
static void
delegate_combine_32 (pixman_implementation_t * imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width)
{
_pixman_implementation_combine_32 (imp->delegate,
op, dest, src, mask, width);
}
static void
delegate_combine_64 (pixman_implementation_t * imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width)
{
_pixman_implementation_combine_64 (imp->delegate,
op, dest, src, mask, width);
}
static void
delegate_combine_32_ca (pixman_implementation_t * imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width)
{
_pixman_implementation_combine_32_ca (imp->delegate,
op, dest, src, mask, width);
}
static void
delegate_combine_64_ca (pixman_implementation_t * imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width)
{
_pixman_implementation_combine_64_ca (imp->delegate,
op, dest, src, mask, width);
}
static pixman_bool_t
delegate_blt (pixman_implementation_t * imp,
uint32_t * src_bits,
uint32_t * dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height)
{
return _pixman_implementation_blt (
imp->delegate, src_bits, dst_bits, src_stride, dst_stride,
src_bpp, dst_bpp, src_x, src_y, dst_x, dst_y,
width, height);
}
static pixman_bool_t
delegate_fill (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor)
{
return _pixman_implementation_fill (
imp->delegate, bits, stride, bpp, x, y, width, height, xor);
}
pixman_implementation_t *
_pixman_implementation_create (pixman_implementation_t *delegate)
{
pixman_implementation_t *imp = malloc (sizeof (pixman_implementation_t));
pixman_implementation_t *d;
int i;
if (!imp)
return NULL;
/* Make sure the whole delegate chain has the right toplevel */
imp->delegate = delegate;
for (d = imp; d != NULL; d = d->delegate)
d->toplevel = imp;
/* Fill out function pointers with ones that just delegate
*/
imp->composite = delegate_composite;
imp->blt = delegate_blt;
imp->fill = delegate_fill;
for (i = 0; i < PIXMAN_N_OPERATORS; ++i)
{
imp->combine_32[i] = delegate_combine_32;
imp->combine_64[i] = delegate_combine_64;
imp->combine_32_ca[i] = delegate_combine_32_ca;
imp->combine_64_ca[i] = delegate_combine_64_ca;
}
return imp;
}
void
_pixman_implementation_combine_32 (pixman_implementation_t * imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width)
{
(*imp->combine_32[op]) (imp, op, dest, src, mask, width);
}
void
_pixman_implementation_combine_64 (pixman_implementation_t * imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width)
{
(*imp->combine_64[op]) (imp, op, dest, src, mask, width);
}
void
_pixman_implementation_combine_32_ca (pixman_implementation_t * imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width)
{
(*imp->combine_32_ca[op]) (imp, op, dest, src, mask, width);
}
void
_pixman_implementation_combine_64_ca (pixman_implementation_t * imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width)
{
(*imp->combine_64_ca[op]) (imp, op, dest, src, mask, width);
}
void
_pixman_implementation_composite (pixman_implementation_t * imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
(*imp->composite) (imp, op,
src, mask, dest,
src_x, src_y, mask_x, mask_y, dest_x, dest_y,
width, height);
}
pixman_bool_t
_pixman_implementation_blt (pixman_implementation_t * imp,
uint32_t * src_bits,
uint32_t * dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height)
{
return (*imp->blt) (imp, src_bits, dst_bits, src_stride, dst_stride,
src_bpp, dst_bpp, src_x, src_y, dst_x, dst_y,
width, height);
}
pixman_bool_t
_pixman_implementation_fill (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor)
{
return (*imp->fill) (imp, bits, stride, bpp, x, y, width, height, xor);
}
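A hedged sketch of how a CPU-specific implementation is expected to use this machinery: wrap a slower implementation as delegate and override only the accelerated entry points (my_fast_fill and my_fast_over are hypothetical functions with the fill and combine_32 signatures used above):

pixman_implementation_t *
create_my_fast_implementation (pixman_implementation_t *fallback)
{
    pixman_implementation_t *imp = _pixman_implementation_create (fallback);

    if (!imp)
        return NULL;

    /* Anything not overridden keeps the delegate_* defaults installed
     * by _pixman_implementation_create() and falls through to
     * `fallback`. */
    imp->fill = my_fast_fill;                        /* hypothetical */
    imp->combine_32[PIXMAN_OP_OVER] = my_fast_over;  /* hypothetical */

    return imp;
}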

View file

@@ -0,0 +1,294 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include "pixman-private.h"
static source_image_class_t
linear_gradient_classify (pixman_image_t *image,
int x,
int y,
int width,
int height)
{
linear_gradient_t *linear = (linear_gradient_t *)image;
pixman_vector_t v;
pixman_fixed_32_32_t l;
pixman_fixed_48_16_t dx, dy, a, b, off;
pixman_fixed_48_16_t factors[4];
int i;
image->source.class = SOURCE_IMAGE_CLASS_UNKNOWN;
dx = linear->p2.x - linear->p1.x;
dy = linear->p2.y - linear->p1.y;
l = dx * dx + dy * dy;
if (l)
{
a = (dx << 32) / l;
b = (dy << 32) / l;
}
else
{
a = b = 0;
}
off = (-a * linear->p1.x
-b * linear->p1.y) >> 16;
for (i = 0; i < 3; i++)
{
v.vector[0] = pixman_int_to_fixed ((i % 2) * (width - 1) + x);
v.vector[1] = pixman_int_to_fixed ((i / 2) * (height - 1) + y);
v.vector[2] = pixman_fixed_1;
if (image->common.transform)
{
if (!pixman_transform_point_3d (image->common.transform, &v))
{
image->source.class = SOURCE_IMAGE_CLASS_UNKNOWN;
return image->source.class;
}
}
factors[i] = ((a * v.vector[0] + b * v.vector[1]) >> 16) + off;
}
if (factors[2] == factors[0])
image->source.class = SOURCE_IMAGE_CLASS_HORIZONTAL;
else if (factors[1] == factors[0])
image->source.class = SOURCE_IMAGE_CLASS_VERTICAL;
return image->source.class;
}
static void
linear_gradient_get_scanline_32 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask,
uint32_t mask_bits)
{
pixman_vector_t v, unit;
pixman_fixed_32_32_t l;
pixman_fixed_48_16_t dx, dy, a, b, off;
gradient_t *gradient = (gradient_t *)image;
source_image_t *source = (source_image_t *)image;
linear_gradient_t *linear = (linear_gradient_t *)image;
uint32_t *end = buffer + width;
pixman_gradient_walker_t walker;
_pixman_gradient_walker_init (&walker, gradient, source->common.repeat);
/* reference point is the center of the pixel */
v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
v.vector[2] = pixman_fixed_1;
if (source->common.transform)
{
if (!pixman_transform_point_3d (source->common.transform, &v))
return;
unit.vector[0] = source->common.transform->matrix[0][0];
unit.vector[1] = source->common.transform->matrix[1][0];
unit.vector[2] = source->common.transform->matrix[2][0];
}
else
{
unit.vector[0] = pixman_fixed_1;
unit.vector[1] = 0;
unit.vector[2] = 0;
}
dx = linear->p2.x - linear->p1.x;
dy = linear->p2.y - linear->p1.y;
l = dx * dx + dy * dy;
if (l != 0)
{
a = (dx << 32) / l;
b = (dy << 32) / l;
off = (-a * linear->p1.x
-b * linear->p1.y) >> 16;
}
if (l == 0 || (unit.vector[2] == 0 && v.vector[2] == pixman_fixed_1))
{
pixman_fixed_48_16_t inc, t;
/* affine transformation only */
if (l == 0)
{
t = 0;
inc = 0;
}
else
{
t = ((a * v.vector[0] + b * v.vector[1]) >> 16) + off;
inc = (a * unit.vector[0] + b * unit.vector[1]) >> 16;
}
if (source->class == SOURCE_IMAGE_CLASS_VERTICAL)
{
register uint32_t color;
color = _pixman_gradient_walker_pixel (&walker, t);
while (buffer < end)
*buffer++ = color;
}
else
{
if (!mask)
{
while (buffer < end)
{
*buffer++ = _pixman_gradient_walker_pixel (&walker, t);
t += inc;
}
}
else
{
while (buffer < end)
{
if (*mask++ & mask_bits)
*buffer = _pixman_gradient_walker_pixel (&walker, t);
buffer++;
t += inc;
}
}
}
}
else
{
/* projective transformation */
pixman_fixed_48_16_t t;
if (source->class == SOURCE_IMAGE_CLASS_VERTICAL)
{
register uint32_t color;
if (v.vector[2] == 0)
{
t = 0;
}
else
{
pixman_fixed_48_16_t x, y;
x = ((pixman_fixed_48_16_t) v.vector[0] << 16) / v.vector[2];
y = ((pixman_fixed_48_16_t) v.vector[1] << 16) / v.vector[2];
t = ((a * x + b * y) >> 16) + off;
}
color = _pixman_gradient_walker_pixel (&walker, t);
while (buffer < end)
*buffer++ = color;
}
else
{
while (buffer < end)
{
if (!mask || *mask++ & mask_bits)
{
if (v.vector[2] == 0)
{
t = 0;
}
else
{
pixman_fixed_48_16_t x, y;
x = ((pixman_fixed_48_16_t)v.vector[0] << 16) / v.vector[2];
y = ((pixman_fixed_48_16_t)v.vector[1] << 16) / v.vector[2];
t = ((a * x + b * y) >> 16) + off;
}
*buffer = _pixman_gradient_walker_pixel (&walker, t);
}
++buffer;
v.vector[0] += unit.vector[0];
v.vector[1] += unit.vector[1];
v.vector[2] += unit.vector[2];
}
}
}
}
static void
linear_gradient_property_changed (pixman_image_t *image)
{
image->common.get_scanline_32 = linear_gradient_get_scanline_32;
image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_linear_gradient (pixman_point_fixed_t * p1,
pixman_point_fixed_t * p2,
const pixman_gradient_stop_t *stops,
int n_stops)
{
pixman_image_t *image;
linear_gradient_t *linear;
return_val_if_fail (n_stops >= 2, NULL);
image = _pixman_image_allocate ();
if (!image)
return NULL;
linear = &image->linear;
if (!_pixman_init_gradient (&linear->common, stops, n_stops))
{
free (image);
return NULL;
}
linear->p1 = *p1;
linear->p2 = *p2;
image->type = LINEAR;
image->source.class = SOURCE_IMAGE_CLASS_UNKNOWN;
image->common.classify = linear_gradient_classify;
image->common.property_changed = linear_gradient_property_changed;
return image;
}
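A hedged usage sketch: stop offsets are pixman_fixed_t positions in [0, 1] and colors are 16 bits per channel, so a red-to-blue gradient spanning 100 pixels along x could be created like this:

pixman_gradient_stop_t stops[2] = {
    { pixman_int_to_fixed (0), { 0xffff, 0x0000, 0x0000, 0xffff } },  /* red  */
    { pixman_int_to_fixed (1), { 0x0000, 0x0000, 0xffff, 0xffff } }   /* blue */
};
pixman_point_fixed_t p1 = { pixman_int_to_fixed (0),   pixman_int_to_fixed (0) };
pixman_point_fixed_t p2 = { pixman_int_to_fixed (100), pixman_int_to_fixed (0) };
pixman_image_t *gradient =
    pixman_image_create_linear_gradient (&p1, &p2, stops, 2);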

Diff not shown because of its large size.

Diff not shown because of its large size.

Diff not shown because of its large size.

View file

@@ -0,0 +1,368 @@
/*
*
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* Copyright © 2000 SuSE, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
* Copyright © 2007 Red Hat, Inc.
*
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include <math.h>
#include "pixman-private.h"
static void
radial_gradient_get_scanline_32 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask,
uint32_t mask_bits)
{
/*
* In the radial gradient problem we are given two circles (c1,r1) and
* (c2,r2) that define the gradient itself. Then, for any point p, we
* must compute the value(s) of t within [0.0, 1.0] representing the
* circle(s) that would color the point.
*
* There are potentially two values of t since the point p can be
* colored by both sides of the circle, (which happens whenever one
* circle is not entirely contained within the other).
*
* If we solve for a value of t that is outside of [0.0, 1.0] then we
* use the extend mode (NONE, REPEAT, REFLECT, or PAD) to map to a
* value within [0.0, 1.0].
*
* Here is an illustration of the problem:
*
* [diagram: p1 on circle (c1,r1) and p2 on circle (c2,r2), both at
* the same angle θ from their centers, with p1, p and p2 collinear]
*
* Given (c1,r1), (c2,r2) and p, we must find an angle θ such that two
* points p1 and p2 on the two circles are collinear with p. Then, the
* desired value of t is the ratio of the length of p1p to the length
* of p1p2.
*
* So, we have six unknown values: (p1x, p1y), (p2x, p2y), θ and t.
* We can also write six equations that constrain the problem:
*
* Point p1 is a distance r1 from c1 at an angle of θ:
*
* 1. p1x = c1x + r1·cos θ
* 2. p1y = c1y + r1·sin θ
*
* Point p2 is a distance r2 from c2 at an angle of θ:
*
* 3. p2x = c2x + r2·cos θ
* 4. p2y = c2y + r2·sin θ
*
* Point p lies at a fraction t along the line segment p1p2:
*
* 5. px = t·p2x + (1-t)·p1x
* 6. py = t·p2y + (1-t)·p1y
*
* To solve, first substitute 1-4 into 5 and 6:
*
* px = t·(c2x + r2·cos θ) + (1-t)·(c1x + r1·cos θ)
* py = t·(c2y + r2·sin θ) + (1-t)·(c1y + r1·sin θ)
*
* Then solve each for cos θ and sin θ expressed as a function of t:
*
* cos θ = (-(c2x - c1x)·t + (px - c1x)) / ((r2-r1)·t + r1)
* sin θ = (-(c2y - c1y)·t + (py - c1y)) / ((r2-r1)·t + r1)
*
* To simplify this a bit, we define new variables for several of the
* common terms as shown below:
*
* [diagram: pdx, pdy are the offsets of p from c1; cdx, cdy are the
* offsets of c2 from c1]
*
* cdx = (c2x - c1x)
* cdy = (c2y - c1y)
* dr = r2 - r1
* pdx = px - c1x
* pdy = py - c1y
*
* Note that cdx, cdy, and dr do not depend on point p at all, so can
* be pre-computed for the entire gradient. The simplified equations
* are now:
*
* cos θ = (-cdx·t + pdx) / (dr·t + r1)
* sin θ = (-cdy·t + pdy) / (dr·t + r1)
*
* Finally, to get a single function of t and eliminate the last
* unknown θ, we use the identity sin²θ + cos²θ = 1. First, square
* each equation, (we knew a quadratic was coming since it must be
* possible to obtain two solutions in some cases):
*
* cos²θ = (cdx²·t² - 2·cdx·pdx·t + pdx²) / (dr²·t² + 2·r1·dr·t + r1²)
* sin²θ = (cdy²·t² - 2·cdy·pdy·t + pdy²) / (dr²·t² + 2·r1·dr·t + r1²)
*
* Then add both together, set the result equal to 1, and express as a
* standard quadratic equation in t of the form A·t² + B·t + C = 0:
*
* (cdx² + cdy² - dr²)·t² - 2·(cdx·pdx + cdy·pdy + r1·dr)·t + (pdx² + pdy² - r1²) = 0
*
* In other words:
*
* A = cdx² + cdy² - dr²
* B = -2·(pdx·cdx + pdy·cdy + r1·dr)
* C = pdx² + pdy² - r1²
*
* And again, notice that A does not depend on p, so can be
* precomputed. From here we just use the quadratic formula to solve
* for t:
*
* t = (-B ± √(B² - 4·A·C)) / (2·A)
*/
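/* Illustrative sanity check of the derivation above (not from the
* original comment): for concentric circles at the origin with r1 = 0
* and r2 = 1, we get cdx = cdy = 0 and dr = 1, hence A = -1, B = 0
* and C = pdx² + pdy². The quadratic reduces to t² = pdx² + pdy²,
* i.e. t is simply the distance from the center, as expected.
*/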
gradient_t *gradient = (gradient_t *)image;
source_image_t *source = (source_image_t *)image;
radial_gradient_t *radial = (radial_gradient_t *)image;
uint32_t *end = buffer + width;
pixman_gradient_walker_t walker;
pixman_bool_t affine = TRUE;
double cx = 1.;
double cy = 0.;
double cz = 0.;
double rx = x + 0.5;
double ry = y + 0.5;
double rz = 1.;
_pixman_gradient_walker_init (&walker, gradient, source->common.repeat);
if (source->common.transform)
{
pixman_vector_t v;
/* reference point is the center of the pixel */
v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
v.vector[2] = pixman_fixed_1;
if (!pixman_transform_point_3d (source->common.transform, &v))
return;
cx = source->common.transform->matrix[0][0] / 65536.;
cy = source->common.transform->matrix[1][0] / 65536.;
cz = source->common.transform->matrix[2][0] / 65536.;
rx = v.vector[0] / 65536.;
ry = v.vector[1] / 65536.;
rz = v.vector[2] / 65536.;
affine =
source->common.transform->matrix[2][0] == 0 &&
v.vector[2] == pixman_fixed_1;
}
if (affine)
{
/* When computing t over a scanline, we notice that some expressions
* are constant so we can compute them just once. Given:
*
* t = (-B ± √(B² - 4·A·C)) / (2·A)
*
* where
*
* A = cdx² + cdy² - dr² [precomputed as radial->A]
* B = -2·(pdx·cdx + pdy·cdy + r1·dr)
* C = pdx² + pdy² - r1²
*
* Since we have an affine transformation, we know that (pdx, pdy)
* increase linearly with each pixel,
*
* pdx = pdx0 + n·cx,
* pdy = pdy0 + n·cy,
*
* so we can express B in terms of a linear increment along
* the scanline:
*
* B = B0 + n·cB, with
* B0 = -2·(pdx0·cdx + pdy0·cdy + r1·dr) and
* cB = -2·(cx·cdx + cy·cdy)
*
* Thus we can replace the full evaluation of B per-pixel (4 multiplies,
* 2 additions) with a single addition.
*/
double r1 = radial->c1.radius / 65536.;
double r1sq = r1 * r1;
double pdx = rx - radial->c1.x / 65536.;
double pdy = ry - radial->c1.y / 65536.;
double A = radial->A;
double invA = -65536. / (2. * A);
double A4 = -4. * A;
double B = -2. * (pdx*radial->cdx + pdy*radial->cdy + r1*radial->dr);
double cB = -2. * (cx*radial->cdx + cy*radial->cdy);
pixman_bool_t invert = A * radial->dr < 0;
while (buffer < end)
{
if (!mask || *mask++ & mask_bits)
{
pixman_fixed_48_16_t t;
double det = B * B + A4 * (pdx * pdx + pdy * pdy - r1sq);
if (det <= 0.)
t = (pixman_fixed_48_16_t) (B * invA);
else if (invert)
t = (pixman_fixed_48_16_t) ((B + sqrt (det)) * invA);
else
t = (pixman_fixed_48_16_t) ((B - sqrt (det)) * invA);
*buffer = _pixman_gradient_walker_pixel (&walker, t);
}
++buffer;
pdx += cx;
pdy += cy;
B += cB;
}
}
else
{
/* projective */
while (buffer < end)
{
if (!mask || *mask++ & mask_bits)
{
double pdx, pdy;
double B, C;
double det;
double c1x = radial->c1.x / 65536.0;
double c1y = radial->c1.y / 65536.0;
double r1 = radial->c1.radius / 65536.0;
pixman_fixed_48_16_t t;
double x, y;
if (rz != 0)
{
x = rx / rz;
y = ry / rz;
}
else
{
x = y = 0.;
}
pdx = x - c1x;
pdy = y - c1y;
B = -2 * (pdx * radial->cdx +
pdy * radial->cdy +
r1 * radial->dr);
C = (pdx * pdx + pdy * pdy - r1 * r1);
det = (B * B) - (4 * radial->A * C);
if (det < 0.0)
det = 0.0;
if (radial->A * radial->dr < 0)
t = (pixman_fixed_48_16_t) ((-B - sqrt (det)) / (2.0 * radial->A) * 65536);
else
t = (pixman_fixed_48_16_t) ((-B + sqrt (det)) / (2.0 * radial->A) * 65536);
*buffer = _pixman_gradient_walker_pixel (&walker, t);
}
++buffer;
rx += cx;
ry += cy;
rz += cz;
}
}
}
static void
radial_gradient_property_changed (pixman_image_t *image)
{
image->common.get_scanline_32 = radial_gradient_get_scanline_32;
image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_radial_gradient (pixman_point_fixed_t * inner,
pixman_point_fixed_t * outer,
pixman_fixed_t inner_radius,
pixman_fixed_t outer_radius,
const pixman_gradient_stop_t *stops,
int n_stops)
{
pixman_image_t *image;
radial_gradient_t *radial;
return_val_if_fail (n_stops >= 2, NULL);
image = _pixman_image_allocate ();
if (!image)
return NULL;
radial = &image->radial;
if (!_pixman_init_gradient (&radial->common, stops, n_stops))
{
free (image);
return NULL;
}
image->type = RADIAL;
radial->c1.x = inner->x;
radial->c1.y = inner->y;
radial->c1.radius = inner_radius;
radial->c2.x = outer->x;
radial->c2.y = outer->y;
radial->c2.radius = outer_radius;
radial->cdx = pixman_fixed_to_double (radial->c2.x - radial->c1.x);
radial->cdy = pixman_fixed_to_double (radial->c2.y - radial->c1.y);
radial->dr = pixman_fixed_to_double (radial->c2.radius - radial->c1.radius);
radial->A = (radial->cdx * radial->cdx +
radial->cdy * radial->cdy -
radial->dr * radial->dr);
image->common.property_changed = radial_gradient_property_changed;
return image;
}
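A hedged usage sketch, assuming a two-entry `stops` array like the one in the linear-gradient example: concentric circles give the classic centered radial.

pixman_point_fixed_t center = {
    pixman_int_to_fixed (50), pixman_int_to_fixed (50)
};
pixman_image_t *radial =
    pixman_image_create_radial_gradient (&center, &center,
                                         pixman_int_to_fixed (0),  /* inner radius */
                                         pixman_int_to_fixed (50), /* outer radius */
                                         stops, 2);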

Diff not shown because of its large size.

View file

@@ -42,42 +42,22 @@ typedef struct {
#define PREFIX(x) pixman_region##x
#include "pixman-region.c"
/* This function exists only to make it possible to preserve the X ABI -
* it should go away at first opportunity.
*
* The problem is that the X ABI exports the three structs and has used
* them through macros. So the X server calls this function with
* the addresses of those structs which makes the existing code continue to
* work.
*/
PIXMAN_EXPORT void
pixman_region_set_static_pointers (pixman_box16_t *empty_box,
pixman_region16_data_t *empty_data,
pixman_region16_data_t *broken_data)
{
pixman_region_internal_set_static_pointers (empty_box, empty_data, broken_data);
pixman_region_empty_box = empty_box;
pixman_region_empty_data = empty_data;
pixman_broken_data = broken_data;
}
pixman_bool_t
pixman_region16_copy_from_region32 (pixman_region16_t *dst,
pixman_region32_t *src)
{
int n_boxes, i;
pixman_box32_t *boxes32;
pixman_box16_t *boxes16;
pixman_bool_t retval;
boxes32 = pixman_region32_rectangles (src, &n_boxes);
boxes16 = pixman_malloc_ab (n_boxes, sizeof (pixman_box16_t));
if (!boxes16)
return FALSE;
for (i = 0; i < n_boxes; ++i)
{
boxes16[i].x1 = boxes32[i].x1;
boxes16[i].y1 = boxes32[i].y1;
boxes16[i].x2 = boxes32[i].x2;
boxes16[i].y2 = boxes32[i].y2;
}
pixman_region_fini (dst);
retval = pixman_region_init_rects (dst, boxes16, n_boxes);
free (boxes16);
return retval;
}
#include "pixman-region.c"

View file

@@ -40,34 +40,4 @@ typedef struct {
#define PREFIX(x) pixman_region32##x
pixman_bool_t
pixman_region32_copy_from_region16 (pixman_region32_t *dst,
pixman_region16_t *src)
{
int n_boxes, i;
pixman_box16_t *boxes16;
pixman_box32_t *boxes32;
pixman_bool_t retval;
boxes16 = pixman_region_rectangles (src, &n_boxes);
boxes32 = pixman_malloc_ab (n_boxes, sizeof (pixman_box32_t));
if (!boxes32)
return FALSE;
for (i = 0; i < n_boxes; ++i)
{
boxes32[i].x1 = boxes16[i].x1;
boxes32[i].y1 = boxes16[i].y1;
boxes32[i].x2 = boxes16[i].x2;
boxes32[i].y2 = boxes16[i].y2;
}
pixman_region32_fini (dst);
retval = pixman_region32_init_rects (dst, boxes32, n_boxes);
free (boxes32);
return retval;
}
#include "pixman-region.c"

View file

@@ -0,0 +1,91 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007, 2009 Red Hat, Inc.
* Copyright © 2009 Soren Sandmann
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
static void
solid_fill_get_scanline_32 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask,
uint32_t mask_bits)
{
uint32_t *end = buffer + width;
register uint32_t color = ((solid_fill_t *)image)->color;
while (buffer < end)
*(buffer++) = color;
return;
}
static source_image_class_t
solid_fill_classify (pixman_image_t *image,
int x,
int y,
int width,
int height)
{
return (image->source.class = SOURCE_IMAGE_CLASS_HORIZONTAL);
}
static void
solid_fill_property_changed (pixman_image_t *image)
{
image->common.get_scanline_32 = solid_fill_get_scanline_32;
image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
static uint32_t
color_to_uint32 (const pixman_color_t *color)
{
return
(color->alpha >> 8 << 24) |
(color->red >> 8 << 16) |
(color->green & 0xff00) |
(color->blue >> 8);
}
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_solid_fill (pixman_color_t *color)
{
pixman_image_t *img = _pixman_image_allocate ();
if (!img)
return NULL;
img->type = SOLID;
img->solid.color = color_to_uint32 (color);
img->source.class = SOURCE_IMAGE_CLASS_UNKNOWN;
img->common.classify = solid_fill_classify;
img->common.property_changed = solid_fill_property_changed;
return img;
}
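A hedged usage sketch; pixman_color_t channels are 16-bit, and color_to_uint32() above folds them down to a packed a8r8g8b8 value:

pixman_color_t gray = { 0x8000, 0x8000, 0x8000, 0xffff };
pixman_image_t *solid = pixman_image_create_solid_fill (&gray);
/* solid->solid.color is now 0xff808080 */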

Diff not shown because of its large size.

View file

@@ -27,38 +27,38 @@
#include <stdio.h>
#include "pixman-private.h"
#ifdef PIXMAN_TIMER
#ifdef PIXMAN_TIMERS
static PixmanTimer *timers;
static pixman_timer_t *timers;
static void
dump_timers (void)
{
PixmanTimer *timer;
pixman_timer_t *timer;
for (timer = timers; timer != NULL; timer = timer->next)
{
printf ("%s: total: %llu n: %llu avg: %f\n",
timer->name,
timer->total,
timer->n_times,
timer->total / (double)timer->n_times);
timer->name,
timer->total,
timer->n_times,
timer->total / (double)timer->n_times);
}
}
void
pixman_timer_register (PixmanTimer *timer)
pixman_timer_register (pixman_timer_t *timer)
{
static int initialized;
int atexit(void (*function)(void));
int atexit (void (*function)(void));
if (!initialized)
{
atexit (dump_timers);
initialized = 1;
}
timer->next = timers;
timers = timer;
}

View file

@@ -27,30 +27,230 @@
#include <stdio.h>
#include "pixman-private.h"
typedef uint32_t FbBits;
/*
* Compute the smallest value no less than y which is on a
* grid row
*/
PIXMAN_EXPORT pixman_fixed_t
pixman_sample_ceil_y (pixman_fixed_t y, int n)
{
pixman_fixed_t f = pixman_fixed_frac (y);
pixman_fixed_t i = pixman_fixed_floor (y);
f = ((f + Y_FRAC_FIRST (n)) / STEP_Y_SMALL (n)) * STEP_Y_SMALL (n) +
Y_FRAC_FIRST (n);
if (f > Y_FRAC_LAST (n))
{
if (pixman_fixed_to_int (i) == 0x7fff)
{
f = 0xffff; /* saturate */
}
else
{
f = Y_FRAC_FIRST (n);
i += pixman_fixed_1;
}
}
return (i | f);
}
/*
* Compute the largest value no greater than y which is on a
* grid row
*/
PIXMAN_EXPORT pixman_fixed_t
pixman_sample_floor_y (pixman_fixed_t y,
int n)
{
pixman_fixed_t f = pixman_fixed_frac (y);
pixman_fixed_t i = pixman_fixed_floor (y);
f = DIV (f - Y_FRAC_FIRST (n), STEP_Y_SMALL (n)) * STEP_Y_SMALL (n) +
Y_FRAC_FIRST (n);
if (f < Y_FRAC_FIRST (n))
{
if (pixman_fixed_to_int (i) == 0x8000)
{
f = 0; /* saturate */
}
else
{
f = Y_FRAC_LAST (n);
i -= pixman_fixed_1;
}
}
return (i | f);
}
/*
* Step an edge by any amount (including negative values)
*/
PIXMAN_EXPORT void
pixman_edge_step (pixman_edge_t *e,
int n)
{
pixman_fixed_48_16_t ne;
e->x += n * e->stepx;
ne = e->e + n * (pixman_fixed_48_16_t) e->dx;
if (n >= 0)
{
if (ne > 0)
{
int nx = (ne + e->dy - 1) / e->dy;
e->e = ne - nx * (pixman_fixed_48_16_t) e->dy;
e->x += nx * e->signdx;
}
}
else
{
if (ne <= -e->dy)
{
int nx = (-ne) / e->dy;
e->e = ne + nx * (pixman_fixed_48_16_t) e->dy;
e->x -= nx * e->signdx;
}
}
}
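pixman_edge_step above is a run-sliced DDA: stepx carries the whole-pixel advance per scanline, the remainder dx accumulates into the error term e, and when e goes positive x is nudged by signdx while e is pulled back by dy. A self-contained integer sketch of the same idea for the dx >= 0 case (hypothetical names, not pixman API):
#include <stdio.h>
int
main (void)
{
    /* Walk x along a line of slope dx_total/dy_total, one scanline at
     * a time, using only integer math; mirrors the stepx/dx/e/dy
     * fields of pixman_edge_t as set up by pixman_edge_init. */
    int  dx_total = 7, dy_total = 3;  /* x advances 7/3 per scanline   */
    int  stepx = dx_total / dy_total; /* whole-pixel advance           */
    int  dx    = dx_total % dy_total; /* remainder fed into the error  */
    long e     = -dy_total;           /* same bias as pixman_edge_init */
    int  x     = 0;
    for (int y = 0; y < dy_total; y++)
    {
        printf ("y=%d x=%d\n", y, x);
        x += stepx;
        e += dx;
        if (e > 0)  /* error overflowed: take one extra pixel */
        {
            x += 1; /* signdx == 1 for dx >= 0 */
            e -= dy_total;
        }
    }
    return 0;
}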
/*
* A private routine to initialize the multi-step
* elements of an edge structure
*/
static void
_pixman_edge_multi_init (pixman_edge_t * e,
int n,
pixman_fixed_t *stepx_p,
pixman_fixed_t *dx_p)
{
pixman_fixed_t stepx;
pixman_fixed_48_16_t ne;
ne = n * (pixman_fixed_48_16_t) e->dx;
stepx = n * e->stepx;
if (ne > 0)
{
int nx = ne / e->dy;
ne -= nx * e->dy;
stepx += nx * e->signdx;
}
*dx_p = ne;
*stepx_p = stepx;
}
/*
* Initialize one edge structure given the line endpoints and a
* starting y value
*/
PIXMAN_EXPORT void
pixman_edge_init (pixman_edge_t *e,
int n,
pixman_fixed_t y_start,
pixman_fixed_t x_top,
pixman_fixed_t y_top,
pixman_fixed_t x_bot,
pixman_fixed_t y_bot)
{
pixman_fixed_t dx, dy;
e->x = x_top;
e->e = 0;
dx = x_bot - x_top;
dy = y_bot - y_top;
e->dy = dy;
e->dx = 0;
if (dy)
{
if (dx >= 0)
{
e->signdx = 1;
e->stepx = dx / dy;
e->dx = dx % dy;
e->e = -dy;
}
else
{
e->signdx = -1;
e->stepx = -(-dx / dy);
e->dx = -dx % dy;
e->e = 0;
}
_pixman_edge_multi_init (e, STEP_Y_SMALL (n),
&e->stepx_small, &e->dx_small);
_pixman_edge_multi_init (e, STEP_Y_BIG (n),
&e->stepx_big, &e->dx_big);
}
pixman_edge_step (e, y_start - y_top);
}
/*
* Initialize one edge structure given a line, starting y value
* and a pixel offset for the line
*/
PIXMAN_EXPORT void
pixman_line_fixed_edge_init (pixman_edge_t * e,
int n,
pixman_fixed_t y,
const pixman_line_fixed_t *line,
int x_off,
int y_off)
{
pixman_fixed_t x_off_fixed = pixman_int_to_fixed (x_off);
pixman_fixed_t y_off_fixed = pixman_int_to_fixed (y_off);
const pixman_point_fixed_t *top, *bot;
if (line->p1.y <= line->p2.y)
{
top = &line->p1;
bot = &line->p2;
}
else
{
top = &line->p2;
bot = &line->p1;
}
pixman_edge_init (e, n, y,
top->x + x_off_fixed,
top->y + y_off_fixed,
bot->x + x_off_fixed,
bot->y + y_off_fixed);
}
PIXMAN_EXPORT void
pixman_add_traps (pixman_image_t * image,
int16_t x_off,
int16_t y_off,
int ntrap,
pixman_trap_t *traps)
pixman_add_traps (pixman_image_t * image,
int16_t x_off,
int16_t y_off,
int ntrap,
pixman_trap_t * traps)
{
int bpp;
int width;
int height;
int bpp;
int width;
int height;
pixman_fixed_t x_off_fixed;
pixman_fixed_t y_off_fixed;
pixman_edge_t l, r;
pixman_fixed_t t, b;
pixman_fixed_t x_off_fixed;
pixman_fixed_t y_off_fixed;
pixman_edge_t l, r;
pixman_fixed_t t, b;
_pixman_image_validate (image);
width = image->bits.width;
height = image->bits.height;
bpp = PIXMAN_FORMAT_BPP (image->bits.format);
x_off_fixed = pixman_int_to_fixed(x_off);
y_off_fixed = pixman_int_to_fixed(y_off);
x_off_fixed = pixman_int_to_fixed (x_off);
y_off_fixed = pixman_int_to_fixed (y_off);
while (ntrap--)
{
@@ -58,83 +258,82 @@ pixman_add_traps (pixman_image_t * image,
if (t < 0)
t = 0;
t = pixman_sample_ceil_y (t, bpp);
b = traps->bot.y + y_off_fixed;
if (pixman_fixed_to_int (b) >= height)
b = pixman_int_to_fixed (height) - 1;
b = pixman_sample_floor_y (b, bpp);
if (b >= t)
{
/* initialize edge walkers */
pixman_edge_init (&l, bpp, t,
traps->top.l + x_off_fixed,
traps->top.y + y_off_fixed,
traps->bot.l + x_off_fixed,
traps->bot.y + y_off_fixed);
traps->top.l + x_off_fixed,
traps->top.y + y_off_fixed,
traps->bot.l + x_off_fixed,
traps->bot.y + y_off_fixed);
pixman_edge_init (&r, bpp, t,
traps->top.r + x_off_fixed,
traps->top.y + y_off_fixed,
traps->bot.r + x_off_fixed,
traps->bot.y + y_off_fixed);
traps->top.r + x_off_fixed,
traps->top.y + y_off_fixed,
traps->bot.r + x_off_fixed,
traps->bot.y + y_off_fixed);
pixman_rasterize_edges (image, &l, &r, t, b);
}
traps++;
}
}
#if 0
static void
dump_image (pixman_image_t *image,
const char *title)
const char * title)
{
int i, j;
if (image->type != BITS)
{
printf ("%s is not a regular image\n", title);
}
if (image->bits.format != PIXMAN_a8)
{
printf ("%s is not an alpha mask\n", title);
}
printf ("\n\n\n%s: \n", title);
for (i = 0; i < image->bits.height; ++i)
{
uint8_t *line =
(uint8_t *)&(image->bits.bits[i * image->bits.rowstride]);
for (j = 0; j < image->bits.width; ++j)
printf ("%c", line[j]? '#' : ' ');
printf ("%c", line[j] ? '#' : ' ');
printf ("\n");
}
}
#endif
PIXMAN_EXPORT void
pixman_add_trapezoids (pixman_image_t *image,
int16_t x_off,
int y_off,
int ntraps,
const pixman_trapezoid_t *traps)
pixman_add_trapezoids (pixman_image_t * image,
int16_t x_off,
int y_off,
int ntraps,
const pixman_trapezoid_t *traps)
{
int i;
#if 0
dump_image (image, "before");
#endif
for (i = 0; i < ntraps; ++i)
{
const pixman_trapezoid_t *trap = &(traps[i]);
if (!pixman_trapezoid_valid (trap))
continue;
pixman_rasterize_trapezoid (image, trap, x_off, y_off);
}
@@ -144,21 +343,23 @@ pixman_add_trapezoids (pixman_image_t *image,
}
PIXMAN_EXPORT void
pixman_rasterize_trapezoid (pixman_image_t * image,
const pixman_trapezoid_t *trap,
int x_off,
int y_off)
pixman_rasterize_trapezoid (pixman_image_t * image,
const pixman_trapezoid_t *trap,
int x_off,
int y_off)
{
int bpp;
int width;
int height;
int bpp;
int width;
int height;
pixman_fixed_t x_off_fixed;
pixman_fixed_t y_off_fixed;
pixman_edge_t l, r;
pixman_fixed_t t, b;
pixman_fixed_t x_off_fixed;
pixman_fixed_t y_off_fixed;
pixman_edge_t l, r;
pixman_fixed_t t, b;
return_if_fail (image->type == BITS);
_pixman_image_validate (image);
if (!pixman_trapezoid_valid (trap))
return;
@@ -166,9 +367,10 @@ pixman_rasterize_trapezoid (pixman_image_t * image,
width = image->bits.width;
height = image->bits.height;
bpp = PIXMAN_FORMAT_BPP (image->bits.format);
x_off_fixed = pixman_int_to_fixed(x_off);
y_off_fixed = pixman_int_to_fixed(y_off);
x_off_fixed = pixman_int_to_fixed (x_off);
y_off_fixed = pixman_int_to_fixed (y_off);
t = trap->top + y_off_fixed;
if (t < 0)
t = 0;
@@ -178,7 +380,7 @@ pixman_rasterize_trapezoid (pixman_image_t * image,
if (pixman_fixed_to_int (b) >= height)
b = pixman_int_to_fixed (height) - 1;
b = pixman_sample_floor_y (b, bpp);
if (b >= t)
{
/* initialize edge walkers */
@@ -188,97 +390,3 @@ pixman_rasterize_trapezoid (pixman_image_t * image,
pixman_rasterize_edges (image, &l, &r, t, b);
}
}
#if 0
static int
_GreaterY (pixman_point_fixed_t *a, pixman_point_fixed_t *b)
{
if (a->y == b->y)
return a->x > b->x;
return a->y > b->y;
}
/*
* Note that the definition of this function is a bit odd because
* of the X coordinate space (y increasing downwards).
*/
static int
_Clockwise (pixman_point_fixed_t *ref, pixman_point_fixed_t *a, pixman_point_fixed_t *b)
{
pixman_point_fixed_t ad, bd;
ad.x = a->x - ref->x;
ad.y = a->y - ref->y;
bd.x = b->x - ref->x;
bd.y = b->y - ref->y;
return ((pixman_fixed_32_32_t) bd.y * ad.x - (pixman_fixed_32_32_t) ad.y * bd.x) < 0;
}
/* FIXME -- this could be made more efficient */
void
fbAddTriangles (pixman_image_t * pPicture,
int16_t x_off,
int16_t y_off,
int ntri,
xTriangle *tris)
{
pixman_point_fixed_t *top, *left, *right, *tmp;
xTrapezoid trap;
for (; ntri; ntri--, tris++)
{
top = &tris->p1;
left = &tris->p2;
right = &tris->p3;
if (_GreaterY (top, left)) {
tmp = left; left = top; top = tmp;
}
if (_GreaterY (top, right)) {
tmp = right; right = top; top = tmp;
}
if (_Clockwise (top, right, left)) {
tmp = right; right = left; left = tmp;
}
/*
* Two cases:
*
* + +
* / \ / \
* / \ / \
* / + + \
* / -- -- \
* / -- -- \
* / --- --- \
* +-- --+
*/
trap.top = top->y;
trap.left.p1 = *top;
trap.left.p2 = *left;
trap.right.p1 = *top;
trap.right.p2 = *right;
if (right->y < left->y)
trap.bottom = right->y;
else
trap.bottom = left->y;
fbRasterizeTrapezoid (pPicture, &trap, x_off, y_off);
if (right->y < left->y)
{
trap.top = right->y;
trap.bottom = left->y;
trap.right.p1 = *right;
trap.right.p2 = *left;
}
else
{
trap.top = left->y;
trap.bottom = right->y;
trap.left.p1 = *left;
trap.left.p2 = *right;
}
fbRasterizeTrapezoid (pPicture, &trap, x_off, y_off);
}
}
#endif

Diff not shown due to its large size. Load diff

View file

@@ -32,10 +32,10 @@
#endif
#define PIXMAN_VERSION_MAJOR 0
#define PIXMAN_VERSION_MINOR 13
#define PIXMAN_VERSION_MICRO 3
#define PIXMAN_VERSION_MINOR 15
#define PIXMAN_VERSION_MICRO 17
#define PIXMAN_VERSION_STRING "0.13.3"
#define PIXMAN_VERSION_STRING "0.15.17"
#define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \
((major) * 10000) \
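The continuation lines of PIXMAN_VERSION_ENCODE are cut off by the diff context here; in the released header the macro evaluates to major * 10000 + minor * 100 + micro, which is what makes the "later versions compare greater" promise in the docs below hold. A quick check under that assumption:
#include <assert.h>
/* Assumed from the released pixman.h; only the first line of the
 * macro is visible in the hunk above. */
#define PIXMAN_VERSION_ENCODE(major, minor, micro) \
    (((major) * 10000) + ((minor) * 100) + ((micro) * 1))
int
main (void)
{
    /* 0.15.17 is the version this update moves to. */
    assert (PIXMAN_VERSION_ENCODE (0, 15, 17) == 1517);
    assert (PIXMAN_VERSION_ENCODE (0, 15, 17) >
            PIXMAN_VERSION_ENCODE (0, 13, 3));
    return 0;
}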

Diff not shown due to its large size. Load diff

Diff not shown due to its large size. Load diff

View file

@@ -0,0 +1,263 @@
#ifndef MMX_X64_H_INCLUDED
#define MMX_X64_H_INCLUDED
/* Implementation of x64 MMX substitution functions, before
* pixman is reimplemented not to use __m64 type on Visual C++
*
* Copyright (C)2009 by George Yohng
* Released in public domain.
*/
#include <intrin.h>
#define M64C(a) (*(const __m64 *)(&a))
#define M64U(a) (*(const unsigned long long *)(&a))
__inline __m64
_m_from_int (int a)
{
long long i64 = a;
return M64C (i64);
}
__inline __m64
_mm_setzero_si64 ()
{
long long i64 = 0;
return M64C (i64);
}
__inline __m64
_mm_set_pi32 (int i1, int i0)
{
unsigned long long i64 = ((unsigned)i0) + (((unsigned long long)(unsigned)i1) << 32);
return M64C (i64);
}
__inline void
_m_empty ()
{
}
__inline __m64
_mm_set1_pi16 (short w)
{
unsigned long long i64 = ((unsigned long long)(unsigned short)(w)) * 0x0001000100010001ULL;
return M64C (i64);
}
__inline int
_m_to_int (__m64 m)
{
return m.m64_i32[0];
}
__inline __m64
_mm_movepi64_pi64 (__m128i a)
{
return M64C (a.m128i_i64[0]);
}
__inline __m64
_m_pand (__m64 a, __m64 b)
{
unsigned long long i64 = M64U (a) & M64U (b);
return M64C (i64);
}
__inline __m64
_m_por (__m64 a, __m64 b)
{
unsigned long long i64 = M64U (a) | M64U (b);
return M64C (i64);
}
__inline __m64
_m_pxor (__m64 a, __m64 b)
{
unsigned long long i64 = M64U (a) ^ M64U (b);
return M64C (i64);
}
__inline __m64
_m_pmulhuw (__m64 a, __m64 b) /* unoptimized */
{
unsigned short d[4] =
{
(unsigned short)((((unsigned)a.m64_u16[0]) * b.m64_u16[0]) >> 16),
(unsigned short)((((unsigned)a.m64_u16[1]) * b.m64_u16[1]) >> 16),
(unsigned short)((((unsigned)a.m64_u16[2]) * b.m64_u16[2]) >> 16),
(unsigned short)((((unsigned)a.m64_u16[3]) * b.m64_u16[3]) >> 16)
};
return M64C (d[0]);
}
__inline __m64
_m_pmullw2 (__m64 a, __m64 b) /* unoptimized */
{
unsigned short d[4] =
{
(unsigned short)((((unsigned)a.m64_u16[0]) * b.m64_u16[0])),
(unsigned short)((((unsigned)a.m64_u16[1]) * b.m64_u16[1])),
(unsigned short)((((unsigned)a.m64_u16[2]) * b.m64_u16[2])),
(unsigned short)((((unsigned)a.m64_u16[3]) * b.m64_u16[3]))
};
return M64C (d[0]);
}
__inline __m64
_m_pmullw (__m64 a, __m64 b) /* unoptimized */
{
unsigned long long x =
((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[0]) * b.m64_u16[0]))) +
(((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[1]) * b.m64_u16[1]))) << 16) +
(((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[2]) * b.m64_u16[2]))) << 32) +
(((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[3]) * b.m64_u16[3]))) << 48);
return M64C (x);
}
__inline __m64
_m_paddusb (__m64 a, __m64 b) /* unoptimized */
{
unsigned long long x = (M64U (a) & 0x00FF00FF00FF00FFULL) +
(M64U (b) & 0x00FF00FF00FF00FFULL);
unsigned long long y = ((M64U (a) >> 8) & 0x00FF00FF00FF00FFULL) +
((M64U (b) >> 8) & 0x00FF00FF00FF00FFULL);
x |= ((x & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
y |= ((y & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
x = (x & 0x00FF00FF00FF00FFULL) | ((y & 0x00FF00FF00FF00FFULL) << 8);
return M64C (x);
}
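_m_paddusb saturates per byte without branching: even and odd bytes are summed in separate 16-bit lanes, and any lane that carried into its high byte gets its low byte flooded with 1s by the multiply-by-0xFF trick. A standalone demo of one half of that trick, with illustrative values:
#include <stdint.h>
#include <stdio.h>
int
main (void)
{
    uint64_t a = 0x00F000A000500010ULL; /* even-byte lanes F0 A0 50 10 */
    uint64_t b = 0x0030003000300030ULL; /* add 0x30 to each lane       */
    uint64_t x = (a & 0x00FF00FF00FF00FFULL) + (b & 0x00FF00FF00FF00FFULL);
    /* A lane that overflowed 0xFF now has 0x01 in its high byte;
     * multiplying that byte by 0xFF turns it into a 0xFF low byte. */
    x |= ((x & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
    x &= 0x00FF00FF00FF00FFULL;
    /* F0+30 saturates to FF; the others stay exact: 00ff00d000800040 */
    printf ("%016llx\n", (unsigned long long) x);
    return 0;
}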
__inline __m64
_m_paddusw (__m64 a, __m64 b) /* unoptimized */
{
unsigned long long x = (M64U (a) & 0x0000FFFF0000FFFFULL) +
(M64U (b) & 0x0000FFFF0000FFFFULL);
unsigned long long y = ((M64U (a) >> 16) & 0x0000FFFF0000FFFFULL) +
((M64U (b) >> 16) & 0x0000FFFF0000FFFFULL);
x |= ((x & 0xFFFF0000FFFF0000ULL) >> 16) * 0xFFFF;
y |= ((y & 0xFFFF0000FFFF0000ULL) >> 16) * 0xFFFF;
x = (x & 0x0000FFFF0000FFFFULL) | ((y & 0x0000FFFF0000FFFFULL) << 16);
return M64C (x);
}
__inline __m64
_m_pshufw (__m64 a, int n) /* unoptimized */
{
unsigned short d[4] =
{
a.m64_u16[n & 3],
a.m64_u16[(n >> 2) & 3],
a.m64_u16[(n >> 4) & 3],
a.m64_u16[(n >> 6) & 3]
};
return M64C (d[0]);
}
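The selector byte of _m_pshufw packs four 2-bit source-lane indices: bits 1:0 pick the lane copied into d[0], bits 3:2 the lane for d[1], and so on, so 0x1B (binary 00 01 10 11) reverses the four 16-bit lanes. A quick standalone check of that decoding:
#include <stdint.h>
#include <stdio.h>
int
main (void)
{
    uint16_t a[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };
    uint16_t d[4];
    int      n = 0x1B; /* 00 01 10 11: full lane reversal */
    d[0] = a[n & 3];
    d[1] = a[(n >> 2) & 3];
    d[2] = a[(n >> 4) & 3];
    d[3] = a[(n >> 6) & 3];
    /* prints: 4444 3333 2222 1111 */
    printf ("%04x %04x %04x %04x\n", d[0], d[1], d[2], d[3]);
    return 0;
}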
__inline unsigned char
sat16 (unsigned short d)
{
if (d > 0xFF) return 0xFF;
else return d & 0xFF;
}
__inline __m64
_m_packuswb (__m64 m1, __m64 m2) /* unoptimized */
{
unsigned char d[8] =
{
sat16 (m1.m64_u16[0]),
sat16 (m1.m64_u16[1]),
sat16 (m1.m64_u16[2]),
sat16 (m1.m64_u16[3]),
sat16 (m2.m64_u16[0]),
sat16 (m2.m64_u16[1]),
sat16 (m2.m64_u16[2]),
sat16 (m2.m64_u16[3])
};
return M64C (d[0]);
}
__inline __m64 _m_punpcklbw (__m64 m1, __m64 m2) /* unoptimized */
{
unsigned char d[8] =
{
m1.m64_u8[0],
m2.m64_u8[0],
m1.m64_u8[1],
m2.m64_u8[1],
m1.m64_u8[2],
m2.m64_u8[2],
m1.m64_u8[3],
m2.m64_u8[3],
};
return M64C (d[0]);
}
__inline __m64 _m_punpckhbw (__m64 m1, __m64 m2) /* unoptimized */
{
unsigned char d[8] =
{
m1.m64_u8[4],
m2.m64_u8[4],
m1.m64_u8[5],
m2.m64_u8[5],
m1.m64_u8[6],
m2.m64_u8[6],
m1.m64_u8[7],
m2.m64_u8[7],
};
return M64C (d[0]);
}
__inline __m64 _m_psrlwi (__m64 a, int n) /* unoptimized */
{
unsigned short d[4] =
{
a.m64_u16[0] >> n,
a.m64_u16[1] >> n,
a.m64_u16[2] >> n,
a.m64_u16[3] >> n
};
return M64C (d[0]);
}
__inline __m64 _m_psrlqi (__m64 m, int n)
{
unsigned long long x = M64U (m) >> n;
return M64C (x);
}
__inline __m64 _m_psllqi (__m64 m, int n)
{
unsigned long long x = M64U (m) << n;
return M64C (x);
}
#endif /* MMX_X64_H_INCLUDED */

View file

@@ -0,0 +1,543 @@
/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Keith Packard, SuSE, Inc.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
/*
* Operator optimizations based on source or destination opacity
*/
typedef struct
{
pixman_op_t op;
pixman_op_t op_src_dst_opaque;
pixman_op_t op_src_opaque;
pixman_op_t op_dst_opaque;
} optimized_operator_info_t;
static const optimized_operator_info_t optimized_operators[] =
{
/* Input Operator SRC&DST Opaque SRC Opaque DST Opaque */
{ PIXMAN_OP_OVER, PIXMAN_OP_SRC, PIXMAN_OP_SRC, PIXMAN_OP_OVER },
{ PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_DST, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_DST },
{ PIXMAN_OP_IN, PIXMAN_OP_SRC, PIXMAN_OP_IN, PIXMAN_OP_SRC },
{ PIXMAN_OP_IN_REVERSE, PIXMAN_OP_DST, PIXMAN_OP_DST, PIXMAN_OP_IN_REVERSE },
{ PIXMAN_OP_OUT, PIXMAN_OP_CLEAR, PIXMAN_OP_OUT, PIXMAN_OP_CLEAR },
{ PIXMAN_OP_OUT_REVERSE, PIXMAN_OP_CLEAR, PIXMAN_OP_CLEAR, PIXMAN_OP_OUT_REVERSE },
{ PIXMAN_OP_ATOP, PIXMAN_OP_SRC, PIXMAN_OP_IN, PIXMAN_OP_OVER },
{ PIXMAN_OP_ATOP_REVERSE, PIXMAN_OP_DST, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_IN_REVERSE },
{ PIXMAN_OP_XOR, PIXMAN_OP_CLEAR, PIXMAN_OP_OUT, PIXMAN_OP_OUT_REVERSE },
{ PIXMAN_OP_SATURATE, PIXMAN_OP_DST, PIXMAN_OP_OVER_REVERSE, PIXMAN_OP_DST },
{ PIXMAN_OP_NONE }
};
static pixman_implementation_t *imp;
/*
* Check if the current operator could be optimized
*/
static const optimized_operator_info_t*
pixman_operator_can_be_optimized (pixman_op_t op)
{
const optimized_operator_info_t *info;
for (info = optimized_operators; info->op != PIXMAN_OP_NONE; info++)
{
if (info->op == op)
return info;
}
return NULL;
}
/*
* Optimize the current operator based on opacity of source or destination
* The output operator should be mathematically equivalent to the source.
*/
static pixman_op_t
pixman_optimize_operator (pixman_op_t op,
pixman_image_t *src_image,
pixman_image_t *mask_image,
pixman_image_t *dst_image)
{
pixman_bool_t is_source_opaque;
pixman_bool_t is_dest_opaque;
const optimized_operator_info_t *info = pixman_operator_can_be_optimized (op);
if (!info || mask_image)
return op;
is_source_opaque = _pixman_image_is_opaque (src_image);
is_dest_opaque = _pixman_image_is_opaque (dst_image);
if (is_source_opaque == FALSE && is_dest_opaque == FALSE)
return op;
if (is_source_opaque && is_dest_opaque)
return info->op_src_dst_opaque;
else if (is_source_opaque)
return info->op_src_opaque;
else if (is_dest_opaque)
return info->op_dst_opaque;
return op;
}
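The table above strength-reduces an operator once the source and/or destination is known to be opaque; OVER with an opaque source becomes SRC, for instance, because the (1 - alpha_src) * dest term vanishes. A trimmed-down sketch of the lookup, using toy enums rather than the pixman types:
#include <stdio.h>
typedef enum { OP_CLEAR, OP_SRC, OP_DST, OP_OVER, OP_NONE } op_t;
typedef struct
{
    op_t op, src_dst_opaque, src_opaque, dst_opaque;
} opt_info_t;
static const opt_info_t table[] =
{
    { OP_OVER, OP_SRC, OP_SRC, OP_OVER },
    { OP_NONE }
};
static op_t
optimize (op_t op, int src_opaque, int dst_opaque)
{
    const opt_info_t *info;
    for (info = table; info->op != OP_NONE; info++)
    {
        if (info->op != op)
            continue;
        if (src_opaque && dst_opaque)
            return info->src_dst_opaque;
        if (src_opaque)
            return info->src_opaque;
        if (dst_opaque)
            return info->dst_opaque;
    }
    return op;
}
int
main (void)
{
    printf ("%d\n", optimize (OP_OVER, 1, 0) == OP_SRC); /* prints 1 */
    return 0;
}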
static void
apply_workaround (pixman_image_t *image,
int16_t * x,
int16_t * y,
uint32_t ** save_bits,
int * save_dx,
int * save_dy)
{
/* Some X servers generate images that point to the
* wrong place in memory, but then set the clip region
* to point to the right place. Because of an old bug
* in pixman, this would actually work.
*
* Here we try and undo the damage
*/
int bpp = PIXMAN_FORMAT_BPP (image->bits.format) / 8;
pixman_box32_t *extents;
uint8_t *t;
int dx, dy;
extents = pixman_region32_extents (&(image->common.clip_region));
dx = extents->x1;
dy = extents->y1;
*save_bits = image->bits.bits;
*x -= dx;
*y -= dy;
pixman_region32_translate (&(image->common.clip_region), -dx, -dy);
t = (uint8_t *)image->bits.bits;
t += dy * image->bits.rowstride * 4 + dx * bpp;
image->bits.bits = (uint32_t *)t;
*save_dx = dx;
*save_dy = dy;
}
static void
unapply_workaround (pixman_image_t *image, uint32_t *bits, int dx, int dy)
{
image->bits.bits = bits;
pixman_region32_translate (&image->common.clip_region, dx, dy);
}
PIXMAN_EXPORT void
pixman_image_composite (pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int16_t src_x,
int16_t src_y,
int16_t mask_x,
int16_t mask_y,
int16_t dest_x,
int16_t dest_y,
uint16_t width,
uint16_t height)
{
uint32_t *src_bits;
int src_dx, src_dy;
uint32_t *mask_bits;
int mask_dx, mask_dy;
uint32_t *dest_bits;
int dest_dx, dest_dy;
_pixman_image_validate (src);
if (mask)
_pixman_image_validate (mask);
_pixman_image_validate (dest);
/*
* Check if we can replace our operator by a simpler one
* if the src or dest are opaque. The output operator should be
* mathematically equivalent to the source.
*/
op = pixman_optimize_operator(op, src, mask, dest);
if (op == PIXMAN_OP_DST ||
op == PIXMAN_OP_CONJOINT_DST ||
op == PIXMAN_OP_DISJOINT_DST)
{
return;
}
if (!imp)
imp = _pixman_choose_implementation ();
if (src->common.need_workaround)
apply_workaround (src, &src_x, &src_y, &src_bits, &src_dx, &src_dy);
if (mask && mask->common.need_workaround)
apply_workaround (mask, &mask_x, &mask_y, &mask_bits, &mask_dx, &mask_dy);
if (dest->common.need_workaround)
apply_workaround (dest, &dest_x, &dest_y, &dest_bits, &dest_dx, &dest_dy);
_pixman_implementation_composite (imp, op,
src, mask, dest,
src_x, src_y,
mask_x, mask_y,
dest_x, dest_y,
width, height);
if (src->common.need_workaround)
unapply_workaround (src, src_bits, src_dx, src_dy);
if (mask && mask->common.need_workaround)
unapply_workaround (mask, mask_bits, mask_dx, mask_dy);
if (dest->common.need_workaround)
unapply_workaround (dest, dest_bits, dest_dx, dest_dy);
}
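A minimal sketch of driving the entry point above, assuming a program linked against this pixman: validation, the operator strength reduction, and the X-server workaround all happen inside the call.
#include <stdlib.h>
#include "pixman.h"
int
main (void)
{
    uint32_t       *bits = calloc (32 * 32, 4);
    pixman_image_t *dst  =
        pixman_image_create_bits (PIXMAN_x8r8g8b8, 32, 32, bits, 32 * 4);
    pixman_color_t  blue = { 0x0000, 0x0000, 0xffff, 0xffff };
    pixman_image_t *src  = pixman_image_create_solid_fill (&blue);
    /* Opaque solid source OVER an opaque destination: the code above
     * quietly demotes this to PIXMAN_OP_SRC before compositing. */
    pixman_image_composite (PIXMAN_OP_OVER, src, NULL, dst,
                            0, 0, 0, 0, 0, 0, 32, 32);
    pixman_image_unref (src);
    pixman_image_unref (dst);
    free (bits);
    return 0;
}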
PIXMAN_EXPORT pixman_bool_t
pixman_blt (uint32_t *src_bits,
uint32_t *dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height)
{
if (!imp)
imp = _pixman_choose_implementation ();
return _pixman_implementation_blt (imp, src_bits, dst_bits, src_stride, dst_stride,
src_bpp, dst_bpp,
src_x, src_y,
dst_x, dst_y,
width, height);
}
PIXMAN_EXPORT pixman_bool_t
pixman_fill (uint32_t *bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor)
{
if (!imp)
imp = _pixman_choose_implementation ();
return _pixman_implementation_fill (imp, bits, stride, bpp, x, y, width, height, xor);
}
static uint32_t
color_to_uint32 (const pixman_color_t *color)
{
return
(color->alpha >> 8 << 24) |
(color->red >> 8 << 16) |
(color->green & 0xff00) |
(color->blue >> 8);
}
static pixman_bool_t
color_to_pixel (pixman_color_t * color,
uint32_t * pixel,
pixman_format_code_t format)
{
uint32_t c = color_to_uint32 (color);
if (!(format == PIXMAN_a8r8g8b8 ||
format == PIXMAN_x8r8g8b8 ||
format == PIXMAN_a8b8g8r8 ||
format == PIXMAN_x8b8g8r8 ||
format == PIXMAN_b8g8r8a8 ||
format == PIXMAN_b8g8r8x8 ||
format == PIXMAN_r5g6b5 ||
format == PIXMAN_b5g6r5 ||
format == PIXMAN_a8))
{
return FALSE;
}
if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_ABGR)
{
c = ((c & 0xff000000) >> 0) |
((c & 0x00ff0000) >> 16) |
((c & 0x0000ff00) >> 0) |
((c & 0x000000ff) << 16);
}
if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_BGRA)
{
c = ((c & 0xff000000) >> 24) |
((c & 0x00ff0000) >> 8) |
((c & 0x0000ff00) << 8) |
((c & 0x000000ff) << 24);
}
if (format == PIXMAN_a8)
c = c >> 24;
else if (format == PIXMAN_r5g6b5 ||
format == PIXMAN_b5g6r5)
c = CONVERT_8888_TO_0565 (c);
#if 0
printf ("color: %x %x %x %x\n", color->alpha, color->red, color->green, color->blue);
printf ("pixel: %x\n", c);
#endif
*pixel = c;
return TRUE;
}
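The PIXMAN_TYPE_BGRA branch above is a full byte reversal of the ARGB word; on GCC and Clang it coincides with __builtin_bswap32 (an observation, not something the code relies on). A standalone check:
#include <stdint.h>
#include <assert.h>
static uint32_t
argb_to_bgra (uint32_t c) /* same shifts as the BGRA branch above */
{
    return ((c & 0xff000000) >> 24) |
           ((c & 0x00ff0000) >>  8) |
           ((c & 0x0000ff00) <<  8) |
           ((c & 0x000000ff) << 24);
}
int
main (void)
{
    assert (argb_to_bgra (0xAABBCCDD) == 0xDDCCBBAA);
#ifdef __GNUC__
    assert (argb_to_bgra (0x12345678) == __builtin_bswap32 (0x12345678));
#endif
    return 0;
}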
PIXMAN_EXPORT pixman_bool_t
pixman_image_fill_rectangles (pixman_op_t op,
pixman_image_t * dest,
pixman_color_t * color,
int n_rects,
const pixman_rectangle16_t *rects)
{
pixman_image_t *solid;
pixman_color_t c;
int i;
_pixman_image_validate (dest);
if (color->alpha == 0xffff)
{
if (op == PIXMAN_OP_OVER)
op = PIXMAN_OP_SRC;
}
if (op == PIXMAN_OP_CLEAR)
{
c.red = 0;
c.green = 0;
c.blue = 0;
c.alpha = 0;
color = &c;
op = PIXMAN_OP_SRC;
}
if (op == PIXMAN_OP_SRC)
{
uint32_t pixel;
if (color_to_pixel (color, &pixel, dest->bits.format))
{
for (i = 0; i < n_rects; ++i)
{
pixman_region32_t fill_region;
int n_boxes, j;
pixman_box32_t *boxes;
pixman_region32_init_rect (&fill_region, rects[i].x, rects[i].y, rects[i].width, rects[i].height);
if (dest->common.have_clip_region)
{
if (!pixman_region32_intersect (&fill_region,
&fill_region,
&dest->common.clip_region))
return FALSE;
}
boxes = pixman_region32_rectangles (&fill_region, &n_boxes);
for (j = 0; j < n_boxes; ++j)
{
const pixman_box32_t *box = &(boxes[j]);
pixman_fill (dest->bits.bits, dest->bits.rowstride, PIXMAN_FORMAT_BPP (dest->bits.format),
box->x1, box->y1, box->x2 - box->x1, box->y2 - box->y1,
pixel);
}
pixman_region32_fini (&fill_region);
}
return TRUE;
}
}
solid = pixman_image_create_solid_fill (color);
if (!solid)
return FALSE;
for (i = 0; i < n_rects; ++i)
{
const pixman_rectangle16_t *rect = &(rects[i]);
pixman_image_composite (op, solid, NULL, dest,
0, 0, 0, 0,
rect->x, rect->y,
rect->width, rect->height);
}
pixman_image_unref (solid);
return TRUE;
}
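Seen from the caller, the fast path above means an opaque color with PIXMAN_OP_OVER is demoted to PIXMAN_OP_SRC, converted by color_to_pixel, and each clipped box is handed straight to pixman_fill with no compositing pipeline. A minimal sketch of such a call, assuming this pixman build:
#include <stdlib.h>
#include "pixman.h"
int
main (void)
{
    uint32_t            *bits = calloc (64 * 64, 4);
    pixman_image_t      *img  =
        pixman_image_create_bits (PIXMAN_a8r8g8b8, 64, 64, bits, 64 * 4);
    pixman_color_t       red  = { 0xffff, 0x0000, 0x0000, 0xffff };
    pixman_rectangle16_t rect = { 8, 8, 16, 16 };
    /* alpha == 0xffff, so OP_OVER takes the pixman_fill shortcut. */
    pixman_image_fill_rectangles (PIXMAN_OP_OVER, img, &red, 1, &rect);
    pixman_image_unref (img);
    free (bits);
    return 0;
}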
/**
* pixman_version:
*
* Returns the version of the pixman library encoded in a single
* integer as per %PIXMAN_VERSION_ENCODE. The encoding ensures that
* later versions compare greater than earlier versions.
*
* A run-time comparison to check that pixman's version is greater than
* or equal to version X.Y.Z could be performed as follows:
*
* <informalexample><programlisting>
* if (pixman_version() >= PIXMAN_VERSION_ENCODE(X,Y,Z)) {...}
* </programlisting></informalexample>
*
* See also pixman_version_string() as well as the compile-time
* equivalents %PIXMAN_VERSION and %PIXMAN_VERSION_STRING.
*
* Return value: the encoded version.
**/
PIXMAN_EXPORT int
pixman_version (void)
{
return PIXMAN_VERSION;
}
/**
* pixman_version_string:
*
* Returns the version of the pixman library as a human-readable string
* of the form "X.Y.Z".
*
* See also pixman_version() as well as the compile-time equivalents
* %PIXMAN_VERSION_STRING and %PIXMAN_VERSION.
*
* Return value: a string containing the version.
**/
PIXMAN_EXPORT const char*
pixman_version_string (void)
{
return PIXMAN_VERSION_STRING;
}
/**
* pixman_format_supported_source:
* @format: A pixman_format_code_t format
*
* Return value: whether the provided format code is a supported
* format for a pixman surface used as a source in
* rendering.
*
* Currently, all pixman_format_code_t values are supported.
**/
PIXMAN_EXPORT pixman_bool_t
pixman_format_supported_source (pixman_format_code_t format)
{
switch (format)
{
/* 32 bpp formats */
case PIXMAN_a2b10g10r10:
case PIXMAN_x2b10g10r10:
case PIXMAN_a2r10g10b10:
case PIXMAN_x2r10g10b10:
case PIXMAN_a8r8g8b8:
case PIXMAN_x8r8g8b8:
case PIXMAN_a8b8g8r8:
case PIXMAN_x8b8g8r8:
case PIXMAN_b8g8r8a8:
case PIXMAN_b8g8r8x8:
case PIXMAN_r8g8b8:
case PIXMAN_b8g8r8:
case PIXMAN_r5g6b5:
case PIXMAN_b5g6r5:
/* 16 bpp formats */
case PIXMAN_a1r5g5b5:
case PIXMAN_x1r5g5b5:
case PIXMAN_a1b5g5r5:
case PIXMAN_x1b5g5r5:
case PIXMAN_a4r4g4b4:
case PIXMAN_x4r4g4b4:
case PIXMAN_a4b4g4r4:
case PIXMAN_x4b4g4r4:
/* 8bpp formats */
case PIXMAN_a8:
case PIXMAN_r3g3b2:
case PIXMAN_b2g3r3:
case PIXMAN_a2r2g2b2:
case PIXMAN_a2b2g2r2:
case PIXMAN_c8:
case PIXMAN_g8:
case PIXMAN_x4a4:
/* Collides with PIXMAN_c8
case PIXMAN_x4c4:
*/
/* Collides with PIXMAN_g8
case PIXMAN_x4g4:
*/
/* 4bpp formats */
case PIXMAN_a4:
case PIXMAN_r1g2b1:
case PIXMAN_b1g2r1:
case PIXMAN_a1r1g1b1:
case PIXMAN_a1b1g1r1:
case PIXMAN_c4:
case PIXMAN_g4:
/* 1bpp formats */
case PIXMAN_a1:
case PIXMAN_g1:
/* YUV formats */
case PIXMAN_yuy2:
case PIXMAN_yv12:
return TRUE;
default:
return FALSE;
}
}
/**
* pixman_format_supported_destination:
* @format: A pixman_format_code_t format
*
* Return value: whether the provided format code is a supported
* format for a pixman surface used as a destination in
* rendering.
*
* Currently, all pixman_format_code_t values are supported
* except for the YUV formats.
**/
PIXMAN_EXPORT pixman_bool_t
pixman_format_supported_destination (pixman_format_code_t format)
{
/* YUV formats cannot be written to at the moment */
if (format == PIXMAN_yuy2 || format == PIXMAN_yv12)
return FALSE;
return pixman_format_supported_source (format);
}

View file

@@ -76,7 +76,7 @@ SOFTWARE.
/*
* Standard integers
*/
#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__)
#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__) || defined (__HP_cc)
# include <inttypes.h>
#elif defined (_MSC_VER)
typedef __int8 int8_t;
@@ -111,6 +111,7 @@ typedef pixman_fixed_16_16_t pixman_fixed_t;
#define pixman_fixed_e ((pixman_fixed_t) 1)
#define pixman_fixed_1 (pixman_int_to_fixed(1))
#define pixman_fixed_1_minus_e (pixman_fixed_1 - pixman_fixed_e)
#define pixman_fixed_minus_1 (pixman_int_to_fixed(-1))
#define pixman_fixed_to_int(f) ((int) ((f) >> 16))
#define pixman_int_to_fixed(i) ((pixman_fixed_t) ((i) << 16))
#define pixman_fixed_to_double(f) (double) ((f) / (double) pixman_fixed_1)
@@ -168,147 +169,96 @@ struct pixman_transform
/* forward declaration (sorry) */
struct pixman_box16;
void
pixman_transform_init_identity(struct pixman_transform *matrix);
pixman_bool_t
pixman_transform_point_3d (const struct pixman_transform *transform,
struct pixman_vector *vector);
pixman_bool_t
pixman_transform_point(const struct pixman_transform *transform,
struct pixman_vector *vector);
pixman_bool_t
pixman_transform_multiply (struct pixman_transform *dst,
const struct pixman_transform *l,
const struct pixman_transform *r);
void
pixman_transform_init_scale (struct pixman_transform *t,
pixman_fixed_t sx,
pixman_fixed_t sy);
pixman_bool_t
pixman_transform_scale(struct pixman_transform *forward,
struct pixman_transform *reverse,
pixman_fixed_t sx, pixman_fixed_t sy);
void
pixman_transform_init_rotate(struct pixman_transform *t,
pixman_fixed_t cos,
pixman_fixed_t sin);
pixman_bool_t
pixman_transform_rotate(struct pixman_transform *forward,
struct pixman_transform *reverse,
pixman_fixed_t c, pixman_fixed_t s);
void
pixman_transform_init_translate(struct pixman_transform *t,
pixman_fixed_t tx, pixman_fixed_t ty);
pixman_bool_t
pixman_transform_translate(struct pixman_transform *forward,
struct pixman_transform *reverse,
pixman_fixed_t tx, pixman_fixed_t ty);
pixman_bool_t
pixman_transform_bounds(const struct pixman_transform *matrix,
struct pixman_box16 *b);
pixman_bool_t
pixman_transform_invert (struct pixman_transform *dst,
const struct pixman_transform *src);
pixman_bool_t
pixman_transform_is_identity(const struct pixman_transform *t);
pixman_bool_t
pixman_transform_is_scale(const struct pixman_transform *t);
pixman_bool_t
pixman_transform_is_int_translate(const struct pixman_transform *t);
pixman_bool_t
pixman_transform_is_inverse (const struct pixman_transform *a,
const struct pixman_transform *b);
void pixman_transform_init_identity (struct pixman_transform *matrix);
pixman_bool_t pixman_transform_point_3d (const struct pixman_transform *transform,
struct pixman_vector *vector);
pixman_bool_t pixman_transform_point (const struct pixman_transform *transform,
struct pixman_vector *vector);
pixman_bool_t pixman_transform_multiply (struct pixman_transform *dst,
const struct pixman_transform *l,
const struct pixman_transform *r);
void pixman_transform_init_scale (struct pixman_transform *t,
pixman_fixed_t sx,
pixman_fixed_t sy);
pixman_bool_t pixman_transform_scale (struct pixman_transform *forward,
struct pixman_transform *reverse,
pixman_fixed_t sx,
pixman_fixed_t sy);
void pixman_transform_init_rotate (struct pixman_transform *t,
pixman_fixed_t cos,
pixman_fixed_t sin);
pixman_bool_t pixman_transform_rotate (struct pixman_transform *forward,
struct pixman_transform *reverse,
pixman_fixed_t c,
pixman_fixed_t s);
void pixman_transform_init_translate (struct pixman_transform *t,
pixman_fixed_t tx,
pixman_fixed_t ty);
pixman_bool_t pixman_transform_translate (struct pixman_transform *forward,
struct pixman_transform *reverse,
pixman_fixed_t tx,
pixman_fixed_t ty);
pixman_bool_t pixman_transform_bounds (const struct pixman_transform *matrix,
struct pixman_box16 *b);
pixman_bool_t pixman_transform_invert (struct pixman_transform *dst,
const struct pixman_transform *src);
pixman_bool_t pixman_transform_is_identity (const struct pixman_transform *t);
pixman_bool_t pixman_transform_is_scale (const struct pixman_transform *t);
pixman_bool_t pixman_transform_is_int_translate (const struct pixman_transform *t);
pixman_bool_t pixman_transform_is_inverse (const struct pixman_transform *a,
const struct pixman_transform *b);
/*
* Floating point matrices
*/
struct pixman_f_vector {
struct pixman_f_vector
{
double v[3];
};
struct pixman_f_transform {
struct pixman_f_transform
{
double m[3][3];
};
pixman_bool_t
pixman_transform_from_pixman_f_transform (struct pixman_transform *t,
const struct pixman_f_transform *ft);
pixman_bool_t pixman_transform_from_pixman_f_transform (struct pixman_transform *t,
const struct pixman_f_transform *ft);
void pixman_f_transform_from_pixman_transform (struct pixman_f_transform *ft,
const struct pixman_transform *t);
pixman_bool_t pixman_f_transform_invert (struct pixman_f_transform *dst,
const struct pixman_f_transform *src);
pixman_bool_t pixman_f_transform_point (const struct pixman_f_transform *t,
struct pixman_f_vector *v);
void pixman_f_transform_point_3d (const struct pixman_f_transform *t,
struct pixman_f_vector *v);
void pixman_f_transform_multiply (struct pixman_f_transform *dst,
const struct pixman_f_transform *l,
const struct pixman_f_transform *r);
void pixman_f_transform_init_scale (struct pixman_f_transform *t,
double sx,
double sy);
pixman_bool_t pixman_f_transform_scale (struct pixman_f_transform *forward,
struct pixman_f_transform *reverse,
double sx,
double sy);
void pixman_f_transform_init_rotate (struct pixman_f_transform *t,
double cos,
double sin);
pixman_bool_t pixman_f_transform_rotate (struct pixman_f_transform *forward,
struct pixman_f_transform *reverse,
double c,
double s);
void pixman_f_transform_init_translate (struct pixman_f_transform *t,
double tx,
double ty);
pixman_bool_t pixman_f_transform_translate (struct pixman_f_transform *forward,
struct pixman_f_transform *reverse,
double tx,
double ty);
pixman_bool_t pixman_f_transform_bounds (const struct pixman_f_transform *t,
struct pixman_box16 *b);
void pixman_f_transform_init_identity (struct pixman_f_transform *t);
void
pixman_f_transform_from_pixman_transform (struct pixman_f_transform *ft,
const struct pixman_transform *t);
pixman_bool_t
pixman_transform_from_pixman_f_transform (struct pixman_transform *t,
const struct pixman_f_transform *ft);
pixman_bool_t
pixman_f_transform_invert (struct pixman_f_transform *dst,
const struct pixman_f_transform *src);
pixman_bool_t
pixman_f_transform_point (const struct pixman_f_transform *t,
struct pixman_f_vector *v);
void
pixman_f_transform_point_3d (const struct pixman_f_transform *t,
struct pixman_f_vector *v);
void
pixman_f_transform_multiply (struct pixman_f_transform *dst,
const struct pixman_f_transform *l,
const struct pixman_f_transform *r);
void
pixman_f_transform_init_scale (struct pixman_f_transform *t, double sx, double sy);
pixman_bool_t
pixman_f_transform_scale (struct pixman_f_transform *forward,
struct pixman_f_transform *reverse,
double sx, double sy);
void
pixman_f_transform_init_rotate (struct pixman_f_transform *t, double cos, double sin);
pixman_bool_t
pixman_f_transform_rotate (struct pixman_f_transform *forward,
struct pixman_f_transform *reverse,
double c, double s);
void
pixman_f_transform_init_translate (struct pixman_f_transform *t, double tx, double ty);
pixman_bool_t
pixman_f_transform_translate (struct pixman_f_transform *forward,
struct pixman_f_transform *reverse,
double tx, double ty);
pixman_bool_t
pixman_f_transform_bounds (const struct pixman_f_transform *t, struct pixman_box16 *b);
void
pixman_f_transform_init_identity (struct pixman_f_transform *t);
/* Don't blame me, blame XRender */
typedef enum
{
PIXMAN_REPEAT_NONE,
@@ -370,7 +320,27 @@ typedef enum
PIXMAN_OP_CONJOINT_ATOP_REVERSE = 0x2a,
PIXMAN_OP_CONJOINT_XOR = 0x2b,
PIXMAN_OP_NONE
PIXMAN_OP_MULTIPLY = 0x30,
PIXMAN_OP_SCREEN = 0x31,
PIXMAN_OP_OVERLAY = 0x32,
PIXMAN_OP_DARKEN = 0x33,
PIXMAN_OP_LIGHTEN = 0x34,
PIXMAN_OP_COLOR_DODGE = 0x35,
PIXMAN_OP_COLOR_BURN = 0x36,
PIXMAN_OP_HARD_LIGHT = 0x37,
PIXMAN_OP_SOFT_LIGHT = 0x38,
PIXMAN_OP_DIFFERENCE = 0x39,
PIXMAN_OP_EXCLUSION = 0x3a,
PIXMAN_OP_HSL_HUE = 0x3b,
PIXMAN_OP_HSL_SATURATION = 0x3c,
PIXMAN_OP_HSL_COLOR = 0x3d,
PIXMAN_OP_HSL_LUMINOSITY = 0x3e
#ifdef PIXMAN_USE_INTERNAL_API
,
PIXMAN_N_OPERATORS,
PIXMAN_OP_NONE = PIXMAN_N_OPERATORS
#endif
} pixman_op_t;
/*
@@ -389,8 +359,8 @@ struct pixman_region16_data {
struct pixman_rectangle16
{
int16_t x, y;
uint16_t width, height;
int16_t x, y;
uint16_t width, height;
};
struct pixman_box16
@@ -401,7 +371,7 @@ struct pixman_box16
struct pixman_region16
{
pixman_box16_t extents;
pixman_region16_data_t *data;
pixman_region16_data_t *data;
};
typedef enum
@@ -411,70 +381,69 @@ typedef enum
PIXMAN_REGION_PART
} pixman_region_overlap_t;
/* This function exists only to make it possible to preserve the X ABI - it should
* go away at first opportunity.
/* This function exists only to make it possible to preserve
* the X ABI - it should go away at first opportunity.
*/
void pixman_region_set_static_pointers (pixman_box16_t *empty_box,
pixman_region16_data_t *empty_data,
pixman_region16_data_t *broken_data);
void pixman_region_set_static_pointers (pixman_box16_t *empty_box,
pixman_region16_data_t *empty_data,
pixman_region16_data_t *broken_data);
/* creation/destruction */
void pixman_region_init (pixman_region16_t *region);
void pixman_region_init_rect (pixman_region16_t *region,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_init_rects (pixman_region16_t *region,
pixman_box16_t *boxes,
int count);
void pixman_region_init_with_extents (pixman_region16_t *region,
pixman_box16_t *extents);
void pixman_region_fini (pixman_region16_t *region);
void pixman_region_init (pixman_region16_t *region);
void pixman_region_init_rect (pixman_region16_t *region,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_init_rects (pixman_region16_t *region,
pixman_box16_t *boxes,
int count);
void pixman_region_init_with_extents (pixman_region16_t *region,
pixman_box16_t *extents);
void pixman_region_fini (pixman_region16_t *region);
/* manipulation */
void pixman_region_translate (pixman_region16_t *region,
int x,
int y);
pixman_bool_t pixman_region_copy (pixman_region16_t *dest,
pixman_region16_t *source);
pixman_bool_t pixman_region_intersect (pixman_region16_t *newReg,
pixman_region16_t *reg1,
pixman_region16_t *reg2);
pixman_bool_t pixman_region_union (pixman_region16_t *newReg,
pixman_region16_t *reg1,
pixman_region16_t *reg2);
pixman_bool_t pixman_region_union_rect (pixman_region16_t *dest,
pixman_region16_t *source,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_subtract (pixman_region16_t *regD,
pixman_region16_t *regM,
pixman_region16_t *regS);
pixman_bool_t pixman_region_inverse (pixman_region16_t *newReg,
pixman_region16_t *reg1,
pixman_box16_t *invRect);
pixman_bool_t pixman_region_contains_point (pixman_region16_t *region,
int x,
int y,
pixman_box16_t *box);
pixman_region_overlap_t pixman_region_contains_rectangle (pixman_region16_t *pixman_region16_t,
pixman_box16_t *prect);
pixman_bool_t pixman_region_not_empty (pixman_region16_t *region);
pixman_box16_t * pixman_region_extents (pixman_region16_t *region);
int pixman_region_n_rects (pixman_region16_t *region);
pixman_box16_t * pixman_region_rectangles (pixman_region16_t *region,
int *n_rects);
pixman_bool_t pixman_region_equal (pixman_region16_t *region1,
pixman_region16_t *region2);
pixman_bool_t pixman_region_selfcheck (pixman_region16_t *region);
void pixman_region_reset (pixman_region16_t *region,
pixman_box16_t *box);
void pixman_region_translate (pixman_region16_t *region,
int x,
int y);
pixman_bool_t pixman_region_copy (pixman_region16_t *dest,
pixman_region16_t *source);
pixman_bool_t pixman_region_intersect (pixman_region16_t *new_reg,
pixman_region16_t *reg1,
pixman_region16_t *reg2);
pixman_bool_t pixman_region_union (pixman_region16_t *new_reg,
pixman_region16_t *reg1,
pixman_region16_t *reg2);
pixman_bool_t pixman_region_union_rect (pixman_region16_t *dest,
pixman_region16_t *source,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_subtract (pixman_region16_t *reg_d,
pixman_region16_t *reg_m,
pixman_region16_t *reg_s);
pixman_bool_t pixman_region_inverse (pixman_region16_t *new_reg,
pixman_region16_t *reg1,
pixman_box16_t *inv_rect);
pixman_bool_t pixman_region_contains_point (pixman_region16_t *region,
int x,
int y,
pixman_box16_t *box);
pixman_region_overlap_t pixman_region_contains_rectangle (pixman_region16_t *pixman_region16_t,
pixman_box16_t *prect);
pixman_bool_t pixman_region_not_empty (pixman_region16_t *region);
pixman_box16_t * pixman_region_extents (pixman_region16_t *region);
int pixman_region_n_rects (pixman_region16_t *region);
pixman_box16_t * pixman_region_rectangles (pixman_region16_t *region,
int *n_rects);
pixman_bool_t pixman_region_equal (pixman_region16_t *region1,
pixman_region16_t *region2);
pixman_bool_t pixman_region_selfcheck (pixman_region16_t *region);
void pixman_region_reset (pixman_region16_t *region,
pixman_box16_t *box);
/*
* 32 bit regions
*/
@@ -527,10 +496,10 @@ void pixman_region32_translate (pixman_region32_t *r
int y);
pixman_bool_t pixman_region32_copy (pixman_region32_t *dest,
pixman_region32_t *source);
pixman_bool_t pixman_region32_intersect (pixman_region32_t *newReg,
pixman_bool_t pixman_region32_intersect (pixman_region32_t *new_reg,
pixman_region32_t *reg1,
pixman_region32_t *reg2);
pixman_bool_t pixman_region32_union (pixman_region32_t *newReg,
pixman_bool_t pixman_region32_union (pixman_region32_t *new_reg,
pixman_region32_t *reg1,
pixman_region32_t *reg2);
pixman_bool_t pixman_region32_union_rect (pixman_region32_t *dest,
@@ -539,12 +508,12 @@ pixman_bool_t pixman_region32_union_rect (pixman_region32_t *d
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region32_subtract (pixman_region32_t *regD,
pixman_region32_t *regM,
pixman_region32_t *regS);
pixman_bool_t pixman_region32_inverse (pixman_region32_t *newReg,
pixman_bool_t pixman_region32_subtract (pixman_region32_t *reg_d,
pixman_region32_t *reg_m,
pixman_region32_t *reg_s);
pixman_bool_t pixman_region32_inverse (pixman_region32_t *new_reg,
pixman_region32_t *reg1,
pixman_box32_t *invRect);
pixman_box32_t *inv_rect);
pixman_bool_t pixman_region32_contains_point (pixman_region32_t *region,
int x,
int y,
@@ -598,6 +567,8 @@ typedef struct pixman_gradient_stop pixman_gradient_stop_t;
typedef uint32_t (* pixman_read_memory_func_t) (const void *src, int size);
typedef void (* pixman_write_memory_func_t) (void *dst, uint32_t value, int size);
typedef void (* pixman_image_destroy_func_t) (pixman_image_t *image, void *data);
struct pixman_gradient_stop {
pixman_fixed_t x;
pixman_color_t color;
@@ -649,70 +620,76 @@ struct pixman_indexed
#define PIXMAN_TYPE_GRAY 5
#define PIXMAN_TYPE_YUY2 6
#define PIXMAN_TYPE_YV12 7
#define PIXMAN_TYPE_BGRA 8
#define PIXMAN_FORMAT_COLOR(f) \
(PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ARGB || \
PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ABGR)
PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ABGR || \
PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_BGRA)
/* 32bpp formats */
typedef enum {
PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,8,8,8,8),
PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,8,8,8,8),
PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,8,8,8),
PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,8,8,8,8),
PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,8,8,8,8),
PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,8,8,8),
PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,8,8,8,8),
PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,0,8,8,8),
PIXMAN_x2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,10,10,10),
PIXMAN_a2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,2,10,10,10),
PIXMAN_x2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,10,10,10),
PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10),
/* 24bpp formats */
PIXMAN_r8g8b8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_b8g8r8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ABGR,0,8,8,8),
PIXMAN_r8g8b8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_b8g8r8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ABGR,0,8,8,8),
/* 16bpp formats */
PIXMAN_r5g6b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,6,5),
PIXMAN_b5g6r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,6,5),
PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,1,5,5,5),
PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,5,5),
PIXMAN_a1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,1,5,5,5),
PIXMAN_x1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,5,5),
PIXMAN_a4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,4,4,4,4),
PIXMAN_x4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,4,4,4),
PIXMAN_a4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,4,4,4,4),
PIXMAN_x4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,4,4,4),
PIXMAN_r5g6b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,6,5),
PIXMAN_b5g6r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,6,5),
PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,1,5,5,5),
PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,5,5),
PIXMAN_a1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,1,5,5,5),
PIXMAN_x1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,5,5),
PIXMAN_a4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,4,4,4,4),
PIXMAN_x4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,4,4,4),
PIXMAN_a4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,4,4,4,4),
PIXMAN_x4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,4,4,4),
/* 8bpp formats */
PIXMAN_a8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,8,0,0,0),
PIXMAN_r3g3b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,0,3,3,2),
PIXMAN_b2g3r3 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,0,3,3,2),
PIXMAN_a2r2g2b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,2,2,2,2),
PIXMAN_a2b2g2r2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,2,2,2,2),
PIXMAN_c8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
PIXMAN_x4a4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,4,0,0,0),
PIXMAN_x4c4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_x4g4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
PIXMAN_a8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,8,0,0,0),
PIXMAN_r3g3b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,0,3,3,2),
PIXMAN_b2g3r3 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,0,3,3,2),
PIXMAN_a2r2g2b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,2,2,2,2),
PIXMAN_a2b2g2r2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,2,2,2,2),
PIXMAN_c8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
PIXMAN_x4a4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,4,0,0,0),
PIXMAN_x4c4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_x4g4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
/* 4bpp formats */
PIXMAN_a4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_A,4,0,0,0),
PIXMAN_r1g2b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,0,1,2,1),
PIXMAN_b1g2r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,0,1,2,1),
PIXMAN_a1r1g1b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,1,1,1,1),
PIXMAN_a1b1g1r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,1,1,1,1),
PIXMAN_c4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_GRAY,0,0,0,0),
PIXMAN_a4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_A,4,0,0,0),
PIXMAN_r1g2b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,0,1,2,1),
PIXMAN_b1g2r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,0,1,2,1),
PIXMAN_a1r1g1b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,1,1,1,1),
PIXMAN_a1b1g1r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,1,1,1,1),
PIXMAN_c4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_GRAY,0,0,0,0),
/* 1bpp formats */
PIXMAN_a1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_A,1,0,0,0),
PIXMAN_g1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_GRAY,0,0,0,0),
PIXMAN_a1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_A,1,0,0,0),
PIXMAN_g1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_GRAY,0,0,0,0),
/* YUV formats */
PIXMAN_yuy2 = PIXMAN_FORMAT(16,PIXMAN_TYPE_YUY2,0,0,0,0),
PIXMAN_yv12 = PIXMAN_FORMAT(12,PIXMAN_TYPE_YV12,0,0,0,0)
PIXMAN_yuy2 = PIXMAN_FORMAT(16,PIXMAN_TYPE_YUY2,0,0,0,0),
PIXMAN_yv12 = PIXMAN_FORMAT(12,PIXMAN_TYPE_YV12,0,0,0,0)
} pixman_format_code_t;
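The PIXMAN_FORMAT macro itself is outside this hunk; in the released header it packs the bpp and type into the top two bytes and the four channel widths into nibbles, which is what helpers like PIXMAN_FORMAT_BPP unpack. A sketch under that assumption (TYPE_ARGB mirrors the PIXMAN_TYPE_ARGB value of 2 shown above):
#include <assert.h>
#include <stdint.h>
/* Reproduced from the released pixman.h for reference. */
#define PIXMAN_FORMAT(bpp,type,a,r,g,b)          \
    (((bpp) << 24) | ((type) << 16) |            \
     ((a) << 12) | ((r) << 8) | ((g) << 4) | (b))
#define PIXMAN_FORMAT_BPP(f)  ((f) >> 24)
#define PIXMAN_FORMAT_TYPE(f) (((f) >> 16) & 0xff)
#define TYPE_ARGB 2
int
main (void)
{
    uint32_t a8r8g8b8 = PIXMAN_FORMAT (32, TYPE_ARGB, 8, 8, 8, 8);
    assert (PIXMAN_FORMAT_BPP (a8r8g8b8) == 32);
    assert (PIXMAN_FORMAT_TYPE (a8r8g8b8) == TYPE_ARGB);
    return 0;
}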
/* Querying supported format values. */
@@ -745,6 +722,9 @@ pixman_image_t *pixman_image_create_bits (pixman_format_code_t
pixman_image_t *pixman_image_ref (pixman_image_t *image);
pixman_bool_t pixman_image_unref (pixman_image_t *image);
void pixman_image_set_destroy_function (pixman_image_t *image,
pixman_image_destroy_func_t function,
void *data);
/* Set properties */
pixman_bool_t pixman_image_set_clip_region (pixman_image_t *image,
@@ -778,7 +758,7 @@ void pixman_image_set_indexed (pixman_image_t *image,
uint32_t *pixman_image_get_data (pixman_image_t *image);
int pixman_image_get_width (pixman_image_t *image);
int pixman_image_get_height (pixman_image_t *image);
int pixman_image_get_stride (pixman_image_t *image);
int pixman_image_get_stride (pixman_image_t *image); /* in bytes */
int pixman_image_get_depth (pixman_image_t *image);
pixman_bool_t pixman_image_fill_rectangles (pixman_op_t op,
pixman_image_t *image,
@@ -787,16 +767,16 @@ pixman_bool_t pixman_image_fill_rectangles (pixman_op_t op,
const pixman_rectangle16_t *rects);
/* Composite */
pixman_bool_t pixman_compute_composite_region (pixman_region16_t *pRegion,
pixman_image_t *pSrc,
pixman_image_t *pMask,
pixman_image_t *pDst,
int16_t xSrc,
int16_t ySrc,
int16_t xMask,
int16_t yMask,
int16_t xDst,
int16_t yDst,
pixman_bool_t pixman_compute_composite_region (pixman_region16_t *region,
pixman_image_t *src_image,
pixman_image_t *mask_image,
pixman_image_t *dst_image,
int16_t src_x,
int16_t src_y,
int16_t mask_x,
int16_t mask_y,
int16_t dest_x,
int16_t dest_y,
uint16_t width,
uint16_t height);
void pixman_image_composite (pixman_op_t op,
@@ -812,6 +792,20 @@ void pixman_image_composite (pixman_op_t op,
uint16_t width,
uint16_t height);
/* Old X servers rely on out-of-bounds accesses when they are asked
* to composite with a window as the source. They create a pixman image
* pointing to some bogus position in memory, but then they set a clip
* region to the position where the actual bits are.
*
* Due to a bug in old versions of pixman, where it would not clip
* against the image bounds when a clip region was set, this would
* actually work. So by default we allow certain out-of-bound access
* to happen unless explicitly disabled.
*
* Fixed X servers should call this function to disable the workaround.
*/
void pixman_disable_out_of_bounds_workaround (void);
/*
* Trapezoids
*/
@@ -829,26 +823,26 @@ struct pixman_edge
{
pixman_fixed_t x;
pixman_fixed_t e;
pixman_fixed_t stepx;
pixman_fixed_t signdx;
pixman_fixed_t dy;
pixman_fixed_t dx;
pixman_fixed_t stepx;
pixman_fixed_t signdx;
pixman_fixed_t dy;
pixman_fixed_t dx;
pixman_fixed_t stepx_small;
pixman_fixed_t stepx_big;
pixman_fixed_t dx_small;
pixman_fixed_t dx_big;
pixman_fixed_t stepx_small;
pixman_fixed_t stepx_big;
pixman_fixed_t dx_small;
pixman_fixed_t dx_big;
};
struct pixman_trapezoid
{
pixman_fixed_t top, bottom;
pixman_fixed_t top, bottom;
pixman_line_fixed_t left, right;
};
/* whether 't' is a well-defined, not obviously empty trapezoid */
#define pixman_trapezoid_valid(t) \
#define pixman_trapezoid_valid(t) \
((t)->left.p1.y != (t)->left.p2.y && \
(t)->right.p1.y != (t)->right.p2.y && \
(int) ((t)->bottom - (t)->top) > 0)
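A quick illustration of the validity test above, using the public fixed-point helpers; the values are arbitrary:
#include <assert.h>
#include "pixman.h"
int
main (void)
{
    /* Left edge x=1..1, right edge x=3..3, spanning y=1..2. */
    pixman_trapezoid_t t = {
        pixman_int_to_fixed (1), pixman_int_to_fixed (2),
        { { pixman_int_to_fixed (1), pixman_int_to_fixed (1) },
          { pixman_int_to_fixed (1), pixman_int_to_fixed (2) } },
        { { pixman_int_to_fixed (3), pixman_int_to_fixed (1) },
          { pixman_int_to_fixed (3), pixman_int_to_fixed (2) } }
    };
    assert (pixman_trapezoid_valid (&t));
    t.bottom = t.top; /* zero height: obviously empty, so rejected */
    assert (!pixman_trapezoid_valid (&t));
    return 0;
}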
@@ -902,5 +896,4 @@ void pixman_rasterize_trapezoid (pixman_image_t *image,
int x_off,
int y_off);
#endif /* PIXMAN_H__ */