diff --git a/bench/RefCntBench.cpp b/bench/RefCntBench.cpp
index 351513b83..f846b1ac6 100644
--- a/bench/RefCntBench.cpp
+++ b/bench/RefCntBench.cpp
@@ -14,6 +14,54 @@ enum {
     M = 2
 };
 
+class AtomicInc32 : public Benchmark {
+public:
+    AtomicInc32() : fX(0) {}
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
+protected:
+    virtual const char* onGetName() {
+        return "atomic_inc_32";
+    }
+
+    virtual void onDraw(const int loops, SkCanvas*) {
+        for (int i = 0; i < loops; ++i) {
+            sk_atomic_inc(&fX);
+        }
+    }
+
+private:
+    int32_t fX;
+    typedef Benchmark INHERITED;
+};
+
+class AtomicInc64 : public Benchmark {
+public:
+    AtomicInc64() : fX(0) {}
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
+protected:
+    virtual const char* onGetName() {
+        return "atomic_inc_64";
+    }
+
+    virtual void onDraw(const int loops, SkCanvas*) {
+        for (int i = 0; i < loops; ++i) {
+            sk_atomic_inc(&fX);
+        }
+    }
+
+private:
+    int64_t fX;
+    typedef Benchmark INHERITED;
+};
+
 class RefCntBench_Stack : public Benchmark {
 public:
     virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
@@ -191,6 +239,9 @@ private:
 
 ///////////////////////////////////////////////////////////////////////////////
 
+DEF_BENCH( return new AtomicInc32(); )
+DEF_BENCH( return new AtomicInc64(); )
+
 DEF_BENCH( return new RefCntBench_Stack(); )
 DEF_BENCH( return new RefCntBench_Heap(); )
 DEF_BENCH( return new RefCntBench_New(); )
diff --git a/include/core/SkThread.h b/include/core/SkThread.h
index 4f7f32609..403b288f1 100644
--- a/include/core/SkThread.h
+++ b/include/core/SkThread.h
@@ -16,22 +16,26 @@
  *  No additional memory barrier is required; this must act as a compiler barrier.
  */
 static int32_t sk_atomic_inc(int32_t* addr);
+static int64_t sk_atomic_inc(int64_t* addr);
 
 /** Atomically adds inc to the int referenced by addr and returns the previous value.
  *  No additional memory barrier is required; this must act as a compiler barrier.
  */
 static int32_t sk_atomic_add(int32_t* addr, int32_t inc);
+static int64_t sk_atomic_add(int64_t* addr, int64_t inc);
 
 /** Atomically subtracts one from the int referenced by addr and returns the previous value.
  *  This must act as a release (SL/S) memory barrier and as a compiler barrier.
  */
 static int32_t sk_atomic_dec(int32_t* addr);
+static int64_t sk_atomic_dec(int64_t* addr);
 
 /** Atomic compare and set.
  *  If *addr == before, set *addr to after and return true, otherwise return false.
  *  This must act as a release (SL/S) memory barrier and as a compiler barrier.
  */
 static bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after);
+static bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after);
 
 /** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
  *  this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
@@ -49,8 +53,8 @@ static void sk_membar_acquire__after_atomic_conditional_inc();
  *  and returns the previous value.
  *  No additional memory barrier is required; this must act as a compiler barrier.
  */
-static inline int32_t sk_atomic_conditional_inc(int32_t* addr) {
-    int32_t prev;
+template <typename INT_TYPE> static inline INT_TYPE sk_atomic_conditional_inc(INT_TYPE* addr) {
+    INT_TYPE prev;
     do {
         prev = *addr;
         if (0 == prev) {
diff --git a/src/ports/SkAtomics_sync.h b/src/ports/SkAtomics_sync.h
index b0d17527f..8135ae2e2 100644
--- a/src/ports/SkAtomics_sync.h
+++ b/src/ports/SkAtomics_sync.h
@@ -16,14 +16,26 @@ static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t* addr
     return __sync_fetch_and_add(addr, 1);
 }
 
+static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) {
+    return __sync_fetch_and_add(addr, 1);
+}
+
 static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
     return __sync_fetch_and_add(addr, inc);
 }
 
+static inline __attribute__((always_inline)) int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
+    return __sync_fetch_and_add(addr, inc);
+}
+
 static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) {
     return __sync_fetch_and_add(addr, -1);
 }
 
+static inline __attribute__((always_inline)) int64_t sk_atomic_dec(int64_t* addr) {
+    return __sync_fetch_and_add(addr, -1);
+}
+
 static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { }
 
 static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
@@ -32,6 +44,12 @@ static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
     return __sync_bool_compare_and_swap(addr, before, after);
 }
 
+static inline __attribute__((always_inline)) bool sk_atomic_cas(int64_t* addr,
+                                                                int64_t before,
+                                                                int64_t after) {
+    return __sync_bool_compare_and_swap(addr, before, after);
+}
+
 static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr,
                                                                  void* before,
                                                                  void* after) {
diff --git a/src/ports/SkAtomics_win.h b/src/ports/SkAtomics_win.h
index 16923947e..4b344dde7 100644
--- a/src/ports/SkAtomics_win.h
+++ b/src/ports/SkAtomics_win.h
@@ -25,21 +25,39 @@
 static inline int32_t sk_atomic_inc(int32_t* addr) {
     return _InterlockedIncrement(reinterpret_cast<long*>(addr)) - 1;
 }
 
+static inline int64_t sk_atomic_inc(int64_t* addr) {
+    // InterlockedIncrement returns the new value, we want to return the old.
+    return InterlockedIncrement64(addr) - 1;
+}
+
 static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
     return _InterlockedExchangeAdd(reinterpret_cast<long*>(addr), static_cast<long>(inc));
 }
 
+static inline int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
+    return InterlockedExchangeAdd64(addr, inc);
+}
+
 static inline int32_t sk_atomic_dec(int32_t* addr) {
     // InterlockedDecrement returns the new value, we want to return the old.
     return _InterlockedDecrement(reinterpret_cast<long*>(addr)) + 1;
 }
 
+static inline int64_t sk_atomic_dec(int64_t* addr) {
+    // InterlockedDecrement returns the new value, we want to return the old.
+    return InterlockedDecrement64(addr) + 1;
+}
+
 static inline void sk_membar_acquire__after_atomic_dec() { }
 static inline bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after) {
     return _InterlockedCompareExchange(reinterpret_cast<long*>(addr), after, before) == before;
 }
 
+static inline bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after) {
+    return _InterlockedCompareExchange64(addr, after, before) == before;
+}
+
 static inline void* sk_atomic_cas(void** addr, void* before, void* after) {
     return InterlockedCompareExchangePointer(addr, after, before);
 }