Mirror of https://github.com/mozilla/moz-skia.git
Add support for 64bit atomic inc/dec/cas
R=mtklein@google.com
Author: bsalomon@google.com
Review URL: https://codereview.chromium.org/377073002
This commit is contained in:
Parent: d08cb905a7
Commit: 00a8fae0ce
@@ -14,6 +14,54 @@ enum {
     M = 2
 };
 
+class AtomicInc32 : public Benchmark {
+public:
+    AtomicInc32() : fX(0) {}
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
+protected:
+    virtual const char* onGetName() {
+        return "atomic_inc_32";
+    }
+
+    virtual void onDraw(const int loops, SkCanvas*) {
+        for (int i = 0; i < loops; ++i) {
+            sk_atomic_inc(&fX);
+        }
+    }
+
+private:
+    int32_t fX;
+    typedef Benchmark INHERITED;
+};
+
+class AtomicInc64 : public Benchmark {
+public:
+    AtomicInc64() : fX(0) {}
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
+protected:
+    virtual const char* onGetName() {
+        return "atomic_inc_64";
+    }
+
+    virtual void onDraw(const int loops, SkCanvas*) {
+        for (int i = 0; i < loops; ++i) {
+            sk_atomic_inc(&fX);
+        }
+    }
+
+private:
+    int64_t fX;
+    typedef Benchmark INHERITED;
+};
+
 class RefCntBench_Stack : public Benchmark {
 public:
     virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
@@ -191,6 +239,9 @@ private:
 
 ///////////////////////////////////////////////////////////////////////////////
 
+DEF_BENCH( return new AtomicInc32(); )
+DEF_BENCH( return new AtomicInc64(); )
+
 DEF_BENCH( return new RefCntBench_Stack(); )
 DEF_BENCH( return new RefCntBench_Heap(); )
 DEF_BENCH( return new RefCntBench_New(); )
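The two new benches simply spin sk_atomic_inc on a 32-bit and a 64-bit counter inside onDraw, and DEF_BENCH registers a factory for each so the harness can run them by name (atomic_inc_32, atomic_inc_64). Outside Skia's harness, the same measurement can be sketched standalone; std::atomic and the timing scaffolding below are this sketch's assumptions, not part of the patch.

// Standalone sketch, not Skia's harness: times 32- vs 64-bit atomic
// increments the same way AtomicInc32/AtomicInc64 spin in onDraw.
// std::atomic stands in for sk_atomic_inc; the loop count is arbitrary.
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>

template <typename T>
static double timeIncs(int loops) {
    std::atomic<T> x(0);
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < loops; ++i) {
        x.fetch_add(1, std::memory_order_relaxed);  // ~ sk_atomic_inc(&fX)
    }
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    return elapsed.count();
}

int main() {
    const int loops = 10 * 1000 * 1000;
    std::printf("atomic_inc_32: %.3fs\n", timeIncs<int32_t>(loops));
    std::printf("atomic_inc_64: %.3fs\n", timeIncs<int64_t>(loops));
    return 0;
}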
@@ -16,22 +16,26 @@
  * No additional memory barrier is required; this must act as a compiler barrier.
  */
 static int32_t sk_atomic_inc(int32_t* addr);
+static int64_t sk_atomic_inc(int64_t* addr);
 
 /** Atomically adds inc to the int referenced by addr and returns the previous value.
  * No additional memory barrier is required; this must act as a compiler barrier.
  */
 static int32_t sk_atomic_add(int32_t* addr, int32_t inc);
+static int64_t sk_atomic_add(int64_t* addr, int64_t inc);
 
 /** Atomically subtracts one from the int referenced by addr and returns the previous value.
  * This must act as a release (SL/S) memory barrier and as a compiler barrier.
  */
 static int32_t sk_atomic_dec(int32_t* addr);
+static int64_t sk_atomic_dec(int64_t* addr);
 
 /** Atomic compare and set.
  * If *addr == before, set *addr to after and return true, otherwise return false.
  * This must act as a release (SL/S) memory barrier and as a compiler barrier.
  */
 static bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after);
+static bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after);
 
 /** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
  * this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
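Because the new declarations overload on the pointee type, existing 32-bit call sites compile unchanged while 64-bit counters resolve to the new variants. A minimal dispatch sketch, with the __sync-based definitions from the patch's GCC port inlined so it builds standalone (GCC/Clang assumed):

#include <cstdint>
#include <cstdio>

// Definitions inlined from the patch's __sync-based port so the sketch compiles alone.
static inline int32_t sk_atomic_inc(int32_t* addr) { return __sync_fetch_and_add(addr, 1); }
static inline int64_t sk_atomic_inc(int64_t* addr) { return __sync_fetch_and_add(addr, 1); }

int main() {
    int32_t small = 0;
    int64_t big = 0;
    sk_atomic_inc(&small);  // overload resolution picks the existing int32_t variant
    sk_atomic_inc(&big);    // ...and the new int64_t variant here
    std::printf("small=%d big=%lld\n", small, (long long)big);
    return 0;
}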
@@ -49,8 +53,8 @@ static void sk_membar_acquire__after_atomic_conditional_inc();
  * and returns the previous value.
  * No additional memory barrier is required; this must act as a compiler barrier.
  */
-static inline int32_t sk_atomic_conditional_inc(int32_t* addr) {
-    int32_t prev;
+template<typename INT_TYPE> static inline INT_TYPE sk_atomic_conditional_inc(INT_TYPE* addr) {
+    INT_TYPE prev;
     do {
         prev = *addr;
         if (0 == prev) {
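The hunk cuts off inside the retry loop, but the pattern is visible: read the counter, bail out if it has already reached zero, otherwise CAS in prev+1 and retry on contention; templating the function lets one body serve both widths. A standalone sketch of that conditional-increment pattern, with std::atomic's compare_exchange_weak playing the role of sk_atomic_cas (the names and assertions below are this sketch's, not the patch's):

#include <atomic>
#include <cassert>
#include <cstdint>

// Sketch of the conditional-inc pattern: bump the count only if it is still
// nonzero, retrying when another thread changes it under us.
template <typename INT_TYPE>
INT_TYPE conditionalInc(std::atomic<INT_TYPE>* addr) {
    INT_TYPE prev = addr->load();
    do {
        if (0 == prev) {
            return 0;  // already dead; do not resurrect
        }
        // on CAS failure, prev is refreshed with the current value and we retry
    } while (!addr->compare_exchange_weak(prev, prev + 1));
    return prev;  // previous value, matching sk_atomic_conditional_inc's contract
}

int main() {
    std::atomic<int64_t> live(2), dead(0);
    assert(conditionalInc(&live) == 2 && live.load() == 3);
    assert(conditionalInc(&dead) == 0 && dead.load() == 0);
    return 0;
}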
@@ -16,14 +16,26 @@ static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t* addr
     return __sync_fetch_and_add(addr, 1);
 }
 
+static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) {
+    return __sync_fetch_and_add(addr, 1);
+}
+
 static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
     return __sync_fetch_and_add(addr, inc);
 }
 
+static inline __attribute__((always_inline)) int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
+    return __sync_fetch_and_add(addr, inc);
+}
+
 static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) {
     return __sync_fetch_and_add(addr, -1);
 }
 
+static inline __attribute__((always_inline)) int64_t sk_atomic_dec(int64_t* addr) {
+    return __sync_fetch_and_add(addr, -1);
+}
+
 static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { }
 
 static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
@@ -32,6 +44,12 @@ static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
     return __sync_bool_compare_and_swap(addr, before, after);
 }
 
+static inline __attribute__((always_inline)) bool sk_atomic_cas(int64_t* addr,
+                                                                int64_t before,
+                                                                int64_t after) {
+    return __sync_bool_compare_and_swap(addr, before, after);
+}
+
 static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr,
                                                                  void* before,
                                                                  void* after) {
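All of the __sync_* builtins used in this port return the prior value and act as full memory barriers, which more than satisfies the acquire/release contract stated in the header. A quick sanity check of the old-value semantics (GCC/Clang assumed; the assertions are this sketch's, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
    int64_t x = 5;
    assert(__sync_fetch_and_add(&x, 1) == 5 && x == 6);         // inc returns the old value
    assert(__sync_fetch_and_add(&x, -1) == 6 && x == 5);        // dec returns the old value
    assert(__sync_bool_compare_and_swap(&x, 5, 9) && x == 9);   // 'before' matched: swapped
    assert(!__sync_bool_compare_and_swap(&x, 5, 1) && x == 9);  // stale 'before': no-op
    return 0;
}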
@@ -25,21 +25,39 @@ static inline int32_t sk_atomic_inc(int32_t* addr) {
     return _InterlockedIncrement(reinterpret_cast<long*>(addr)) - 1;
 }
 
+static inline int64_t sk_atomic_inc(int64_t* addr) {
+    // InterlockedIncrement returns the new value, we want to return the old.
+    return InterlockedIncrement64(addr) - 1;
+}
+
 static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
     return _InterlockedExchangeAdd(reinterpret_cast<long*>(addr), static_cast<long>(inc));
 }
 
+static inline int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
+    return InterlockedExchangeAdd64(addr, inc);
+}
+
 static inline int32_t sk_atomic_dec(int32_t* addr) {
     // InterlockedDecrement returns the new value, we want to return the old.
     return _InterlockedDecrement(reinterpret_cast<long*>(addr)) + 1;
 }
 
+static inline int64_t sk_atomic_dec(int64_t* addr) {
+    // InterlockedDecrement returns the new value, we want to return the old.
+    return InterlockedDecrement64(addr) + 1;
+}
+
 static inline void sk_membar_acquire__after_atomic_dec() { }
 
 static inline bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after) {
     return _InterlockedCompareExchange(reinterpret_cast<long*>(addr), after, before) == before;
 }
 
+static inline bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after) {
+    return _InterlockedCompareExchange64(addr, after, before) == before;
+}
+
 static inline void* sk_atomic_cas(void** addr, void* before, void* after) {
     return InterlockedCompareExchangePointer(addr, after, before);
 }
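Note the -1/+1 adjustments: the Interlocked increment/decrement families return the new value, while the sk_atomic contract is to return the previous one. A Windows-only sanity sketch of that adjustment (the assertions are this sketch's, not part of the patch):

#include <windows.h>
#include <cassert>
#include <cstdint>

int main() {
    int64_t x = 5;
    // InterlockedIncrement64 returns the new value (6); subtracting 1 recovers the old value.
    assert(InterlockedIncrement64(&x) - 1 == 5 && x == 6);
    // InterlockedDecrement64 returns the new value (5); adding 1 recovers the old value.
    assert(InterlockedDecrement64(&x) + 1 == 6 && x == 5);
    // InterlockedCompareExchange64 returns the old value; equality with 'before' means it swapped.
    assert(InterlockedCompareExchange64(&x, 9, 5) == 5 && x == 9);
    return 0;
}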