Mirror of https://github.com/microsoft/git.git
block-sha1: factor out get_be and put_be wrappers
The BLK_SHA1 code has optimized wrappers for doing endian conversions
on memory that may not be aligned. Let's pull them out so that we can
use them elsewhere, especially the time-tested list of platforms that
prefer each strategy.

Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
This commit is contained in:
Parent: 1a6d8b9148
Commit: 802b123366
block-sha1/sha1.c
@@ -62,38 +62,6 @@
 #define setW(x, val) (W(x) = (val))
 #endif
 
-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if defined(__i386__) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_X64) || \
-    defined(__ppc__) || defined(__ppc64__) || \
-    defined(__powerpc__) || defined(__powerpc64__) || \
-    defined(__s390__) || defined(__s390x__)
-
-#define get_be32(p) ntohl(*(unsigned int *)(p))
-#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
-
-#else
-
-#define get_be32(p) ( \
-	(*((unsigned char *)(p) + 0) << 24) | \
-	(*((unsigned char *)(p) + 1) << 16) | \
-	(*((unsigned char *)(p) + 2) << 8) | \
-	(*((unsigned char *)(p) + 3) << 0) )
-#define put_be32(p, v) do { \
-	unsigned int __v = (v); \
-	*((unsigned char *)(p) + 0) = __v >> 24; \
-	*((unsigned char *)(p) + 1) = __v >> 16; \
-	*((unsigned char *)(p) + 2) = __v >> 8; \
-	*((unsigned char *)(p) + 3) = __v >> 0; } while (0)
-
-#endif
-
 /* This "rolls" over the 512-bit array */
 #define W(x) (array[(x)&15])
 
compat/bswap.h
@@ -122,3 +122,35 @@ static inline uint64_t git_bswap64(uint64_t x)
 #endif
 
 #endif
+
+/*
+ * Performance might be improved if the CPU architecture is OK with
+ * unaligned 32-bit loads and a fast ntohl() is available.
+ * Otherwise fall back to byte loads and shifts which is portable,
+ * and is faster on architectures with memory alignment issues.
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || \
+    defined(_M_IX86) || defined(_M_X64) || \
+    defined(__ppc__) || defined(__ppc64__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__s390__) || defined(__s390x__)
+
+#define get_be32(p) ntohl(*(unsigned int *)(p))
+#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
+
+#else
+
+#define get_be32(p) ( \
+	(*((unsigned char *)(p) + 0) << 24) | \
+	(*((unsigned char *)(p) + 1) << 16) | \
+	(*((unsigned char *)(p) + 2) << 8) | \
+	(*((unsigned char *)(p) + 3) << 0) )
+#define put_be32(p, v) do { \
+	unsigned int __v = (v); \
+	*((unsigned char *)(p) + 0) = __v >> 24; \
+	*((unsigned char *)(p) + 1) = __v >> 16; \
+	*((unsigned char *)(p) + 2) = __v >> 8; \
+	*((unsigned char *)(p) + 3) = __v >> 0; } while (0)
+
+#endif
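As a quick illustration (not part of the commit): the sketch below is a minimal standalone program that round-trips a value through the portable byte-wise fallbacks, with the macro bodies copied from the #else branch above. The main() harness, the buffer name, and the odd offset are assumptions for demonstration only.

/*
 * Sketch: exercise the portable get_be32()/put_be32() fallbacks.
 * These access memory one byte at a time, so an unaligned pointer
 * (buf + 1 below) is safe even on strict-alignment architectures.
 */
#include <stdio.h>
#include <string.h>

#define get_be32(p) ( \
	(*((unsigned char *)(p) + 0) << 24) | \
	(*((unsigned char *)(p) + 1) << 16) | \
	(*((unsigned char *)(p) + 2) << 8) | \
	(*((unsigned char *)(p) + 3) << 0) )
#define put_be32(p, v) do { \
	unsigned int __v = (v); \
	*((unsigned char *)(p) + 0) = __v >> 24; \
	*((unsigned char *)(p) + 1) = __v >> 16; \
	*((unsigned char *)(p) + 2) = __v >> 8; \
	*((unsigned char *)(p) + 3) = __v >> 0; } while (0)

int main(void)
{
	unsigned char buf[8];

	memset(buf, 0, sizeof(buf));

	/* Store at an unaligned (odd) offset. */
	put_be32(buf + 1, 0xdeadbeefu);

	/* Bytes land in big-endian order: de ad be ef. */
	printf("bytes: %02x %02x %02x %02x\n",
	       buf[1], buf[2], buf[3], buf[4]);

	/* get_be32() recovers the original host value. */
	printf("value: 0x%08x\n", (unsigned int)get_be32(buf + 1));
	return 0;
}

On the platforms in the #if list, the direct ntohl() variant instead compiles to a single (possibly byte-swapped) load; the byte-wise form trades a few extra instructions for portability, as the comment in the patch explains.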