2017-03-12 01:28:18 +03:00
|
|
|
#ifndef HASH_H
|
|
|
|
#define HASH_H
|
|
|
|
|
2023-04-22 23:17:20 +03:00
|
|
|
#include "hash-ll.h"
|
2020-12-04 21:51:39 +03:00
|
|
|
#include "repository.h"
|
2017-11-13 00:28:52 +03:00
|
|
|
|
2019-06-20 10:41:45 +03:00
|
|
|
#define the_hash_algo the_repository->hash_algo
|
|
|
|
|
2021-04-26 04:02:55 +03:00
|
|
|
static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
|
|
|
|
{
|
|
|
|
return hashcmp_algop(sha1, sha2, the_hash_algo);
|
|
|
|
}
|
|
|
|
|
2020-12-04 21:51:39 +03:00
|
|
|
static inline int oidcmp(const struct object_id *oid1, const struct object_id *oid2)
|
|
|
|
{
|
2021-04-26 04:02:55 +03:00
|
|
|
const struct git_hash_algo *algop;
|
|
|
|
if (!oid1->algo)
|
|
|
|
algop = the_hash_algo;
|
|
|
|
else
|
|
|
|
algop = &hash_algos[oid1->algo];
|
|
|
|
return hashcmp_algop(oid1->hash, oid2->hash, algop);
|
2020-12-04 21:51:39 +03:00
|
|
|
}
|
|
|
|
|
2021-04-26 04:02:55 +03:00
|
|
|
static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2)
|
|
|
|
{
|
|
|
|
return hasheq_algop(sha1, sha2, the_hash_algo);
|
|
|
|
}
|
|
|
|
|
2020-12-04 21:51:39 +03:00
|
|
|
static inline int oideq(const struct object_id *oid1, const struct object_id *oid2)
|
|
|
|
{
|
2021-04-26 04:02:55 +03:00
|
|
|
const struct git_hash_algo *algop;
|
|
|
|
if (!oid1->algo)
|
|
|
|
algop = the_hash_algo;
|
|
|
|
else
|
|
|
|
algop = &hash_algos[oid1->algo];
|
|
|
|
return hasheq_algop(oid1->hash, oid2->hash, algop);
|
2020-12-04 21:51:39 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Return non-zero when "oid" equals the null (all-zero) object ID. */
static inline int is_null_oid(const struct object_id *oid)
{
	const struct object_id *null = null_oid();

	return oideq(oid, null);
}
|
|
|
|
|
|
|
|
static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
|
|
|
|
{
|
|
|
|
memcpy(sha_dst, sha_src, the_hash_algo->rawsz);
|
|
|
|
}
|
|
|
|
|
parallel-checkout: send the new object_id algo field to the workers
An object_id storing a SHA-1 name has some unused bytes at the end of
the hash array. Since these bytes are not used, they are usually not
initialized to any value either. However, at
parallel_checkout.c:send_one_item() the object_id of a cache entry is
copied into a buffer which is later sent to a checkout worker through a
pipe write(). This makes Valgrind complain about passing uninitialized
bytes to a syscall. The worker won't use these uninitialized bytes
either, but the warning could confuse someone trying to debug this code.
So instead of using oidcpy(), send_one_item() uses hashcpy() to only
copy the used/initialized bytes of the object_id, and leave the
remaining part with zeros.
However, since cf0983213c ("hash: add an algo member to struct
object_id", 2021-04-26), using hashcpy() is no longer sufficient here as
it won't copy the new algo field from the object_id. Let's add and use a
new function which meets both our requirements of copying all the
important object_id data while still avoiding the uninitialized bytes,
by padding the end of the hash array in the destination object_id. With
this change, we also no longer need the destination buffer from
send_one_item() to be initialized with zeros, so let's switch from
xcalloc() to xmalloc() to make this clear.
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-05-17 22:49:03 +03:00
|
|
|
/* Like oidcpy() but zero-pads the unused bytes in dst's hash array. */
|
|
|
|
static inline void oidcpy_with_padding(struct object_id *dst,
|
2021-07-08 02:10:18 +03:00
|
|
|
const struct object_id *src)
|
parallel-checkout: send the new object_id algo field to the workers
An object_id storing a SHA-1 name has some unused bytes at the end of
the hash array. Since these bytes are not used, they are usually not
initialized to any value either. However, at
parallel_checkout.c:send_one_item() the object_id of a cache entry is
copied into a buffer which is later sent to a checkout worker through a
pipe write(). This makes Valgrind complain about passing uninitialized
bytes to a syscall. The worker won't use these uninitialized bytes
either, but the warning could confuse someone trying to debug this code;
So instead of using oidcpy(), send_one_item() uses hashcpy() to only
copy the used/initialized bytes of the object_id, and leave the
remaining part with zeros.
However, since cf0983213c ("hash: add an algo member to struct
object_id", 2021-04-26), using hashcpy() is no longer sufficient here as
it won't copy the new algo field from the object_id. Let's add and use a
new function which meets both our requirements of copying all the
important object_id data while still avoiding the uninitialized bytes,
by padding the end of the hash array in the destination object_id. With
this change, we also no longer need the destination buffer from
send_one_item() to be initialized with zeros, so let's switch from
xcalloc() to xmalloc() to make this clear.
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-05-17 22:49:03 +03:00
|
|
|
{
|
|
|
|
size_t hashsz;
|
|
|
|
|
|
|
|
if (!src->algo)
|
|
|
|
hashsz = the_hash_algo->rawsz;
|
|
|
|
else
|
|
|
|
hashsz = hash_algos[src->algo].rawsz;
|
|
|
|
|
|
|
|
memcpy(dst->hash, src->hash, hashsz);
|
|
|
|
memset(dst->hash + hashsz, 0, GIT_MAX_RAWSZ - hashsz);
|
|
|
|
dst->algo = src->algo;
|
|
|
|
}
|
|
|
|
|
2020-12-04 21:51:39 +03:00
|
|
|
static inline void hashclr(unsigned char *hash)
|
|
|
|
{
|
|
|
|
memset(hash, 0, the_hash_algo->rawsz);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void oidclr(struct object_id *oid)
|
|
|
|
{
|
|
|
|
memset(oid->hash, 0, GIT_MAX_RAWSZ);
|
2021-04-26 04:02:55 +03:00
|
|
|
oid->algo = hash_algo_by_ptr(the_hash_algo);
|
2020-12-04 21:51:39 +03:00
|
|
|
}
|
|
|
|
|
2023-10-02 05:40:17 +03:00
|
|
|
static inline void oidread_algop(struct object_id *oid, const unsigned char *hash, const struct git_hash_algo *algop)
|
|
|
|
{
|
|
|
|
memcpy(oid->hash, hash, algop->rawsz);
|
|
|
|
oid->algo = hash_algo_by_ptr(algop);
|
|
|
|
}
|
|
|
|
|
2020-12-04 21:51:39 +03:00
|
|
|
static inline void oidread(struct object_id *oid, const unsigned char *hash)
|
|
|
|
{
|
2023-10-02 05:40:17 +03:00
|
|
|
oidread_algop(oid, hash, the_hash_algo);
|
2020-12-04 21:51:39 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int is_empty_blob_sha1(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
return hasheq(sha1, the_hash_algo->empty_blob->hash);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int is_empty_blob_oid(const struct object_id *oid)
|
|
|
|
{
|
|
|
|
return oideq(oid, the_hash_algo->empty_blob);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int is_empty_tree_sha1(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
return hasheq(sha1, the_hash_algo->empty_tree->hash);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int is_empty_tree_oid(const struct object_id *oid)
|
|
|
|
{
|
|
|
|
return oideq(oid, the_hash_algo->empty_tree);
|
|
|
|
}
|
|
|
|
|
2017-03-12 01:28:18 +03:00
|
|
|
#endif
|