2005-06-27 07:27:56 +04:00
|
|
|
#ifndef CSUM_FILE_H
|
|
|
|
#define CSUM_FILE_H
|
|
|
|
|
2018-08-15 20:54:05 +03:00
|
|
|
#include "hash.h"
|
|
|
|
|
2007-10-31 00:06:21 +03:00
|
|
|
struct progress;
|
|
|
|
|
2005-06-27 07:27:56 +04:00
|
|
|
/* A SHA1-protected file */
struct hashfile {
	int fd;			/* descriptor the buffered stream is written to */
	int check_fd;		/* extra descriptor validated against the stream; see hashfd_check() */
	unsigned int offset;	/* bytes currently pending in 'buffer', not yet flushed to 'fd' */
	git_hash_ctx ctx;	/* running hash over everything fed to hashwrite() */
	off_t total;		/* bytes already flushed out to 'fd' */
	struct progress *tp;	/* optional throughput meter; see hashfd_throughput() */
	const char *name;	/* file name, for error reporting */

	/*
	 * Optional CRC32 over the raw bytes written, so individual pack
	 * entries can later be validated before their raw data is reused
	 * (copied) into another pack. Driven by crc32_begin()/crc32_end().
	 */
	int do_crc;
	uint32_t crc32;

	/*
	 * Write buffers live on the heap (not the stack) so their size can
	 * be chosen per consumer: smaller when a progress meter wants
	 * frequent flushes, larger otherwise. 'check_buffer' is NULL unless
	 * the file was opened via hashfd_check().
	 */
	size_t buffer_len;
	unsigned char *buffer;
	unsigned char *check_buffer;
};
|
|
|
|
|
2011-11-18 04:26:54 +04:00
|
|
|
/*
 * Checkpoint: a snapshot of the stream position and hash state, taken
 * with hashfile_checkpoint() so that hashfile_truncate() can later roll
 * the file back to this point.
 */
struct hashfile_checkpoint {
	off_t offset;		/* stream position at the time of the snapshot */
	git_hash_ctx ctx;	/* copy of the running hash state */
};
|
|
|
|
|
2019-04-29 11:28:14 +03:00
|
|
|
void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
|
|
|
|
int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
|
2011-11-18 04:26:54 +04:00
|
|
|
|
2018-04-02 23:34:14 +03:00
|
|
|
/* finalize_hashfile flags */
|
2018-04-02 23:34:15 +03:00
|
|
|
#define CSUM_CLOSE 1
|
|
|
|
#define CSUM_FSYNC 2
|
|
|
|
#define CSUM_HASH_IN_STREAM 4
|
2008-05-30 19:42:16 +04:00
|
|
|
|
2019-04-29 11:28:14 +03:00
|
|
|
struct hashfile *hashfd(int fd, const char *name);
|
|
|
|
struct hashfile *hashfd_check(const char *name);
|
|
|
|
struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
|
|
|
|
int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int);
|
|
|
|
void hashwrite(struct hashfile *, const void *, unsigned int);
|
|
|
|
void hashflush(struct hashfile *f);
|
|
|
|
void crc32_begin(struct hashfile *);
|
|
|
|
uint32_t crc32_end(struct hashfile *);
|
2005-06-27 07:27:56 +04:00
|
|
|
|
2019-12-18 14:25:42 +03:00
|
|
|
/*
|
|
|
|
* Returns the total number of bytes fed to the hashfile so far (including ones
|
|
|
|
* that have not been written out to the descriptor yet).
|
|
|
|
*/
|
|
|
|
static inline off_t hashfile_total(struct hashfile *f)
|
|
|
|
{
|
|
|
|
return f->total + f->offset;
|
|
|
|
}
|
|
|
|
|
2018-02-01 05:18:46 +03:00
|
|
|
/* Append a single byte to the hashfile. */
static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
{
	hashwrite(f, &data, sizeof data);
}
|
|
|
|
|
2018-02-01 05:18:46 +03:00
|
|
|
/* Append a 32-bit value to the hashfile in network byte order. */
static inline void hashwrite_be32(struct hashfile *f, uint32_t data)
{
	uint32_t be = htonl(data);

	hashwrite(f, &be, sizeof(be));
}
|
|
|
|
|
2020-11-12 15:20:19 +03:00
|
|
|
/*
 * Append a 64-bit value to the hashfile in network byte order and
 * return the number of bytes written (always 8).
 */
static inline size_t hashwrite_be64(struct hashfile *f, uint64_t data)
{
	uint64_t be = htonll(data);

	hashwrite(f, &be, sizeof(be));
	return sizeof(be);
}
|
|
|
|
|
2005-06-27 07:27:56 +04:00
|
|
|
#endif
|