2005-04-19 00:04:43 +04:00
|
|
|
/*
|
|
|
|
* GIT - The information manager from hell
|
|
|
|
*
|
|
|
|
* Copyright (C) Linus Torvalds, 2005
|
|
|
|
*
|
|
|
|
* This handles basic git sha1 object files - packing, unpacking,
|
|
|
|
* creation etc.
|
|
|
|
*/
|
|
|
|
#include "cache.h"
|
2005-06-27 14:35:33 +04:00
|
|
|
#include "delta.h"
|
2005-06-29 01:21:02 +04:00
|
|
|
#include "pack.h"
|
2006-04-02 16:44:09 +04:00
|
|
|
#include "blob.h"
|
|
|
|
#include "commit.h"
|
|
|
|
#include "tag.h"
|
|
|
|
#include "tree.h"
|
2005-04-19 00:04:43 +04:00
|
|
|
|
2005-04-23 22:09:32 +04:00
|
|
|
#ifndef O_NOATIME
|
|
|
|
#if defined(__linux__) && (defined(__i386__) || defined(__PPC__))
|
|
|
|
#define O_NOATIME 01000000
|
|
|
|
#else
|
|
|
|
#define O_NOATIME 0
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2007-01-10 07:07:11 +03:00
|
|
|
#ifdef NO_C99_FORMAT
|
|
|
|
#define SZ_FMT "lu"
|
|
|
|
#else
|
|
|
|
#define SZ_FMT "zu"
|
|
|
|
#endif
|
|
|
|
|
2006-08-15 21:23:48 +04:00
|
|
|
/* The all-zero SHA1; zero-initialized by C file-scope rules. */
const unsigned char null_sha1[20];

/* Flags used when opening loose object files; starts as O_NOATIME
 * (0 on platforms without it, per the #ifdef block above).
 */
static unsigned int sha1_file_open_flag = O_NOATIME;
|
|
|
|
|
2006-09-21 03:04:46 +04:00
|
|
|
/* Maps an ASCII character to its hex-digit value (0-15), or -1 for
 * any character that is not a hex digit.  Indexed by unsigned char.
 */
signed char hexval_table[256] = {
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 00-07 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 08-0f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 10-17 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 18-1f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 20-27 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 28-2f */
	  0,  1,  2,  3,  4,  5,  6,  7,		/* 30-37 */
	  8,  9, -1, -1, -1, -1, -1, -1,		/* 38-3f */
	 -1, 10, 11, 12, 13, 14, 15, -1,		/* 40-47 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 48-4f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 50-57 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 58-5f */
	 -1, 10, 11, 12, 13, 14, 15, -1,		/* 60-67 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 68-6f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 70-77 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 78-7f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 80-87 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 88-8f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 90-97 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* 98-9f */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* a0-a7 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* a8-af */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* b0-b7 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* b8-bf */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* c0-c7 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* c8-cf */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* d0-d7 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* d8-df */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* e0-e7 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* e8-ef */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* f0-f7 */
	 -1, -1, -1, -1, -1, -1, -1, -1,		/* f8-ff */
};
|
2005-04-19 00:04:43 +04:00
|
|
|
|
|
|
|
/*
 * Parse exactly 40 hexadecimal characters from "hex" into the 20
 * raw bytes of "sha1".  Returns 0 on success, -1 if any character
 * is not a hex digit.
 */
int get_sha1_hex(const char *hex, unsigned char *sha1)
{
	int byte;

	for (byte = 0; byte < 20; byte++, hex += 2) {
		/* hexval() yields an all-bits-set value for non-digits,
		 * which leaves bits above 0xff set in the combined byte.
		 */
		unsigned int v = (hexval(hex[0]) << 4) | hexval(hex[1]);

		if (v & ~0xff)
			return -1;
		sha1[byte] = v;
	}
	return 0;
}
|
|
|
|
|
2005-07-06 12:11:52 +04:00
|
|
|
/*
 * Create every leading directory of "path" (everything before the
 * final path component), making each one as needed.
 *
 * The string is temporarily modified in place: each '/' is replaced
 * with NUL while that prefix is checked/created, then restored.
 * On every return path the string is back to its original contents.
 *
 * Returns 0 on success, or a negative code on failure:
 *   -1  mkdir() failed
 *   -2  adjust_shared_perm() failed on a newly created directory
 *   -3  a prefix exists but is not a directory
 */
int safe_create_leading_directories(char *path)
{
	char *pos = path;
	struct stat st;

	/* Never try to create the root directory itself. */
	if (*pos == '/')
		pos++;

	while (pos) {
		pos = strchr(pos, '/');
		if (!pos)
			break;
		*pos = 0;
		if (!stat(path, &st)) {
			/* path exists */
			if (!S_ISDIR(st.st_mode)) {
				*pos = '/';
				return -3;
			}
		}
		else if (mkdir(path, 0777)) {
			*pos = '/';
			return -1;
		}
		else if (adjust_shared_perm(path)) {
			*pos = '/';
			return -2;
		}
		/* restore the separator and move past it */
		*pos++ = '/';
	}
	return 0;
}
|
2005-07-05 22:31:32 +04:00
|
|
|
|
2005-04-19 00:04:43 +04:00
|
|
|
/*
 * Format a 20-byte binary SHA1 as a 40-character lowercase hex
 * string.  Returns a pointer into a small ring of four static
 * buffers, so up to four results can be live at once (e.g. in a
 * single printf call); the fifth call reuses the first buffer.
 */
char * sha1_to_hex(const unsigned char *sha1)
{
	static int bufno;
	static char hexbuffer[4][50];
	static const char digits[] = "0123456789abcdef";
	char *out = hexbuffer[3 & ++bufno];
	char *p = out;
	const unsigned char *end = sha1 + 20;

	while (sha1 < end) {
		unsigned int byte = *sha1++;

		*p++ = digits[byte >> 4];
		*p++ = digits[byte & 0xf];
	}
	*p = '\0';

	return out;
}
|
|
|
|
|
2005-05-07 11:38:04 +04:00
|
|
|
/*
 * Write the hex form of "sha1" into "pathbuf" laid out as a loose
 * object path tail: two characters for the first byte, then the
 * remaining 19 bytes (38 characters) starting one slot further
 * right, leaving pathbuf[2] untouched for the caller's '/'
 * directory separator.  Writes 40 characters total (slots 0-1 and
 * 3-40); no NUL terminator is written.
 */
static void fill_sha1_path(char *pathbuf, const unsigned char *sha1)
{
	static const char hex_digits[] = "0123456789abcdef";
	int i;

	for (i = 0; i < 20; i++) {
		unsigned int byte = sha1[i];
		/* slot 2 is skipped: every byte after the first lands
		 * one position further right than 2*i.
		 */
		char *out = pathbuf + 2 * i + (i ? 1 : 0);

		out[0] = hex_digits[byte >> 4];
		out[1] = hex_digits[byte & 0xf];
	}
}
|
|
|
|
|
2005-04-19 00:04:43 +04:00
|
|
|
/*
|
|
|
|
* NOTE! This returns a statically allocated buffer, so you have to be
|
2006-09-02 08:16:31 +04:00
|
|
|
* careful about using it. Do a "xstrdup()" if you need to save the
|
2005-04-19 00:04:43 +04:00
|
|
|
* filename.
|
2005-05-07 11:38:04 +04:00
|
|
|
*
|
|
|
|
* Also note that this returns the location for creating. Reading
|
|
|
|
* SHA1 file can happen from any alternate directory listed in the
|
2005-05-10 04:57:56 +04:00
|
|
|
* DB_ENVIRONMENT environment variable if it is not found in
|
2005-05-07 11:38:04 +04:00
|
|
|
* the primary object database.
|
2005-04-19 00:04:43 +04:00
|
|
|
*/
|
|
|
|
/*
 * Return "<objdir>/xx/xxxxx..." — the loose object path for "sha1"
 * in the primary object database.
 *
 * Returns a pointer to a single static buffer that is overwritten
 * by the next call; callers must copy it if they need to keep it.
 * The directory prefix is computed once on first call and cached.
 */
char *sha1_file_name(const unsigned char *sha1)
{
	static char *name, *base;

	if (!base) {
		const char *sha1_file_directory = get_object_directory();
		int len = strlen(sha1_file_directory);
		/* 60 = '/' + 2 hex + '/' + 38 hex + NUL, with slack */
		base = xmalloc(len + 60);
		memcpy(base, sha1_file_directory, len);
		memset(base+len, 0, 60);
		base[len] = '/';
		base[len+3] = '/';
		/* name points at the "xx/xxxx..." portion after objdir */
		name = base + len + 1;
	}
	fill_sha1_path(name, sha1);
	return base;
}
|
|
|
|
|
2005-08-01 04:53:44 +04:00
|
|
|
/*
 * Return "<objdir>/pack/pack-<40 hex>.pack" for the given pack SHA1.
 *
 * Returns a pointer to a single static buffer overwritten on the
 * next call.  The template (with a placeholder 40-digit name) is
 * built once; subsequent calls only rewrite the hex portion.
 */
char *sha1_pack_name(const unsigned char *sha1)
{
	static const char hex[] = "0123456789abcdef";
	static char *name, *base, *buf;
	int i;

	if (!base) {
		const char *sha1_file_directory = get_object_directory();
		int len = strlen(sha1_file_directory);
		base = xmalloc(len + 60);
		sprintf(base, "%s/pack/pack-1234567890123456789012345678901234567890.pack", sha1_file_directory);
		/* name points at the first of the 40 placeholder digits */
		name = base + len + 11;
	}

	buf = name;

	for (i = 0; i < 20; i++) {
		unsigned int val = *sha1++;
		*buf++ = hex[val >> 4];
		*buf++ = hex[val & 0xf];
	}

	return base;
}
|
|
|
|
|
|
|
|
/*
 * Return "<objdir>/pack/pack-<40 hex>.idx" for the given pack SHA1.
 *
 * Same static-buffer contract as sha1_pack_name(): the result is
 * overwritten by the next call; only the hex digits are rewritten
 * after the first call builds the template.
 */
char *sha1_pack_index_name(const unsigned char *sha1)
{
	static const char hex[] = "0123456789abcdef";
	static char *name, *base, *buf;
	int i;

	if (!base) {
		const char *sha1_file_directory = get_object_directory();
		int len = strlen(sha1_file_directory);
		base = xmalloc(len + 60);
		sprintf(base, "%s/pack/pack-1234567890123456789012345678901234567890.idx", sha1_file_directory);
		/* name points at the first of the 40 placeholder digits */
		name = base + len + 11;
	}

	buf = name;

	for (i = 0; i < 20; i++) {
		unsigned int val = *sha1++;
		*buf++ = hex[val >> 4];
		*buf++ = hex[val & 0xf];
	}

	return base;
}
|
|
|
|
|
2005-08-15 04:25:57 +04:00
|
|
|
/* Head of the list of alternate object databases (see the comment
 * above prepare_alt_odb() below for the layout of each entry).
 */
struct alternate_object_database *alt_odb_list;
/* Append cursor for building the list; doubles as the "already
 * prepared" flag checked by prepare_alt_odb().
 */
static struct alternate_object_database **alt_odb_tail;
|
2005-05-07 11:38:04 +04:00
|
|
|
|
2006-05-07 22:19:21 +04:00
|
|
|
static void read_info_alternates(const char * alternates, int depth);
|
|
|
|
|
2005-05-09 00:51:13 +04:00
|
|
|
/*
|
|
|
|
* Prepare alternate object database registry.
|
2005-08-15 04:25:57 +04:00
|
|
|
*
|
|
|
|
* The variable alt_odb_list points at the list of struct
|
|
|
|
* alternate_object_database. The elements on this list come from
|
|
|
|
* non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
|
|
|
|
* environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
|
2005-12-05 09:48:43 +03:00
|
|
|
* whose contents is similar to that environment variable but can be
|
|
|
|
* LF separated. Its base points at a statically allocated buffer that
|
2005-08-15 04:25:57 +04:00
|
|
|
* contains "/the/directory/corresponding/to/.git/objects/...", while
|
|
|
|
* its name points just after the slash at the end of ".git/objects/"
|
|
|
|
* in the example above, and has enough space to hold 40-byte hex
|
|
|
|
* SHA1, an extra slash for the first level indirection, and the
|
|
|
|
* terminating NUL.
|
2005-05-09 00:51:13 +04:00
|
|
|
*/
|
2006-05-07 22:19:21 +04:00
|
|
|
/*
 * Add one alternate object directory ("entry", "len" bytes, not
 * NUL-terminated) to alt_odb_list.  A relative entry is resolved
 * against "relative_base".  Duplicates, the primary object
 * directory itself, and non-existent directories are rejected.
 * On success, recursively reads the new alternate's own
 * info/alternates file ("depth" guards against runaway nesting in
 * link_alt_odb_entries).
 *
 * Returns 0 if the entry was added, -1 otherwise.
 */
static int link_alt_odb_entry(const char * entry, int len, const char * relative_base, int depth)
{
	struct stat st;
	const char *objdir = get_object_directory();
	struct alternate_object_database *ent;
	struct alternate_object_database *alt;
	/* 43 = 40-byte + 2 '/' + terminating NUL */
	int pfxlen = len;
	int entlen = pfxlen + 43;
	int base_len = -1;

	if (*entry != '/' && relative_base) {
		/* Relative alt-odb */
		if (base_len < 0)
			base_len = strlen(relative_base) + 1;
		entlen += base_len;
		pfxlen += base_len;
	}
	ent = xmalloc(sizeof(*ent) + entlen);

	if (*entry != '/' && relative_base) {
		/* "<relative_base>/<entry>" */
		memcpy(ent->base, relative_base, base_len - 1);
		ent->base[base_len - 1] = '/';
		memcpy(ent->base + base_len, entry, len);
	}
	else
		memcpy(ent->base, entry, pfxlen);

	/* name points past the directory prefix; the '/' at pfxlen+3
	 * is the fan-out separator inside the 40-hex object name.
	 */
	ent->name = ent->base + pfxlen + 1;
	ent->base[pfxlen + 3] = '/';
	/* NUL-terminate the directory part for the stat() below */
	ent->base[pfxlen] = ent->base[entlen-1] = 0;

	/* Detect cases where alternate disappeared */
	if (stat(ent->base, &st) || !S_ISDIR(st.st_mode)) {
		error("object directory %s does not exist; "
		      "check .git/objects/info/alternates.",
		      ent->base);
		free(ent);
		return -1;
	}

	/* Prevent the common mistake of listing the same
	 * thing twice, or object directory itself.
	 */
	for (alt = alt_odb_list; alt; alt = alt->next) {
		if (!memcmp(ent->base, alt->base, pfxlen)) {
			free(ent);
			return -1;
		}
	}
	if (!memcmp(ent->base, objdir, pfxlen)) {
		free(ent);
		return -1;
	}

	/* add the alternate entry */
	*alt_odb_tail = ent;
	alt_odb_tail = &(ent->next);
	ent->next = NULL;

	/* recursively add alternates */
	read_info_alternates(ent->base, depth + 1);

	/* restore the '/' so base is ready for object-name filling */
	ent->base[pfxlen] = '/';

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Split the [alt, ep) buffer on the separator character "sep"
 * (':' for the environment variable, '\n' for info/alternates
 * files) and register each non-empty element via
 * link_alt_odb_entry().  Elements starting with '#' are comments
 * and skipped; relative entries are rejected when they come from a
 * nested file (depth != 0).  Nesting deeper than 5 levels is
 * refused to avoid cycles.
 */
static void link_alt_odb_entries(const char *alt, const char *ep, int sep,
				 const char *relative_base, int depth)
{
	const char *cp, *last;

	if (depth > 5) {
		error("%s: ignoring alternate object stores, nesting too deep.",
				relative_base);
		return;
	}

	last = alt;
	while (last < ep) {
		cp = last;
		if (cp < ep && *cp == '#') {
			/* comment: skip to the next separator */
			while (cp < ep && *cp != sep)
				cp++;
			last = cp + 1;
			continue;
		}
		while (cp < ep && *cp != sep)
			cp++;
		if (last != cp) {
			if ((*last != '/') && depth) {
				error("%s: ignoring relative alternate object store %s",
						relative_base, last);
			} else {
				link_alt_odb_entry(last, cp - last,
						relative_base, depth);
			}
		}
		/* skip over consecutive separators */
		while (cp < ep && *cp == sep)
			cp++;
		last = cp;
	}
}
|
|
|
|
|
2006-05-07 22:19:21 +04:00
|
|
|
/*
 * Read "<relative_base>/info/alternates" (if present and non-empty)
 * and register every newline-separated entry in it as an alternate
 * object database.  Missing or empty files are silently ignored.
 * "depth" tracks recursion through nested alternates.
 */
static void read_info_alternates(const char * relative_base, int depth)
{
	char *map;
	size_t mapsz;
	struct stat st;
	char path[PATH_MAX];
	int fd;

	sprintf(path, "%s/info/alternates", relative_base);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return;
	if (fstat(fd, &st) || (st.st_size == 0)) {
		close(fd);
		return;
	}
	/* map the whole file; parsing works on the raw bytes */
	mapsz = xsize_t(st.st_size);
	map = xmmap(NULL, mapsz, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);

	link_alt_odb_entries(map, map + mapsz, '\n', relative_base, depth);

	munmap(map, mapsz);
}
|
|
|
|
|
2006-05-07 22:19:21 +04:00
|
|
|
/*
 * Populate alt_odb_list from the ALTERNATE_DB_ENVIRONMENT variable
 * (colon-separated) and from $GIT_OBJECT_DIRECTORY/info/alternates.
 * Idempotent: a non-NULL alt_odb_tail means the list was already
 * built and the function returns immediately.
 *
 * NOTE(review): getenv() is called before the already-prepared
 * check, so the environment is read even on repeat calls — harmless
 * but slightly wasteful; confirm before reordering.
 */
void prepare_alt_odb(void)
{
	const char *alt;

	alt = getenv(ALTERNATE_DB_ENVIRONMENT);
	if (!alt) alt = "";

	if (alt_odb_tail)
		return;
	alt_odb_tail = &alt_odb_list;
	link_alt_odb_entries(alt, alt + strlen(alt), ':', NULL, 0);

	read_info_alternates(get_object_directory(), 0);
}
|
|
|
|
|
2005-05-07 11:38:04 +04:00
|
|
|
/*
 * Locate the loose object file for "sha1", checking the primary
 * object directory first and then each alternate in order.  On
 * success, *st is filled in by stat() and the returned path points
 * at a static buffer (either sha1_file_name()'s buffer or the
 * matching alternate's base).  Returns NULL if not found anywhere.
 */
static char *find_sha1_file(const unsigned char *sha1, struct stat *st)
{
	char *name = sha1_file_name(sha1);
	struct alternate_object_database *alt;

	if (!stat(name, st))
		return name;
	prepare_alt_odb();
	for (alt = alt_odb_list; alt; alt = alt->next) {
		/* write the object name into the alternate's buffer */
		name = alt->name;
		fill_sha1_path(name, sha1);
		if (!stat(alt->base, st))
			return alt->base;
	}
	return NULL;
}
|
|
|
|
|
2006-12-23 10:34:28 +03:00
|
|
|
/* Monotonic counter used to timestamp window usage (LRU ordering). */
static unsigned int pack_used_ctr;
/* Statistics reported by pack_report() below. */
static unsigned int pack_mmap_calls;
static unsigned int peak_pack_open_windows;
static unsigned int pack_open_windows;
static size_t peak_pack_mapped;
/* Total bytes currently mapped across all pack windows; bounded by
 * packed_git_limit via unuse_one_window().
 */
static size_t pack_mapped;
/* Head of the list of all known pack files. */
struct packed_git *packed_git;
|
2005-06-27 14:35:33 +04:00
|
|
|
|
2006-12-23 10:34:47 +03:00
|
|
|
/*
 * Dump pack-mmap configuration and usage statistics to stderr.
 * Purely diagnostic; has no side effects on pack state.
 */
void pack_report()
{
	fprintf(stderr,
		"pack_report: getpagesize() = %10" SZ_FMT "\n"
		"pack_report: core.packedGitWindowSize = %10" SZ_FMT "\n"
		"pack_report: core.packedGitLimit = %10" SZ_FMT "\n",
		(size_t) getpagesize(),
		packed_git_window_size,
		packed_git_limit);
	fprintf(stderr,
		"pack_report: pack_used_ctr = %10u\n"
		"pack_report: pack_mmap_calls = %10u\n"
		"pack_report: pack_open_windows = %10u / %10u\n"
		"pack_report: pack_mapped = "
		"%10" SZ_FMT " / %10" SZ_FMT "\n",
		pack_used_ctr,
		pack_mmap_calls,
		pack_open_windows, peak_pack_open_windows,
		pack_mapped, peak_pack_mapped);
}
|
|
|
|
|
2007-03-16 23:42:50 +03:00
|
|
|
/*
 * Open, map and sanity-check a version-1 pack index file.
 *
 * Validates the minimum size, rejects files carrying the newer
 * PACK_IDX_SIGNATURE header, checks that the 256-entry fan-out
 * table is monotonically non-decreasing, and verifies the total
 * file size against the object count implied by the fan-out.
 *
 * On success, fills in p->index_version/index_data/index_size
 * (the mapping stays live) and returns 0.  On failure, unmaps
 * everything and returns -1 (via error() for diagnosed cases).
 */
static int check_packed_git_idx(const char *path, struct packed_git *p)
{
	void *idx_map;
	struct pack_idx_header *hdr;
	size_t idx_size;
	uint32_t nr, i, *index;
	int fd = open(path, O_RDONLY);
	struct stat st;

	if (fd < 0)
		return -1;
	if (fstat(fd, &st)) {
		close(fd);
		return -1;
	}
	idx_size = xsize_t(st.st_size);
	/* fan-out table + trailing two SHA1s is the absolute minimum */
	if (idx_size < 4 * 256 + 20 + 20) {
		close(fd);
		return error("index file %s is too small", path);
	}
	idx_map = xmmap(NULL, idx_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);

	/* a future index format would start with this, as older git
	 * binaries would fail the non-monotonic index check below.
	 * give a nicer warning to the user if we can.
	 */
	hdr = idx_map;
	if (hdr->idx_signature == htonl(PACK_IDX_SIGNATURE)) {
		munmap(idx_map, idx_size);
		return error("index file %s is a newer version"
			" and is not supported by this binary"
			" (try upgrading GIT to a newer version)",
			path);
	}

	nr = 0;
	index = idx_map;
	for (i = 0; i < 256; i++) {
		uint32_t n = ntohl(index[i]);
		if (n < nr) {
			munmap(idx_map, idx_size);
			return error("non-monotonic index %s", path);
		}
		nr = n;
	}

	/*
	 * Total size:
	 *  - 256 index entries 4 bytes each
	 *  - 24-byte entries * nr (20-byte sha1 + 4-byte offset)
	 *  - 20-byte SHA1 of the packfile
	 *  - 20-byte SHA1 file checksum
	 */
	if (idx_size != 4*256 + nr * 24 + 20 + 20) {
		munmap(idx_map, idx_size);
		return error("wrong index file size in %s", path);
	}

	p->index_version = 1;
	p->index_data = idx_map;
	p->index_size = idx_size;
	return 0;
}
|
|
|
|
|
2006-12-23 10:34:44 +03:00
|
|
|
/*
 * Walk the windows of pack "p" looking for the least-recently-used
 * window that is not currently in use.  If one beats the best
 * candidate found so far, update *lru_p (its pack), *lru_w (the
 * window) and *lru_l (the window preceding it in the list, or NULL
 * if it is the head — needed for unlinking).
 */
static void scan_windows(struct packed_git *p,
	struct packed_git **lru_p,
	struct pack_window **lru_w,
	struct pack_window **lru_l)
{
	struct pack_window *w, *w_l;

	for (w_l = NULL, w = p->windows; w; w = w->next) {
		if (!w->inuse_cnt) {
			if (!*lru_w || w->last_used < (*lru_w)->last_used) {
				*lru_p = p;
				*lru_w = w;
				*lru_l = w_l;
			}
		}
		w_l = w;
	}
}
|
|
|
|
|
|
|
|
/*
 * Evict the globally least-recently-used unused pack window to
 * reclaim mapped memory.  "current" (may be NULL) is scanned first
 * and its pack file descriptor is kept open even if it loses its
 * last window, since the caller is about to map from it again.
 *
 * Returns 1 if a window was unmapped, 0 if nothing was evictable.
 */
static int unuse_one_window(struct packed_git *current)
{
	struct packed_git *p, *lru_p = NULL;
	struct pack_window *lru_w = NULL, *lru_l = NULL;

	if (current)
		scan_windows(current, &lru_p, &lru_w, &lru_l);
	for (p = packed_git; p; p = p->next)
		scan_windows(p, &lru_p, &lru_w, &lru_l);
	if (lru_p) {
		munmap(lru_w->base, lru_w->len);
		pack_mapped -= lru_w->len;
		/* unlink the window from its pack's list */
		if (lru_l)
			lru_l->next = lru_w->next;
		else {
			lru_p->windows = lru_w->next;
			/* close the fd of a pack with no windows left,
			 * unless it is the pack we are working on now
			 */
			if (!lru_p->windows && lru_p != current) {
				close(lru_p->pack_fd);
				lru_p->pack_fd = -1;
			}
		}
		free(lru_w);
		pack_open_windows--;
		return 1;
	}
	return 0;
}
|
|
|
|
|
2006-12-24 08:47:19 +03:00
|
|
|
/*
 * Unmap least-recently-used pack windows until at least "need"
 * bytes have been released relative to the mapped total at entry,
 * or until no more windows can be evicted.
 */
void release_pack_memory(size_t need)
{
	size_t before = pack_mapped;

	while (need >= before - pack_mapped) {
		if (!unuse_one_window(NULL))
			break;
	}
}
|
|
|
|
|
2006-12-23 10:34:08 +03:00
|
|
|
void unuse_pack(struct pack_window **w_cursor)
|
2005-06-29 13:51:27 +04:00
|
|
|
{
|
2006-12-23 10:34:08 +03:00
|
|
|
struct pack_window *w = *w_cursor;
|
|
|
|
if (w) {
|
|
|
|
w->inuse_cnt--;
|
|
|
|
*w_cursor = NULL;
|
|
|
|
}
|
2005-06-27 14:35:33 +04:00
|
|
|
}
|
|
|
|
|
2007-02-02 11:00:03 +03:00
|
|
|
/*
 * Do not call this directly as this leaks p->pack_fd on error return;
 * call open_packed_git() instead.
 *
 * Opens p->pack_name, records/validates its size, sets FD_CLOEXEC,
 * and verifies the pack header (signature, version, object count
 * against the index) and the trailing pack SHA1 against the last
 * 40 bytes of the index data.  Returns 0 on success, -1 or an
 * error() return on failure.
 */
static int open_packed_git_1(struct packed_git *p)
{
	struct stat st;
	struct pack_header hdr;
	unsigned char sha1[20];
	unsigned char *idx_sha1;
	long fd_flag;

	p->pack_fd = open(p->pack_name, O_RDONLY);
	if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
		return -1;

	/* If we created the struct before we had the pack we lack size. */
	if (!p->pack_size) {
		if (!S_ISREG(st.st_mode))
			return error("packfile %s not a regular file", p->pack_name);
		p->pack_size = st.st_size;
	} else if (p->pack_size != st.st_size)
		return error("packfile %s size changed", p->pack_name);

	/* We leave these file descriptors open with sliding mmap;
	 * there is no point keeping them open across exec(), though.
	 */
	fd_flag = fcntl(p->pack_fd, F_GETFD, 0);
	if (fd_flag < 0)
		return error("cannot determine file descriptor flags");
	fd_flag |= FD_CLOEXEC;
	if (fcntl(p->pack_fd, F_SETFD, fd_flag) == -1)
		return error("cannot set FD_CLOEXEC");

	/* Verify we recognize this pack file format. */
	if (read_in_full(p->pack_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return error("file %s is far too short to be a packfile", p->pack_name);
	if (hdr.hdr_signature != htonl(PACK_SIGNATURE))
		return error("file %s is not a GIT packfile", p->pack_name);
	if (!pack_version_ok(hdr.hdr_version))
		return error("packfile %s is version %u and not supported"
			" (try upgrading GIT to a newer version)",
			p->pack_name, ntohl(hdr.hdr_version));

	/* Verify the pack matches its index. */
	if (num_packed_objects(p) != ntohl(hdr.hdr_entries))
		return error("packfile %s claims to have %u objects"
			" while index size indicates %u objects",
			p->pack_name, ntohl(hdr.hdr_entries),
			num_packed_objects(p));
	if (lseek(p->pack_fd, p->pack_size - sizeof(sha1), SEEK_SET) == -1)
		return error("end of packfile %s is unavailable", p->pack_name);
	if (read_in_full(p->pack_fd, sha1, sizeof(sha1)) != sizeof(sha1))
		return error("packfile %s signature is unavailable", p->pack_name);
	/* the index stores the pack SHA1 just before its own checksum */
	idx_sha1 = ((unsigned char *)p->index_data) + p->index_size - 40;
	if (hashcmp(sha1, idx_sha1))
		return error("packfile %s does not match index", p->pack_name);
	return 0;
}
|
|
|
|
|
2007-02-02 11:00:03 +03:00
|
|
|
static int open_packed_git(struct packed_git *p)
|
|
|
|
{
|
|
|
|
if (!open_packed_git_1(p))
|
|
|
|
return 0;
|
|
|
|
if (p->pack_fd != -1) {
|
|
|
|
close(p->pack_fd);
|
|
|
|
p->pack_fd = -1;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2007-03-07 04:44:30 +03:00
|
|
|
static int in_window(struct pack_window *win, off_t offset)
|
2006-12-23 10:34:28 +03:00
|
|
|
{
|
|
|
|
/* We must promise at least 20 bytes (one hash) after the
|
|
|
|
* offset is available from this window, otherwise the offset
|
|
|
|
* is not actually in this window and a different window (which
|
|
|
|
* has that one hash excess) must be used. This is to support
|
|
|
|
* the object header and delta base parsing routines below.
|
|
|
|
*/
|
|
|
|
off_t win_off = win->offset;
|
|
|
|
return win_off <= offset
|
|
|
|
&& (offset + 20) <= (win_off + win->len);
|
|
|
|
}
|
|
|
|
|
2006-12-23 10:34:08 +03:00
|
|
|
/*
 * Return a pointer to the pack data at "offset", mapping a new
 * window if no existing one covers it.  *w_cursor tracks the
 * caller's current window (pinned via inuse_cnt); pass the same
 * cursor on subsequent calls and release it with unuse_pack().
 * If "left" is non-NULL, it receives the number of usable bytes
 * from the returned pointer to the end of the window.
 *
 * Dies if the pack cannot be opened, the offset is past the
 * trailing checksum, or mmap fails.
 */
unsigned char* use_pack(struct packed_git *p,
		struct pack_window **w_cursor,
		off_t offset,
		unsigned int *left)
{
	struct pack_window *win = *w_cursor;

	if (p->pack_fd == -1 && open_packed_git(p))
		die("packfile %s cannot be accessed", p->pack_name);

	/* Since packfiles end in a hash of their content and its
	 * pointless to ask for an offset into the middle of that
	 * hash, and the in_window function above wouldn't match
	 * don't allow an offset too close to the end of the file.
	 */
	if (offset > (p->pack_size - 20))
		die("offset beyond end of packfile (truncated pack?)");

	if (!win || !in_window(win, offset)) {
		if (win)
			win->inuse_cnt--;
		for (win = p->windows; win; win = win->next) {
			if (in_window(win, offset))
				break;
		}
		if (!win) {
			/* align new windows on half-window boundaries so
			 * neighboring requests can share them
			 */
			size_t window_align = packed_git_window_size / 2;
			off_t len;
			win = xcalloc(1, sizeof(*win));
			win->offset = (offset / window_align) * window_align;
			len = p->pack_size - win->offset;
			if (len > packed_git_window_size)
				len = packed_git_window_size;
			win->len = (size_t)len;
			pack_mapped += win->len;
			/* evict LRU windows until under the global limit */
			while (packed_git_limit < pack_mapped
				&& unuse_one_window(p))
				; /* nothing */
			win->base = xmmap(NULL, win->len,
				PROT_READ, MAP_PRIVATE,
				p->pack_fd, win->offset);
			if (win->base == MAP_FAILED)
				die("packfile %s cannot be mapped: %s",
					p->pack_name,
					strerror(errno));
			pack_mmap_calls++;
			pack_open_windows++;
			if (pack_mapped > peak_pack_mapped)
				peak_pack_mapped = pack_mapped;
			if (pack_open_windows > peak_pack_open_windows)
				peak_pack_open_windows = pack_open_windows;
			win->next = p->windows;
			p->windows = win;
		}
	}
	/* pin the (possibly new) window for the caller */
	if (win != *w_cursor) {
		win->last_used = pack_used_ctr++;
		win->inuse_cnt++;
		*w_cursor = win;
	}
	offset -= win->offset;
	if (left)
		*left = win->len - xsize_t(offset);
	return win->base + offset;
}
|
|
|
|
|
2007-03-16 23:42:50 +03:00
|
|
|
struct packed_git *add_packed_git(const char *path, int path_len, int local)
|
2005-06-27 14:35:33 +04:00
|
|
|
{
|
|
|
|
struct stat st;
|
2007-03-16 23:42:50 +03:00
|
|
|
struct packed_git *p = xmalloc(sizeof(*p) + path_len + 2);
|
2005-06-27 14:35:33 +04:00
|
|
|
|
2007-03-16 23:42:50 +03:00
|
|
|
/*
|
|
|
|
* Make sure a corresponding .pack file exists and that
|
|
|
|
* the index looks sane.
|
|
|
|
*/
|
|
|
|
path_len -= strlen(".idx");
|
|
|
|
if (path_len < 1)
|
2005-06-27 14:35:33 +04:00
|
|
|
return NULL;
|
2007-03-16 23:42:50 +03:00
|
|
|
memcpy(p->pack_name, path, path_len);
|
|
|
|
strcpy(p->pack_name + path_len, ".pack");
|
|
|
|
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode) ||
|
|
|
|
check_packed_git_idx(path, p)) {
|
|
|
|
free(p);
|
2005-06-27 14:35:33 +04:00
|
|
|
return NULL;
|
|
|
|
}
|
2007-03-16 23:42:50 +03:00
|
|
|
|
2005-06-27 14:35:33 +04:00
|
|
|
/* ok, it looks sane as far as we can check without
|
|
|
|
* actually mapping the pack file.
|
|
|
|
*/
|
|
|
|
p->pack_size = st.st_size;
|
|
|
|
p->next = NULL;
|
2006-12-23 10:33:44 +03:00
|
|
|
p->windows = NULL;
|
2006-12-23 10:34:01 +03:00
|
|
|
p->pack_fd = -1;
|
2005-10-14 02:38:28 +04:00
|
|
|
p->pack_local = local;
|
2007-03-09 14:52:12 +03:00
|
|
|
p->mtime = st.st_mtime;
|
2007-03-16 23:42:50 +03:00
|
|
|
if (path_len < 40 || get_sha1_hex(path + path_len - 40, p->sha1))
|
|
|
|
hashclr(p->sha1);
|
2005-06-27 14:35:33 +04:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2005-08-01 04:53:44 +04:00
|
|
|
/*
 * Build a packed_git for the pack identified by "sha1", using the
 * standard "<objdir>/pack/pack-<sha1>.idx" path for its index.
 * Returns NULL if the index fails validation.
 */
struct packed_git *parse_pack_index(unsigned char *sha1)
{
	return parse_pack_index_file(sha1, sha1_pack_index_name(sha1));
}
|
|
|
|
|
2007-03-16 23:42:50 +03:00
|
|
|
/*
 * Build a packed_git for the pack identified by "sha1", validating
 * the index at the caller-supplied "idx_path".  pack_size is left 0
 * so open_packed_git_1() fills it in from the file on first open.
 *
 * Returns a newly allocated struct, or NULL if the index is bad.
 */
struct packed_git *parse_pack_index_file(const unsigned char *sha1,
					 const char *idx_path)
{
	const char *path = sha1_pack_name(sha1);
	struct packed_git *p = xmalloc(sizeof(*p) + strlen(path) + 2);

	if (check_packed_git_idx(idx_path, p)) {
		free(p);
		return NULL;
	}

	strcpy(p->pack_name, path);
	p->pack_size = 0;
	p->next = NULL;
	p->windows = NULL;
	p->pack_fd = -1;
	hashcpy(p->sha1, sha1);
	return p;
}
|
|
|
|
|
|
|
|
/*
 * Prepend a pack to the global packed_git list, making it visible
 * to subsequent object lookups.  Newly installed packs are searched
 * first (until rearrange_packed_git() reorders the list).
 */
void install_packed_git(struct packed_git *pack)
{
	pack->next = packed_git;
	packed_git = pack;
}
|
|
|
|
|
2005-10-14 02:38:28 +04:00
|
|
|
static void prepare_packed_git_one(char *objdir, int local)
|
2005-06-27 14:35:33 +04:00
|
|
|
{
|
|
|
|
char path[PATH_MAX];
|
|
|
|
int len;
|
|
|
|
DIR *dir;
|
|
|
|
struct dirent *de;
|
|
|
|
|
|
|
|
sprintf(path, "%s/pack", objdir);
|
|
|
|
len = strlen(path);
|
|
|
|
dir = opendir(path);
|
2006-02-18 03:14:52 +03:00
|
|
|
if (!dir) {
|
2006-02-22 22:16:38 +03:00
|
|
|
if (errno != ENOENT)
|
2006-02-23 04:47:10 +03:00
|
|
|
error("unable to open object pack directory: %s: %s",
|
2006-02-22 22:16:38 +03:00
|
|
|
path, strerror(errno));
|
2005-06-27 14:35:33 +04:00
|
|
|
return;
|
2006-02-18 03:14:52 +03:00
|
|
|
}
|
2005-06-27 14:35:33 +04:00
|
|
|
path[len++] = '/';
|
|
|
|
while ((de = readdir(dir)) != NULL) {
|
|
|
|
int namelen = strlen(de->d_name);
|
|
|
|
struct packed_git *p;
|
|
|
|
|
2006-08-11 16:01:45 +04:00
|
|
|
if (!has_extension(de->d_name, ".idx"))
|
2005-06-27 14:35:33 +04:00
|
|
|
continue;
|
|
|
|
|
2007-02-01 23:52:27 +03:00
|
|
|
/* Don't reopen a pack we already have. */
|
2005-06-27 14:35:33 +04:00
|
|
|
strcpy(path + len, de->d_name);
|
2006-06-02 20:49:32 +04:00
|
|
|
for (p = packed_git; p; p = p->next) {
|
|
|
|
if (!memcmp(path, p->pack_name, len + namelen - 4))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (p)
|
|
|
|
continue;
|
2007-02-01 23:52:27 +03:00
|
|
|
/* See if it really is a valid .idx file with corresponding
|
|
|
|
* .pack file that we can map.
|
|
|
|
*/
|
2005-10-14 02:38:28 +04:00
|
|
|
p = add_packed_git(path, len + namelen, local);
|
2005-06-27 14:35:33 +04:00
|
|
|
if (!p)
|
|
|
|
continue;
|
2007-02-01 23:52:22 +03:00
|
|
|
install_packed_git(p);
|
2005-06-27 14:35:33 +04:00
|
|
|
}
|
2005-07-06 10:52:17 +04:00
|
|
|
closedir(dir);
|
2005-06-27 14:35:33 +04:00
|
|
|
}
|
|
|
|
|
2007-03-09 14:52:12 +03:00
|
|
|
static int sort_pack(const void *a_, const void *b_)
|
|
|
|
{
|
|
|
|
struct packed_git *a = *((struct packed_git **)a_);
|
|
|
|
struct packed_git *b = *((struct packed_git **)b_);
|
|
|
|
int st;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local packs tend to contain objects specific to our
|
|
|
|
* variant of the project than remote ones. In addition,
|
|
|
|
* remote ones could be on a network mounted filesystem.
|
|
|
|
* Favor local ones for these reasons.
|
|
|
|
*/
|
|
|
|
st = a->pack_local - b->pack_local;
|
|
|
|
if (st)
|
|
|
|
return -st;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Younger packs tend to contain more recent objects,
|
|
|
|
* and more recent objects tend to get accessed more
|
|
|
|
* often.
|
|
|
|
*/
|
|
|
|
if (a->mtime < b->mtime)
|
|
|
|
return 1;
|
|
|
|
else if (a->mtime == b->mtime)
|
|
|
|
return 0;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rearrange_packed_git(void)
|
|
|
|
{
|
|
|
|
struct packed_git **ary, *p;
|
|
|
|
int i, n;
|
|
|
|
|
|
|
|
for (n = 0, p = packed_git; p; p = p->next)
|
|
|
|
n++;
|
|
|
|
if (n < 2)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* prepare an array of packed_git for easier sorting */
|
|
|
|
ary = xcalloc(n, sizeof(struct packed_git *));
|
|
|
|
for (n = 0, p = packed_git; p; p = p->next)
|
|
|
|
ary[n++] = p;
|
|
|
|
|
|
|
|
qsort(ary, n, sizeof(struct packed_git *), sort_pack);
|
|
|
|
|
|
|
|
/* link them back again */
|
|
|
|
for (i = 0; i < n - 1; i++)
|
|
|
|
ary[i]->next = ary[i + 1];
|
|
|
|
ary[n - 1]->next = NULL;
|
|
|
|
packed_git = ary[0];
|
|
|
|
|
|
|
|
free(ary);
|
|
|
|
}
|
|
|
|
|
2006-06-02 19:32:23 +04:00
|
|
|
static int prepare_packed_git_run_once = 0;
|
2005-06-29 01:56:57 +04:00
|
|
|
/*
 * Discover and install all packs reachable from the main object
 * directory and every alternate, then sort them into preferred
 * search order.  Idempotent: does nothing after the first call
 * unless reprepare_packed_git() resets the run-once flag.
 */
void prepare_packed_git(void)
{
	struct alternate_object_database *alt;

	if (prepare_packed_git_run_once)
		return;
	prepare_packed_git_one(get_object_directory(), 1);
	prepare_alt_odb();
	for (alt = alt_odb_list; alt; alt = alt->next) {
		/*
		 * Temporarily NUL-terminate the alternate's base path by
		 * clobbering the separator just before "name"; restored
		 * to '/' immediately after the scan.
		 */
		alt->name[-1] = 0;
		prepare_packed_git_one(alt->base, 0);
		alt->name[-1] = '/';
	}
	rearrange_packed_git();
	prepare_packed_git_run_once = 1;
}
|
|
|
|
|
2006-11-02 01:06:21 +03:00
|
|
|
/*
 * Force a fresh pack-directory scan, picking up packs created since
 * the last prepare_packed_git() (e.g. after a concurrent repack).
 * Already-known packs are kept; prepare_packed_git_one() skips
 * duplicates.
 */
void reprepare_packed_git(void)
{
	prepare_packed_git_run_once = 0;
	prepare_packed_git();
}
|
|
|
|
|
2005-06-03 19:05:39 +04:00
|
|
|
/*
 * Verify that hashing "map" (size bytes, with the given object type)
 * reproduces the expected object name.  Returns 0 on match, -1 on
 * mismatch.
 */
int check_sha1_signature(const unsigned char *sha1, void *map, unsigned long size, const char *type)
{
	unsigned char actual[20];

	hash_sha1_file(map, size, type, actual);
	if (hashcmp(sha1, actual))
		return -1;
	return 0;
}
|
|
|
|
|
2006-07-18 02:04:47 +04:00
|
|
|
/*
 * mmap() a loose object file for the given SHA-1.
 * On success stores the mapping length in *size and returns the map;
 * returns NULL if the object file does not exist or cannot be opened.
 */
void *map_sha1_file(const unsigned char *sha1, unsigned long *size)
{
	struct stat st;
	void *map;
	int fd;
	/* find_sha1_file() also fills in "st" for the file it finds */
	char *filename = find_sha1_file(sha1, &st);

	if (!filename) {
		return NULL;
	}

	/* first attempt honors the (possibly disabled) O_NOATIME flag */
	fd = open(filename, O_RDONLY | sha1_file_open_flag);
	if (fd < 0) {
		/* See if it works without O_NOATIME */
		switch (sha1_file_open_flag) {
		default:
			fd = open(filename, O_RDONLY);
			if (fd >= 0)
				break;
		/* Fallthrough */
		case 0:
			/* flag was already 0, so plain open failed too */
			return NULL;
		}

		/* If it failed once, it will probably fail again.
		 * Stop using O_NOATIME
		 */
		sha1_file_open_flag = 0;
	}
	*size = xsize_t(st.st_size);
	map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
	/* the mapping keeps the contents alive; the fd is no longer needed */
	close(fd);
	return map;
}
|
|
|
|
|
2006-07-18 02:04:47 +04:00
|
|
|
/*
 * Detect the legacy loose-object format: the whole file is a raw
 * zlib stream, so it must start with a valid zlib header.
 * Returns 1 for the legacy format, 0 otherwise.
 */
int legacy_loose_object(unsigned char *map)
{
	/*
	 * Is it a zlib-compressed buffer? If so, the first byte
	 * must be 0x78 (15-bit window size, deflated), and the
	 * first 16-bit word is evenly divisible by 31
	 */
	unsigned int word = (map[0] << 8) | map[1];

	return map[0] == 0x78 && word % 31 == 0;
}
|
|
|
|
|
2006-09-04 08:09:18 +04:00
|
|
|
unsigned long unpack_object_header_gently(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep)
|
2005-06-02 04:54:59 +04:00
|
|
|
{
|
2006-09-02 02:17:01 +04:00
|
|
|
unsigned shift;
|
2006-07-11 23:48:08 +04:00
|
|
|
unsigned char c;
|
|
|
|
unsigned long size;
|
2006-09-02 02:17:01 +04:00
|
|
|
unsigned long used = 0;
|
|
|
|
|
|
|
|
c = buf[used++];
|
|
|
|
*type = (c >> 4) & 7;
|
|
|
|
size = c & 15;
|
|
|
|
shift = 4;
|
|
|
|
while (c & 0x80) {
|
|
|
|
if (len <= used)
|
|
|
|
return 0;
|
|
|
|
if (sizeof(long) * 8 <= shift)
|
|
|
|
return 0;
|
|
|
|
c = buf[used++];
|
|
|
|
size += (c & 0x7f) << shift;
|
|
|
|
shift += 7;
|
|
|
|
}
|
|
|
|
*sizep = size;
|
|
|
|
return used;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Initialize "stream" for inflating a loose object mapped at "map",
 * producing a traditional "<type> <size>\0" header in "buffer".
 * Handles both the legacy format (one zlib stream containing the
 * header) and the newer format (binary header followed by a zlib
 * stream), synthesizing the header text in the latter case.
 * Returns a zlib status (>= Z_OK on success) or -1 on a bad header.
 */
static int unpack_sha1_header(z_stream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz)
{
	unsigned long size, used;
	/* indexed by enum object_type; only the four real types are valid */
	static const char valid_loose_object_type[8] = {
		0, /* OBJ_EXT */
		1, 1, 1, 1, /* "commit", "tree", "blob", "tag" */
		0, /* "delta" and others are invalid in a loose object */
	};
	enum object_type type;

	/* Get the data stream */
	memset(stream, 0, sizeof(*stream));
	stream->next_in = map;
	stream->avail_in = mapsize;
	stream->next_out = buffer;
	stream->avail_out = bufsiz;

	if (legacy_loose_object(map)) {
		/* legacy: the header is inside the zlib stream itself */
		inflateInit(stream);
		return inflate(stream, 0);
	}

	/* new format: binary type/size header precedes the zlib stream */
	used = unpack_object_header_gently(map, mapsize, &type, &size);
	if (!used || !valid_loose_object_type[type])
		return -1;
	map += used;
	mapsize -= used;

	/* Set up the stream for the rest.. */
	stream->next_in = map;
	stream->avail_in = mapsize;
	inflateInit(stream);

	/* And generate the fake traditional header */
	stream->total_out = 1 + snprintf(buffer, bufsiz, "%s %lu",
					 typename(type), size);
	return 0;
}
|
|
|
|
|
2007-03-05 11:21:37 +03:00
|
|
|
/*
 * Inflate the remainder of a loose object after unpack_sha1_header()
 * has produced the "<type> <size>\0" header in "buffer".  Any object
 * bytes already inflated past the header are copied out first.
 * Returns a malloc'd, NUL-terminated buffer of "size" bytes, or NULL
 * (with an error message) on corruption or trailing garbage.
 */
static void *unpack_sha1_rest(z_stream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
{
	/* bytes consumed by the header, including its NUL terminator */
	int bytes = strlen(buffer) + 1;
	unsigned char *buf = xmalloc(1+size);
	unsigned long n;
	int status = Z_OK;

	/* copy whatever object data was inflated along with the header */
	n = stream->total_out - bytes;
	if (n > size)
		n = size;
	memcpy(buf, (char *) buffer + bytes, n);
	bytes = n;
	if (bytes < size) {
		stream->next_out = buf + bytes;
		stream->avail_out = size - bytes;
		while (status == Z_OK)
			status = inflate(stream, Z_FINISH);
	}
	buf[size] = 0;
	/* success requires a clean zlib status and no leftover input */
	if ((status == Z_OK || status == Z_STREAM_END) && !stream->avail_in) {
		inflateEnd(stream);
		return buf;
	}

	if (status < 0)
		error("corrupt loose object '%s'", sha1_to_hex(sha1));
	else if (stream->avail_in)
		error("garbage at end of loose object '%s'",
		      sha1_to_hex(sha1));
	free(buf);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We used to just use "sscanf()", but that's actually way
|
|
|
|
* too permissive for what we want to check. So do an anal
|
|
|
|
* object header parse by hand.
|
|
|
|
*/
|
2007-02-26 22:55:59 +03:00
|
|
|
/*
 * Parse a loose-object header of the form "<type> <decimal-size>\0".
 * Stores the size in *sizep and returns the object type, or -1 on
 * any malformed header.
 */
static int parse_sha1_header(const char *hdr, unsigned long *sizep)
{
	char type[10];
	int i;
	unsigned long size;

	/*
	 * The type can be at most ten bytes (including the
	 * terminating '\0' that we add), and is followed by
	 * a space.
	 */
	i = 0;
	for (;;) {
		char c = *hdr++;
		if (c == ' ')
			break;
		type[i++] = c;
		if (i >= sizeof(type))
			return -1;
	}
	type[i] = 0;

	/*
	 * The length must follow immediately, and be in canonical
	 * decimal format (ie "010" is not valid).
	 */
	/*
	 * NB: "size" is unsigned, so a non-digit character wraps the
	 * subtraction around and is caught by the "> 9" checks.
	 */
	size = *hdr++ - '0';
	if (size > 9)
		return -1;
	if (size) {
		for (;;) {
			unsigned long c = *hdr - '0';
			if (c > 9)
				break;
			hdr++;
			size = size * 10 + c;
		}
	}
	*sizep = size;

	/*
	 * The length must be followed by a zero byte
	 */
	return *hdr ? -1 : type_from_string(type);
}
|
|
|
|
|
2007-03-05 11:21:37 +03:00
|
|
|
static void *unpack_sha1_file(void *map, unsigned long mapsize, enum object_type *type, unsigned long *size, const unsigned char *sha1)
|
2005-04-19 00:04:43 +04:00
|
|
|
{
|
2005-06-02 18:57:25 +04:00
|
|
|
int ret;
|
2005-04-19 00:04:43 +04:00
|
|
|
z_stream stream;
|
2005-06-02 18:57:25 +04:00
|
|
|
char hdr[8192];
|
2005-04-19 00:04:43 +04:00
|
|
|
|
2005-06-02 18:57:25 +04:00
|
|
|
ret = unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr));
|
2007-02-26 22:55:59 +03:00
|
|
|
if (ret < Z_OK || (*type = parse_sha1_header(hdr, size)) < 0)
|
2005-04-19 00:04:43 +04:00
|
|
|
return NULL;
|
|
|
|
|
2007-03-05 11:21:37 +03:00
|
|
|
return unpack_sha1_rest(&stream, hdr, *size, sha1);
|
2005-04-19 00:04:43 +04:00
|
|
|
}
|
|
|
|
|
2007-03-07 04:44:30 +03:00
|
|
|
/*
 * Compute the pack offset of a delta's base object.  For OBJ_OFS_DELTA
 * the base is encoded as a variable-length relative offset at *curpos;
 * for OBJ_REF_DELTA it is a 20-byte SHA-1 looked up in the same pack.
 * Advances *curpos past the base reference and returns the offset;
 * dies on overflow, out-of-bound offsets, or a missing REF_DELTA base.
 */
static off_t get_delta_base(struct packed_git *p,
			    struct pack_window **w_curs,
			    off_t *curpos,
			    enum object_type type,
			    off_t delta_obj_offset)
{
	unsigned char *base_info = use_pack(p, w_curs, *curpos, NULL);
	off_t base_offset;

	/* use_pack() assured us we have [base_info, base_info + 20)
	 * as a range that we can look at without walking off the
	 * end of the mapped window.  Its actually the hash size
	 * that is assured.  An OFS_DELTA longer than the hash size
	 * is stupid, as then a REF_DELTA would be smaller to store.
	 */
	if (type == OBJ_OFS_DELTA) {
		unsigned used = 0;
		unsigned char c = base_info[used++];
		base_offset = c & 127;
		while (c & 128) {
			/* "+1" makes the encoding canonical (no redundant forms) */
			base_offset += 1;
			/* top 7 bits must be clear or the next shift overflows */
			if (!base_offset || base_offset & ~(~0UL >> 7))
				die("offset value overflow for delta base object");
			c = base_info[used++];
			base_offset = (base_offset << 7) + (c & 127);
		}
		/* the encoded value is relative, backwards from this object */
		base_offset = delta_obj_offset - base_offset;
		if (base_offset >= delta_obj_offset)
			die("delta base offset out of bound");
		*curpos += used;
	} else if (type == OBJ_REF_DELTA) {
		/* The base entry _must_ be in the same pack */
		base_offset = find_pack_entry_one(base_info, p);
		if (!base_offset)
			die("failed to find delta-pack base object %s",
			    sha1_to_hex(base_info));
		*curpos += 20;
	} else
		die("I am totally screwed");
	return base_offset;
}
|
|
|
|
|
2005-07-01 04:15:39 +04:00
|
|
|
/* forward declaration for a mutually recursive function */
|
2007-03-07 04:44:30 +03:00
|
|
|
static int packed_object_info(struct packed_git *p, off_t offset,
|
2007-02-26 22:55:59 +03:00
|
|
|
unsigned long *sizep);
|
2005-07-01 04:15:39 +04:00
|
|
|
|
2006-09-21 08:05:37 +04:00
|
|
|
/*
 * Determine the type (and, if sizep is non-NULL, the result size) of
 * a delta object without applying it.  The type comes from recursing
 * to the delta base; the size comes from inflating only the first few
 * bytes of the delta, which carry the base and result sizes.
 */
static int packed_delta_info(struct packed_git *p,
			     struct pack_window **w_curs,
			     off_t curpos,
			     enum object_type type,
			     off_t obj_offset,
			     unsigned long *sizep)
{
	off_t base_offset;

	base_offset = get_delta_base(p, w_curs, &curpos, type, obj_offset);
	/* mutually recursive with packed_object_info() */
	type = packed_object_info(p, base_offset, NULL);

	/* We choose to only get the type of the base object and
	 * ignore potentially corrupt pack file that expects the delta
	 * based on a base with a wrong size.  This saves tons of
	 * inflate() calls.
	 */
	if (sizep) {
		const unsigned char *data;
		unsigned char delta_head[20], *in;
		z_stream stream;
		int st;

		memset(&stream, 0, sizeof(stream));
		stream.next_out = delta_head;
		stream.avail_out = sizeof(delta_head);

		inflateInit(&stream);
		/* inflate only enough to cover the two size headers */
		do {
			in = use_pack(p, w_curs, curpos, &stream.avail_in);
			stream.next_in = in;
			st = inflate(&stream, Z_FINISH);
			curpos += stream.next_in - in;
		} while ((st == Z_OK || st == Z_BUF_ERROR)
			&& stream.total_out < sizeof(delta_head));
		inflateEnd(&stream);
		if ((st != Z_STREAM_END) &&
		    stream.total_out != sizeof(delta_head))
			die("delta data unpack-initial failed");

		/* Examine the initial part of the delta to figure out
		 * the result size.
		 */
		data = delta_head;

		/* ignore base size */
		get_delta_hdr_size(&data, delta_head+sizeof(delta_head));

		/* Read the result size */
		*sizep = get_delta_hdr_size(&data, delta_head+sizeof(delta_head));
	}

	return type;
}
|
|
|
|
|
2007-02-26 22:55:56 +03:00
|
|
|
/*
 * Read the in-pack object header at *curpos, storing the inflated
 * size in *sizep and advancing *curpos past the header.
 * Returns the object type; dies on a truncated header.
 */
static int unpack_object_header(struct packed_git *p,
				struct pack_window **w_curs,
				off_t *curpos,
				unsigned long *sizep)
{
	unsigned char *base;
	unsigned int left;
	unsigned long used;
	enum object_type type;

	/* use_pack() assures us we have [base, base + 20) available
	 * as a range that we can look at at.  (Its actually the hash
	 * size that is assured.)  With our object header encoding
	 * the maximum deflated object size is 2^137, which is just
	 * insane, so we know won't exceed what we have been given.
	 */
	base = use_pack(p, w_curs, *curpos, &left);
	used = unpack_object_header_gently(base, left, &type, sizep);
	if (!used)
		die("object offset outside of pack file");
	*curpos += used;

	return type;
}
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
/*
 * Describe the object stored at obj_offset in pack p: returns its
 * ultimate (non-delta) type name and fills in its inflated size, the
 * length of the delta chain leading to it, and the SHA-1 of the first
 * delta base (zeroed for an OFS_DELTA, where only an offset is known).
 * *store_size is not yet computed and is always set to 0.
 */
const char *packed_object_info_detail(struct packed_git *p,
				      off_t obj_offset,
				      unsigned long *size,
				      unsigned long *store_size,
				      unsigned int *delta_chain_length,
				      unsigned char *base_sha1)
{
	struct pack_window *w_curs = NULL;
	off_t curpos;
	unsigned long dummy;
	unsigned char *next_sha1;
	enum object_type type;

	*delta_chain_length = 0;
	curpos = obj_offset;
	type = unpack_object_header(p, &w_curs, &curpos, size);

	/* walk the delta chain until a base (non-delta) object is found */
	for (;;) {
		switch (type) {
		default:
			die("pack %s contains unknown object type %d",
			    p->pack_name, type);
		case OBJ_COMMIT:
		case OBJ_TREE:
		case OBJ_BLOB:
		case OBJ_TAG:
			*store_size = 0; /* notyet */
			unuse_pack(&w_curs);
			return typename(type);
		case OBJ_OFS_DELTA:
			obj_offset = get_delta_base(p, &w_curs, &curpos, type, obj_offset);
			if (*delta_chain_length == 0) {
				/* TODO: find base_sha1 as pointed by curpos */
				hashclr(base_sha1);
			}
			break;
		case OBJ_REF_DELTA:
			next_sha1 = use_pack(p, &w_curs, curpos, NULL);
			if (*delta_chain_length == 0)
				hashcpy(base_sha1, next_sha1);
			obj_offset = find_pack_entry_one(next_sha1, p);
			break;
		}
		(*delta_chain_length)++;
		curpos = obj_offset;
		/* only "size" of the outermost object is reported */
		type = unpack_object_header(p, &w_curs, &curpos, &dummy);
	}
}
|
|
|
|
|
2007-03-07 04:44:30 +03:00
|
|
|
/*
 * Return the (resolved, non-delta) type of the object at obj_offset
 * in pack p, and if sizep is non-NULL its inflated size.  Delta
 * objects are resolved via packed_delta_info(), which recurses back
 * here for the base's type.
 */
static int packed_object_info(struct packed_git *p, off_t obj_offset,
			      unsigned long *sizep)
{
	struct pack_window *w_curs = NULL;
	unsigned long size;
	off_t curpos = obj_offset;
	enum object_type type;

	type = unpack_object_header(p, &w_curs, &curpos, &size);

	switch (type) {
	case OBJ_OFS_DELTA:
	case OBJ_REF_DELTA:
		type = packed_delta_info(p, &w_curs, curpos,
					 type, obj_offset, sizep);
		break;
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		if (sizep)
			*sizep = size;
		break;
	default:
		die("pack %s contains unknown object type %d",
		    p->pack_name, type);
	}
	unuse_pack(&w_curs);
	return type;
}
|
|
|
|
|
2006-08-26 12:12:27 +04:00
|
|
|
/*
 * Inflate "size" bytes of object data starting at pack offset curpos.
 * Returns a malloc'd, NUL-terminated buffer, or NULL if the stream
 * does not inflate cleanly to exactly "size" bytes.
 */
static void *unpack_compressed_entry(struct packed_git *p,
				    struct pack_window **w_curs,
				    off_t curpos,
				    unsigned long size)
{
	int st;
	z_stream stream;
	unsigned char *buffer, *in;

	buffer = xmalloc(size + 1);
	buffer[size] = 0;
	memset(&stream, 0, sizeof(stream));
	stream.next_out = buffer;
	stream.avail_out = size;

	inflateInit(&stream);
	/* feed input window-by-window; Z_BUF_ERROR means "need more input" */
	do {
		in = use_pack(p, w_curs, curpos, &stream.avail_in);
		stream.next_in = in;
		st = inflate(&stream, Z_FINISH);
		curpos += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	inflateEnd(&stream);
	if ((st != Z_STREAM_END) || stream.total_out != size) {
		free(buffer);
		return NULL;
	}

	return buffer;
}
|
|
|
|
|
2006-08-26 12:12:27 +04:00
|
|
|
static void *unpack_delta_entry(struct packed_git *p,
|
2006-12-23 10:34:08 +03:00
|
|
|
struct pack_window **w_curs,
|
2007-03-07 04:44:30 +03:00
|
|
|
off_t curpos,
|
2005-06-27 14:35:33 +04:00
|
|
|
unsigned long delta_size,
|
2007-03-07 04:44:30 +03:00
|
|
|
off_t obj_offset,
|
2007-02-26 22:55:59 +03:00
|
|
|
enum object_type *type,
|
2006-08-26 12:12:27 +04:00
|
|
|
unsigned long *sizep)
|
2005-06-27 14:35:33 +04:00
|
|
|
{
|
2006-08-26 12:11:02 +04:00
|
|
|
void *delta_data, *result, *base;
|
2007-03-07 04:44:30 +03:00
|
|
|
unsigned long base_size;
|
|
|
|
off_t base_offset;
|
2006-09-21 08:05:37 +04:00
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
base_offset = get_delta_base(p, w_curs, &curpos, *type, obj_offset);
|
2006-12-23 10:33:25 +03:00
|
|
|
base = unpack_entry(p, base_offset, type, &base_size);
|
2006-03-20 00:43:42 +03:00
|
|
|
if (!base)
|
2007-03-07 04:44:30 +03:00
|
|
|
die("failed to read delta base object"
|
|
|
|
" at %"PRIuMAX" from %s",
|
|
|
|
(uintmax_t)base_offset, p->pack_name);
|
2006-03-20 00:43:42 +03:00
|
|
|
|
2007-02-26 22:55:56 +03:00
|
|
|
delta_data = unpack_compressed_entry(p, w_curs, curpos, delta_size);
|
2005-06-27 14:35:33 +04:00
|
|
|
result = patch_delta(base, base_size,
|
|
|
|
delta_data, delta_size,
|
2007-02-26 22:55:59 +03:00
|
|
|
sizep);
|
2005-06-27 14:35:33 +04:00
|
|
|
if (!result)
|
|
|
|
die("failed to apply delta");
|
|
|
|
free(delta_data);
|
|
|
|
free(base);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2007-03-07 04:44:30 +03:00
|
|
|
/*
 * Read and fully reconstruct the object stored at obj_offset in pack
 * p (applying deltas as needed).  Stores the resolved type in *type
 * and the size in *sizep; returns a malloc'd buffer with the data.
 */
void *unpack_entry(struct packed_git *p, off_t obj_offset,
		   enum object_type *type, unsigned long *sizep)
{
	struct pack_window *w_curs = NULL;
	off_t curpos = obj_offset;
	void *data;

	*type = unpack_object_header(p, &w_curs, &curpos, sizep);
	switch (*type) {
	case OBJ_OFS_DELTA:
	case OBJ_REF_DELTA:
		/* may recurse back into unpack_entry() for the base */
		data = unpack_delta_entry(p, &w_curs, curpos, *sizep,
					  obj_offset, type, sizep);
		break;
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		data = unpack_compressed_entry(p, &w_curs, curpos, *sizep);
		break;
	default:
		die("unknown object type %i in %s", *type, p->pack_name);
	}
	unuse_pack(&w_curs);
	return data;
}
|
|
|
|
|
2007-03-07 04:44:19 +03:00
|
|
|
/*
 * Number of objects in the pack, derived from the index file size:
 * subtract the 4*256-byte fan-out table and the two trailing 20-byte
 * checksums, then divide by the 24-byte (offset + SHA-1) entry size.
 */
uint32_t num_packed_objects(const struct packed_git *p)
{
	/* See check_packed_git_idx() */
	return (uint32_t)((p->index_size - 20 - 20 - 4*256) / 24);
}
|
|
|
|
|
2007-03-07 04:44:19 +03:00
|
|
|
/*
 * Copy the SHA-1 of the n-th object (0-based, index order) in the
 * pack index into "sha1".  Returns 0 on success, -1 if n is out of
 * range.
 */
int nth_packed_object_sha1(const struct packed_git *p, uint32_t n,
			   unsigned char* sha1)
{
	/* entries begin immediately after the 256-entry fan-out table */
	const unsigned char *entries =
		(const unsigned char *)p->index_data + 4 * 256;

	if (n >= num_packed_objects(p))
		return -1;
	/* each 24-byte entry: 4-byte offset, then the 20-byte SHA-1 */
	hashcpy(sha1, entries + 24 * n + 4);
	return 0;
}
|
|
|
|
|
2007-03-07 04:44:30 +03:00
|
|
|
/*
 * Look up "sha1" in pack p's index: binary-search the bucket selected
 * by the fan-out table entry for the SHA-1's first byte.
 * Returns the object's pack offset, or 0 if it is not in this pack.
 */
off_t find_pack_entry_one(const unsigned char *sha1,
			  struct packed_git *p)
{
	/* fan-out table: 256 big-endian cumulative object counts */
	const uint32_t *level1_ofs = p->index_data;
	int hi = ntohl(level1_ofs[*sha1]);
	int lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
	const unsigned char *index = p->index_data;

	index += 4 * 256;

	do {
		int mi = (lo + hi) / 2;
		/* entry layout: 4-byte offset followed by 20-byte SHA-1 */
		int cmp = hashcmp(index + 24 * mi + 4, sha1);
		if (!cmp)
			return ntohl(*((uint32_t *)((char *)index + (24 * mi))));
		if (cmp > 0)
			hi = mi;
		else
			lo = mi+1;
	} while (lo < hi);
	return 0;
}
|
|
|
|
|
2006-10-29 12:37:11 +03:00
|
|
|
static int matches_pack_name(struct packed_git *p, const char *ig)
|
|
|
|
{
|
|
|
|
const char *last_c, *c;
|
|
|
|
|
|
|
|
if (!strcmp(p->pack_name, ig))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (c = p->pack_name, last_c = c; *c;)
|
|
|
|
if (*c == '/')
|
|
|
|
last_c = ++c;
|
|
|
|
else
|
|
|
|
++c;
|
|
|
|
if (!strcmp(last_c, ig))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2006-09-06 13:12:09 +04:00
|
|
|
static int find_pack_entry(const unsigned char *sha1, struct pack_entry *e, const char **ignore_packed)
|
2005-06-27 14:35:33 +04:00
|
|
|
{
|
|
|
|
struct packed_git *p;
|
2007-03-07 04:44:30 +03:00
|
|
|
off_t offset;
|
2006-09-21 08:05:37 +04:00
|
|
|
|
2005-06-27 14:35:33 +04:00
|
|
|
prepare_packed_git();
|
|
|
|
|
|
|
|
for (p = packed_git; p; p = p->next) {
|
2006-09-06 13:12:09 +04:00
|
|
|
if (ignore_packed) {
|
|
|
|
const char **ig;
|
|
|
|
for (ig = ignore_packed; *ig; ig++)
|
2006-10-29 12:37:11 +03:00
|
|
|
if (!matches_pack_name(p, *ig))
|
2006-09-06 13:12:09 +04:00
|
|
|
break;
|
|
|
|
if (*ig)
|
|
|
|
continue;
|
|
|
|
}
|
2006-09-21 08:05:37 +04:00
|
|
|
offset = find_pack_entry_one(sha1, p);
|
|
|
|
if (offset) {
|
Don't find objects in packs which aren't available anymore.
Matthias Lederhofer identified a race condition where a Git reader
process was able to locate an object in a packed_git index, but
was then preempted while a `git repack -a -d` ran and completed.
By the time the reader was able to seek in the packfile to get the
object data, the packfile no longer existed on disk.
In this particular case the reader process did not attempt to
open the packfile before it was deleted, so it did not already
have the pack_fd field popuplated. With the packfile itself gone,
there was no way for the reader to open it and fetch the data.
I'm fixing the race condition by teaching find_pack_entry to ignore
a packed_git whose packfile is not currently open and which cannot
be opened. If none of the currently known packs can supply the
object, we will return 0 and the caller will decide the object is
not available. If this is the first attempt at finding an object,
the caller will reprepare_packed_git and try again. If it was
the second attempt, the caller will typically return NULL back,
and an error message about a missing object will be reported.
This patch does not address the situation of a reader which is
being starved out by a tight sequence of `git repack -a -d` runs.
In this particular case the reader will try twice, probably fail
both times, and declare the object in question cannot be found.
As it is highly unlikely that a real world `git repack -a -d` can
complete faster than a reader can open a packfile, so I don't think
this is a huge concern.
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-02-01 23:52:38 +03:00
|
|
|
/*
|
|
|
|
* We are about to tell the caller where they can
|
|
|
|
* locate the requested object. We better make
|
|
|
|
* sure the packfile is still here and can be
|
|
|
|
* accessed before supplying that answer, as
|
|
|
|
* it may have been deleted since the index
|
|
|
|
* was loaded!
|
|
|
|
*/
|
|
|
|
if (p->pack_fd == -1 && open_packed_git(p)) {
|
|
|
|
error("packfile %s cannot be accessed", p->pack_name);
|
|
|
|
continue;
|
|
|
|
}
|
2006-09-21 08:05:37 +04:00
|
|
|
e->offset = offset;
|
|
|
|
e->p = p;
|
|
|
|
hashcpy(e->sha1, sha1);
|
2005-06-27 14:35:33 +04:00
|
|
|
return 1;
|
2006-09-21 08:05:37 +04:00
|
|
|
}
|
2005-06-27 14:35:33 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-08-01 04:53:44 +04:00
|
|
|
struct packed_git *find_sha1_pack(const unsigned char *sha1,
|
|
|
|
struct packed_git *packs)
|
|
|
|
{
|
|
|
|
struct packed_git *p;
|
|
|
|
|
|
|
|
for (p = packs; p; p = p->next) {
|
2006-09-21 08:05:37 +04:00
|
|
|
if (find_pack_entry_one(sha1, p))
|
2005-08-01 04:53:44 +04:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
return NULL;
|
2007-02-26 22:55:59 +03:00
|
|
|
|
2005-08-01 04:53:44 +04:00
|
|
|
}
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
/*
 * Inspect the loose object named by sha1.  On success, *sizep (when
 * non-NULL) receives the inflated size taken from the object header,
 * and the parsed header value is returned; a negative value is
 * returned on error (with a message printed via error()).
 */
static int sha1_loose_object_info(const unsigned char *sha1, unsigned long *sizep)
{
	z_stream stream;
	char hdr[32];
	unsigned long mapsize, size;
	void *map = map_sha1_file(sha1, &mapsize);
	int status;

	if (!map)
		return error("unable to find %s", sha1_to_hex(sha1));

	if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
		status = error("unable to unpack %s header",
			       sha1_to_hex(sha1));
	else if ((status = parse_sha1_header(hdr, &size)) < 0)
		status = error("unable to parse %s header", sha1_to_hex(sha1));
	else if (sizep)
		*sizep = size;

	/* unpack_sha1_header set up the zlib stream; release it either way. */
	inflateEnd(&stream);
	munmap(map, mapsize);
	return status;
}
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
|
2006-11-28 02:18:55 +03:00
|
|
|
{
|
|
|
|
struct pack_entry e;
|
|
|
|
|
|
|
|
if (!find_pack_entry(sha1, &e, NULL)) {
|
|
|
|
reprepare_packed_git();
|
|
|
|
if (!find_pack_entry(sha1, &e, NULL))
|
2007-02-26 22:55:59 +03:00
|
|
|
return sha1_loose_object_info(sha1, sizep);
|
2006-11-28 02:18:55 +03:00
|
|
|
}
|
2007-02-26 22:55:59 +03:00
|
|
|
return packed_object_info(e.p, e.offset, sizep);
|
2006-11-28 02:18:55 +03:00
|
|
|
}
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
static void *read_packed_sha1(const unsigned char *sha1,
|
|
|
|
enum object_type *type, unsigned long *size)
|
2005-06-27 14:35:33 +04:00
|
|
|
{
|
|
|
|
struct pack_entry e;
|
|
|
|
|
2007-01-22 23:29:45 +03:00
|
|
|
if (!find_pack_entry(sha1, &e, NULL))
|
2005-06-27 14:35:33 +04:00
|
|
|
return NULL;
|
2007-01-22 23:29:45 +03:00
|
|
|
else
|
|
|
|
return unpack_entry(e.p, e.offset, type, size);
|
2005-06-27 14:35:33 +04:00
|
|
|
}
|
|
|
|
|
2007-02-05 08:42:38 +03:00
|
|
|
/*
|
|
|
|
* This is meant to hold a *small* number of objects that you would
|
|
|
|
* want read_sha1_file() to be able to return, but yet you do not want
|
|
|
|
* to write them into the object store (e.g. a browse-only
|
|
|
|
* application).
|
|
|
|
*/
|
|
|
|
static struct cached_object {
|
|
|
|
unsigned char sha1[20];
|
2007-02-26 22:55:59 +03:00
|
|
|
enum object_type type;
|
2007-02-05 08:42:38 +03:00
|
|
|
void *buf;
|
|
|
|
unsigned long size;
|
|
|
|
} *cached_objects;
|
|
|
|
static int cached_object_nr, cached_object_alloc;
|
|
|
|
|
|
|
|
static struct cached_object *find_cached_object(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct cached_object *co = cached_objects;
|
|
|
|
|
|
|
|
for (i = 0; i < cached_object_nr; i++, co++) {
|
|
|
|
if (!hashcmp(co->sha1, sha1))
|
|
|
|
return co;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
|
|
|
|
unsigned char *sha1)
|
2007-02-05 08:42:38 +03:00
|
|
|
{
|
|
|
|
struct cached_object *co;
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
hash_sha1_file(buf, len, typename(type), sha1);
|
2007-02-05 08:42:38 +03:00
|
|
|
if (has_sha1_file(sha1) || find_cached_object(sha1))
|
|
|
|
return 0;
|
|
|
|
if (cached_object_alloc <= cached_object_nr) {
|
|
|
|
cached_object_alloc = alloc_nr(cached_object_alloc);
|
|
|
|
cached_objects = xrealloc(cached_objects,
|
|
|
|
sizeof(*cached_objects) *
|
|
|
|
cached_object_alloc);
|
|
|
|
}
|
|
|
|
co = &cached_objects[cached_object_nr++];
|
|
|
|
co->size = len;
|
2007-02-26 22:55:59 +03:00
|
|
|
co->type = type;
|
2007-02-16 04:02:06 +03:00
|
|
|
co->buf = xmalloc(len);
|
|
|
|
memcpy(co->buf, buf, len);
|
2007-02-05 08:42:38 +03:00
|
|
|
hashcpy(co->sha1, sha1);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
void *read_sha1_file(const unsigned char *sha1, enum object_type *type,
|
|
|
|
unsigned long *size)
|
2005-04-19 00:04:43 +04:00
|
|
|
{
|
|
|
|
unsigned long mapsize;
|
|
|
|
void *map, *buf;
|
2007-02-05 08:42:38 +03:00
|
|
|
struct cached_object *co;
|
|
|
|
|
|
|
|
co = find_cached_object(sha1);
|
|
|
|
if (co) {
|
|
|
|
buf = xmalloc(co->size + 1);
|
|
|
|
memcpy(buf, co->buf, co->size);
|
|
|
|
((char*)buf)[co->size] = 0;
|
2007-02-26 22:55:59 +03:00
|
|
|
*type = co->type;
|
2007-02-05 08:42:38 +03:00
|
|
|
*size = co->size;
|
|
|
|
return buf;
|
|
|
|
}
|
2005-04-19 00:04:43 +04:00
|
|
|
|
2007-01-22 23:29:45 +03:00
|
|
|
buf = read_packed_sha1(sha1, type, size);
|
|
|
|
if (buf)
|
|
|
|
return buf;
|
2006-07-18 02:04:47 +04:00
|
|
|
map = map_sha1_file(sha1, &mapsize);
|
2005-04-19 00:04:43 +04:00
|
|
|
if (map) {
|
2007-03-05 11:21:37 +03:00
|
|
|
buf = unpack_sha1_file(map, mapsize, type, size, sha1);
|
2005-04-19 00:04:43 +04:00
|
|
|
munmap(map, mapsize);
|
|
|
|
return buf;
|
|
|
|
}
|
2006-06-02 19:32:23 +04:00
|
|
|
reprepare_packed_git();
|
2007-01-22 23:29:45 +03:00
|
|
|
return read_packed_sha1(sha1, type, size);
|
2005-04-19 00:04:43 +04:00
|
|
|
}
|
|
|
|
|
2005-04-29 03:42:27 +04:00
|
|
|
void *read_object_with_reference(const unsigned char *sha1,
|
2007-02-26 22:55:59 +03:00
|
|
|
const char *required_type_name,
|
2005-04-29 03:42:27 +04:00
|
|
|
unsigned long *size,
|
|
|
|
unsigned char *actual_sha1_return)
|
2005-04-21 05:06:49 +04:00
|
|
|
{
|
2007-02-26 22:55:59 +03:00
|
|
|
enum object_type type, required_type;
|
2005-04-21 05:06:49 +04:00
|
|
|
void *buffer;
|
|
|
|
unsigned long isize;
|
2005-04-29 03:42:27 +04:00
|
|
|
unsigned char actual_sha1[20];
|
2005-04-21 05:06:49 +04:00
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
required_type = type_from_string(required_type_name);
|
2006-08-23 10:49:00 +04:00
|
|
|
hashcpy(actual_sha1, sha1);
|
2005-04-29 03:42:27 +04:00
|
|
|
while (1) {
|
|
|
|
int ref_length = -1;
|
|
|
|
const char *ref_type = NULL;
|
2005-04-21 05:06:49 +04:00
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
buffer = read_sha1_file(actual_sha1, &type, &isize);
|
2005-04-29 03:42:27 +04:00
|
|
|
if (!buffer)
|
|
|
|
return NULL;
|
2007-02-26 22:55:59 +03:00
|
|
|
if (type == required_type) {
|
2005-04-29 03:42:27 +04:00
|
|
|
*size = isize;
|
|
|
|
if (actual_sha1_return)
|
2006-08-23 10:49:00 +04:00
|
|
|
hashcpy(actual_sha1_return, actual_sha1);
|
2005-04-29 03:42:27 +04:00
|
|
|
return buffer;
|
|
|
|
}
|
|
|
|
/* Handle references */
|
2007-02-26 22:55:59 +03:00
|
|
|
else if (type == OBJ_COMMIT)
|
2005-04-29 03:42:27 +04:00
|
|
|
ref_type = "tree ";
|
2007-02-26 22:55:59 +03:00
|
|
|
else if (type == OBJ_TAG)
|
2005-04-29 03:42:27 +04:00
|
|
|
ref_type = "object ";
|
|
|
|
else {
|
|
|
|
free(buffer);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
ref_length = strlen(ref_type);
|
2005-04-21 05:06:49 +04:00
|
|
|
|
2005-04-29 03:42:27 +04:00
|
|
|
if (memcmp(buffer, ref_type, ref_length) ||
|
2006-06-18 19:18:09 +04:00
|
|
|
get_sha1_hex((char *) buffer + ref_length, actual_sha1)) {
|
2005-04-29 03:42:27 +04:00
|
|
|
free(buffer);
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-08-08 22:44:43 +04:00
|
|
|
free(buffer);
|
2005-04-29 03:42:27 +04:00
|
|
|
/* Now we have the ID of the referred-to object in
|
|
|
|
* actual_sha1. Check again. */
|
2005-04-21 05:06:49 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-10-15 16:02:03 +04:00
|
|
|
static void write_sha1_file_prepare(void *buf, unsigned long len,
|
|
|
|
const char *type, unsigned char *sha1,
|
2007-02-26 22:55:55 +03:00
|
|
|
char *hdr, int *hdrlen)
|
2005-06-28 06:03:13 +04:00
|
|
|
{
|
|
|
|
SHA_CTX c;
|
|
|
|
|
|
|
|
/* Generate the header */
|
2007-02-26 22:55:55 +03:00
|
|
|
*hdrlen = sprintf(hdr, "%s %lu", type, len)+1;
|
2005-06-28 06:03:13 +04:00
|
|
|
|
|
|
|
/* Sha1.. */
|
|
|
|
SHA1_Init(&c);
|
|
|
|
SHA1_Update(&c, hdr, *hdrlen);
|
|
|
|
SHA1_Update(&c, buf, len);
|
|
|
|
SHA1_Final(sha1, &c);
|
|
|
|
}
|
|
|
|
|
Create object subdirectories on demand
This makes it possible to have a "sparse" git object subdirectory
structure, something that has become much more attractive now that people
use pack-files all the time.
As a result of pack-files, a git object directory doesn't necessarily have
any individual objects lying around, and in that case it's just wasting
space to keep the empty first-level object directories around: on many
filesystems the 256 empty directories will be aboue 1MB of diskspace.
Even more importantly, after you re-pack a project that _used_ to be
unpacked, you could be left with huge directories that no longer contain
anything, but that waste space and take time to look through.
With this change, "git prune-packed" can just do an rmdir() on the
directories, and they'll get removed if empty, and re-created on demand.
This patch also tries to fix up "write_sha1_from_fd()" to use the new
common infrastructure for creating the object files, closing a hole where
we might otherwise leave half-written objects in the object database.
[jc: I unoptimized the part that really removes the fan-out directories
to ease transition. init-db still wastes 1MB of diskspace to hold 256
empty fan-outs, and prune-packed rmdir()'s the grown but empty directories,
but runs mkdir() immediately after that -- reducing the saving from 150KB
to 146KB. These parts will be re-introduced when everybody has the
on-demand capability.]
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2005-10-09 02:54:01 +04:00
|
|
|
/*
 * Link the tempfile to the final place, possibly creating the
 * last directory level as you do so.
 *
 * Returns the errno on failure, 0 on success.
 */
static int link_temp_to_file(const char *tmpfile, const char *filename)
{
	int ret;
	char *dir;

	if (!link(tmpfile, filename))
		return 0;

	/*
	 * Try to mkdir the last path component if that failed, and
	 * re-try the link() regardless of whether the mkdir succeeds,
	 * since a race might mean that somebody else succeeded.
	 */
	ret = errno;
	dir = strrchr(filename, '/');
	if (!dir)
		return ret;

	/*
	 * NOTE: despite the const qualifier, filename points at a
	 * writable buffer; we temporarily truncate it at the last '/'
	 * to name the fan-out directory, and restore it afterwards.
	 */
	*dir = 0;
	if (!mkdir(filename, 0777) && adjust_shared_perm(filename)) {
		*dir = '/';
		return -2;
	}
	*dir = '/';
	if (!link(tmpfile, filename))
		return 0;
	return errno;
}
|
|
|
|
|
|
|
|
/*
 * Move the just written object into its final resting place.
 * Returns 0 on success (including "the object already exists"),
 * or the error() value after printing a message.
 */
int move_temp_to_file(const char *tmpfile, const char *filename)
{
	int ret = link_temp_to_file(tmpfile, filename);

	/*
	 * Coda hack - coda doesn't like cross-directory links,
	 * so we fall back to a rename, which will mean that it
	 * won't be able to check collisions, but that's not a
	 * big deal.
	 *
	 * The same holds for FAT formatted media.
	 *
	 * When this succeeds, we just return 0. We have nothing
	 * left to unlink.
	 */
	if (ret && ret != EEXIST) {
		if (!rename(tmpfile, filename))
			return 0;
		ret = errno;
	}

	unlink(tmpfile);
	/* EEXIST means the object is already in place -- that is success. */
	if (ret && ret != EEXIST)
		return error("unable to write sha1 filename %s: %s\n", filename, strerror(ret));
	/* FIXME!!! Collision check here ? */
	return 0;
}
|
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
/*
 * Write all of buf to fd; 0 on success, otherwise the error()
 * return value after printing a message.
 */
static int write_buffer(int fd, const void *buf, size_t len)
{
	if (0 <= write_in_full(fd, buf, len))
		return 0;
	return error("file write error (%s)", strerror(errno));
}
|
|
|
|
|
2006-07-11 23:48:08 +04:00
|
|
|
/*
 * Emit the compact binary object header: 4 bits of type plus a
 * variable-length base-128 encoding of len (low bits first), with
 * the high bit of each byte marking "more bytes follow".
 * Returns the number of header bytes written.
 */
static int write_binary_header(unsigned char *hdr, enum object_type type, unsigned long len)
{
	unsigned char c = (type << 4) | (len & 15);
	int hdr_len = 1;

	for (len >>= 4; len; len >>= 7) {
		*hdr++ = c | 0x80;	/* continuation bit */
		c = (len & 0x7f);
		hdr_len++;
	}
	*hdr = c;
	return hdr_len;
}
|
|
|
|
|
|
|
|
/*
 * Begin the deflated object output.  With legacy headers, just pump
 * deflate() until it wants more input/output space; otherwise write
 * the compact binary header directly into the output buffer and
 * advance the stream counters past it.
 */
static void setup_object_header(z_stream *stream, const char *type, unsigned long len)
{
	int obj_type, hdrlen;

	if (use_legacy_headers) {
		while (deflate(stream, 0) == Z_OK)
			; /* nothing */
		return;
	}

	obj_type = type_from_string(type);
	hdrlen = write_binary_header(stream->next_out, obj_type, len);
	stream->total_out = hdrlen;
	stream->next_out += hdrlen;
	stream->avail_out -= hdrlen;
}
|
|
|
|
|
2006-10-14 14:45:36 +04:00
|
|
|
/*
 * Compute (but do not store) the object name of buf as a "type"
 * object, placing the result in sha1.  Always returns 0.
 */
int hash_sha1_file(void *buf, unsigned long len, const char *type,
                   unsigned char *sha1)
{
	char hdr[32];
	int hdrlen;

	/* The header itself is discarded; only the hash is wanted here. */
	write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
	return 0;
}
|
|
|
|
|
2005-05-18 16:14:09 +04:00
|
|
|
int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *returnsha1)
|
2005-04-19 00:04:43 +04:00
|
|
|
{
|
|
|
|
int size;
|
2005-05-18 16:14:09 +04:00
|
|
|
unsigned char *compressed;
|
2005-04-19 00:04:43 +04:00
|
|
|
z_stream stream;
|
|
|
|
unsigned char sha1[20];
|
2005-04-20 20:28:05 +04:00
|
|
|
char *filename;
|
2005-05-03 22:46:16 +04:00
|
|
|
static char tmpfile[PATH_MAX];
|
2007-02-26 22:55:55 +03:00
|
|
|
char hdr[32];
|
Create object subdirectories on demand
This makes it possible to have a "sparse" git object subdirectory
structure, something that has become much more attractive now that people
use pack-files all the time.
As a result of pack-files, a git object directory doesn't necessarily have
any individual objects lying around, and in that case it's just wasting
space to keep the empty first-level object directories around: on many
filesystems the 256 empty directories will be aboue 1MB of diskspace.
Even more importantly, after you re-pack a project that _used_ to be
unpacked, you could be left with huge directories that no longer contain
anything, but that waste space and take time to look through.
With this change, "git prune-packed" can just do an rmdir() on the
directories, and they'll get removed if empty, and re-created on demand.
This patch also tries to fix up "write_sha1_from_fd()" to use the new
common infrastructure for creating the object files, closing a hole where
we might otherwise leave half-written objects in the object database.
[jc: I unoptimized the part that really removes the fan-out directories
to ease transition. init-db still wastes 1MB of diskspace to hold 256
empty fan-outs, and prune-packed rmdir()'s the grown but empty directories,
but runs mkdir() immediately after that -- reducing the saving from 150KB
to 146KB. These parts will be re-introduced when everybody has the
on-demand capability.]
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2005-10-09 02:54:01 +04:00
|
|
|
int fd, hdrlen;
|
2005-04-25 21:19:53 +04:00
|
|
|
|
2005-06-28 06:03:13 +04:00
|
|
|
/* Normally if we have it in the pack then we do not bother writing
|
|
|
|
* it out into .git/objects/??/?{38} file.
|
|
|
|
*/
|
2006-10-15 16:02:03 +04:00
|
|
|
write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
|
|
|
|
filename = sha1_file_name(sha1);
|
2005-04-20 20:28:05 +04:00
|
|
|
if (returnsha1)
|
2006-08-23 10:49:00 +04:00
|
|
|
hashcpy(returnsha1, sha1);
|
2005-06-28 06:03:13 +04:00
|
|
|
if (has_sha1_file(sha1))
|
|
|
|
return 0;
|
2005-05-03 22:46:16 +04:00
|
|
|
fd = open(filename, O_RDONLY);
|
|
|
|
if (fd >= 0) {
|
2005-04-20 20:28:05 +04:00
|
|
|
/*
|
2005-05-03 22:46:16 +04:00
|
|
|
* FIXME!!! We might do collision checking here, but we'd
|
|
|
|
* need to uncompress the old file and check it. Later.
|
2005-04-20 20:28:05 +04:00
|
|
|
*/
|
2005-05-03 22:46:16 +04:00
|
|
|
close(fd);
|
2005-04-20 20:28:05 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-05-03 22:46:16 +04:00
|
|
|
if (errno != ENOENT) {
|
2006-11-09 15:52:05 +03:00
|
|
|
return error("sha1 file %s: %s\n", filename, strerror(errno));
|
2005-05-03 22:46:16 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
snprintf(tmpfile, sizeof(tmpfile), "%s/obj_XXXXXX", get_object_directory());
|
2005-05-07 11:38:04 +04:00
|
|
|
|
2005-05-03 22:46:16 +04:00
|
|
|
fd = mkstemp(tmpfile);
|
|
|
|
if (fd < 0) {
|
2006-11-09 15:52:05 +03:00
|
|
|
if (errno == EPERM)
|
|
|
|
return error("insufficient permission for adding an object to repository database %s\n", get_object_directory());
|
|
|
|
else
|
|
|
|
return error("unable to create temporary sha1 filename %s: %s\n", tmpfile, strerror(errno));
|
2005-05-03 22:46:16 +04:00
|
|
|
}
|
|
|
|
|
2005-04-19 00:04:43 +04:00
|
|
|
/* Set it up */
|
|
|
|
memset(&stream, 0, sizeof(stream));
|
2006-07-04 00:11:47 +04:00
|
|
|
deflateInit(&stream, zlib_compression_level);
|
2006-07-11 23:48:08 +04:00
|
|
|
size = 8 + deflateBound(&stream, len+hdrlen);
|
2005-04-26 23:00:58 +04:00
|
|
|
compressed = xmalloc(size);
|
2005-04-19 00:04:43 +04:00
|
|
|
|
|
|
|
/* Compress it */
|
|
|
|
stream.next_out = compressed;
|
|
|
|
stream.avail_out = size;
|
2005-04-25 21:19:53 +04:00
|
|
|
|
|
|
|
/* First header.. */
|
2007-02-26 22:55:55 +03:00
|
|
|
stream.next_in = (unsigned char *)hdr;
|
2005-04-25 21:19:53 +04:00
|
|
|
stream.avail_in = hdrlen;
|
2006-07-11 23:48:08 +04:00
|
|
|
setup_object_header(&stream, type, len);
|
2005-04-25 21:19:53 +04:00
|
|
|
|
|
|
|
/* Then the data itself.. */
|
|
|
|
stream.next_in = buf;
|
|
|
|
stream.avail_in = len;
|
2005-04-19 00:04:43 +04:00
|
|
|
while (deflate(&stream, Z_FINISH) == Z_OK)
|
|
|
|
/* nothing */;
|
|
|
|
deflateEnd(&stream);
|
|
|
|
size = stream.total_out;
|
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
if (write_buffer(fd, compressed, size) < 0)
|
|
|
|
die("unable to write sha1 file");
|
2005-05-03 22:46:16 +04:00
|
|
|
fchmod(fd, 0444);
|
2005-04-20 20:28:05 +04:00
|
|
|
close(fd);
|
2005-05-07 12:27:00 +04:00
|
|
|
free(compressed);
|
2005-04-19 00:04:43 +04:00
|
|
|
|
Create object subdirectories on demand
This makes it possible to have a "sparse" git object subdirectory
structure, something that has become much more attractive now that people
use pack-files all the time.
As a result of pack-files, a git object directory doesn't necessarily have
any individual objects lying around, and in that case it's just wasting
space to keep the empty first-level object directories around: on many
filesystems the 256 empty directories will be aboue 1MB of diskspace.
Even more importantly, after you re-pack a project that _used_ to be
unpacked, you could be left with huge directories that no longer contain
anything, but that waste space and take time to look through.
With this change, "git prune-packed" can just do an rmdir() on the
directories, and they'll get removed if empty, and re-created on demand.
This patch also tries to fix up "write_sha1_from_fd()" to use the new
common infrastructure for creating the object files, closing a hole where
we might otherwise leave half-written objects in the object database.
[jc: I unoptimized the part that really removes the fan-out directories
to ease transition. init-db still wastes 1MB of diskspace to hold 256
empty fan-outs, and prune-packed rmdir()'s the grown but empty directories,
but runs mkdir() immediately after that -- reducing the saving from 150KB
to 146KB. These parts will be re-introduced when everybody has the
on-demand capability.]
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2005-10-09 02:54:01 +04:00
|
|
|
return move_temp_to_file(tmpfile, filename);
|
2005-04-19 00:04:43 +04:00
|
|
|
}
|
2005-04-24 05:47:23 +04:00
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
/*
|
|
|
|
* We need to unpack and recompress the object for writing
|
|
|
|
* it out to a different file.
|
|
|
|
*/
|
|
|
|
static void *repack_object(const unsigned char *sha1, unsigned long *objsize)
|
2005-07-11 02:25:38 +04:00
|
|
|
{
|
2006-05-24 19:30:54 +04:00
|
|
|
size_t size;
|
2005-07-11 02:25:38 +04:00
|
|
|
z_stream stream;
|
2006-05-24 19:30:54 +04:00
|
|
|
unsigned char *unpacked;
|
|
|
|
unsigned long len;
|
2007-02-26 22:55:59 +03:00
|
|
|
enum object_type type;
|
2007-02-26 22:55:55 +03:00
|
|
|
char hdr[32];
|
2006-05-24 19:30:54 +04:00
|
|
|
int hdrlen;
|
|
|
|
void *buf;
|
2005-08-08 22:45:36 +04:00
|
|
|
|
2006-07-10 10:57:51 +04:00
|
|
|
/* need to unpack and recompress it by itself */
|
2007-02-26 22:55:59 +03:00
|
|
|
unpacked = read_packed_sha1(sha1, &type, &len);
|
2007-01-22 23:29:45 +03:00
|
|
|
if (!unpacked)
|
|
|
|
error("cannot read sha1_file for %s", sha1_to_hex(sha1));
|
2005-07-11 02:25:38 +04:00
|
|
|
|
2007-02-26 22:55:59 +03:00
|
|
|
hdrlen = sprintf(hdr, "%s %lu", typename(type), len) + 1;
|
2005-07-11 02:25:38 +04:00
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
/* Set it up */
|
|
|
|
memset(&stream, 0, sizeof(stream));
|
2006-07-04 00:11:47 +04:00
|
|
|
deflateInit(&stream, zlib_compression_level);
|
2006-05-24 19:30:54 +04:00
|
|
|
size = deflateBound(&stream, len + hdrlen);
|
|
|
|
buf = xmalloc(size);
|
2005-07-11 02:25:38 +04:00
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
/* Compress it */
|
|
|
|
stream.next_out = buf;
|
|
|
|
stream.avail_out = size;
|
2005-07-11 02:25:38 +04:00
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
/* First header.. */
|
|
|
|
stream.next_in = (void *)hdr;
|
|
|
|
stream.avail_in = hdrlen;
|
|
|
|
while (deflate(&stream, 0) == Z_OK)
|
|
|
|
/* nothing */;
|
2005-08-08 22:45:36 +04:00
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
/* Then the data itself.. */
|
|
|
|
stream.next_in = unpacked;
|
|
|
|
stream.avail_in = len;
|
|
|
|
while (deflate(&stream, Z_FINISH) == Z_OK)
|
|
|
|
/* nothing */;
|
|
|
|
deflateEnd(&stream);
|
|
|
|
free(unpacked);
|
2005-08-08 22:45:36 +04:00
|
|
|
|
2006-05-24 19:30:54 +04:00
|
|
|
*objsize = stream.total_out;
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Copy the compressed loose-object representation of sha1 to fd.
 * Prefers the mmap'ed loose object file; if only a packed copy
 * exists, repack_object() rebuilds the loose representation.
 * Returns write_buffer()'s result.
 */
int write_sha1_to_fd(int fd, const unsigned char *sha1)
{
	unsigned long objsize;
	void *mapped = map_sha1_file(sha1, &objsize);
	int status;

	if (mapped) {
		status = write_buffer(fd, mapped, objsize);
		munmap(mapped, objsize);
	} else {
		void *packed = repack_object(sha1, &objsize);
		status = write_buffer(fd, packed, objsize);
		free(packed);
	}
	return status;
}
|
|
|
|
|
2005-08-03 03:46:29 +04:00
|
|
|
/*
 * Read a deflated loose object from fd and store it in the object
 * database under the name sha1, verifying the content hash as it
 * streams through.
 *
 * buffer/bufsize/bufposn form a caller-owned read-ahead buffer:
 * on entry *bufposn bytes are already available; on return any
 * bytes read past the end of the object's zlib stream are left at
 * the front of buffer with *bufposn updated, so the caller can
 * continue parsing the protocol stream.
 *
 * Returns 0 on success, negative on error (temporary file is
 * unlinked on every failure path).
 */
int write_sha1_from_fd(const unsigned char *sha1, int fd, char *buffer,
		       size_t bufsize, size_t *bufposn)
{
	char tmpfile[PATH_MAX];
	int local;			/* fd of the temporary object file */
	z_stream stream;
	unsigned char real_sha1[20];	/* hash computed from inflated data */
	unsigned char discard[4096];	/* inflate target; data only hashed, not kept */
	int ret;
	SHA_CTX c;

	/* Write into a temp file first; rename into place only after verifying. */
	snprintf(tmpfile, sizeof(tmpfile), "%s/obj_XXXXXX", get_object_directory());

	local = mkstemp(tmpfile);
	if (local < 0) {
		if (errno == EPERM)
			return error("insufficient permission for adding an object to repository database %s\n", get_object_directory());
		else
			return error("unable to create temporary sha1 filename %s: %s\n", tmpfile, strerror(errno));
	}

	memset(&stream, 0, sizeof(stream));

	inflateInit(&stream);

	SHA1_Init(&c);

	do {
		ssize_t size;
		if (*bufposn) {
			/* Inflate whatever is buffered, hashing the output. */
			stream.avail_in = *bufposn;
			stream.next_in = (unsigned char *) buffer;
			do {
				stream.next_out = discard;
				stream.avail_out = sizeof(discard);
				ret = inflate(&stream, Z_SYNC_FLUSH);
				/* bytes produced = capacity - what's left */
				SHA1_Update(&c, discard, sizeof(discard) -
					    stream.avail_out);
			} while (stream.avail_in && ret == Z_OK);
			/* Persist exactly the bytes inflate consumed (still deflated). */
			if (write_buffer(local, buffer, *bufposn - stream.avail_in) < 0)
				die("unable to write sha1 file");
			/* Slide unconsumed trailing bytes to the buffer front. */
			memmove(buffer, buffer + *bufposn - stream.avail_in,
				stream.avail_in);
			*bufposn = stream.avail_in;
			/* Z_STREAM_END (done) or an error: stop reading. */
			if (ret != Z_OK)
				break;
		}
		size = xread(fd, buffer + *bufposn, bufsize - *bufposn);
		if (size <= 0) {
			close(local);
			unlink(tmpfile);
			if (!size)
				return error("Connection closed?");
			perror("Reading from connection");
			return -1;
		}
		*bufposn += size;
	} while (1);
	inflateEnd(&stream);
	close(local);
	SHA1_Final(real_sha1, &c);
	/* Anything but a clean end-of-stream means truncated/corrupt input. */
	if (ret != Z_STREAM_END) {
		unlink(tmpfile);
		return error("File %s corrupted", sha1_to_hex(sha1));
	}
	/* The inflated content must hash to the name we were promised. */
	if (hashcmp(sha1, real_sha1)) {
		unlink(tmpfile);
		return error("File %s has bad hash", sha1_to_hex(sha1));
	}

	return move_temp_to_file(tmpfile, sha1_file_name(sha1));
}
|
|
|
|
|
2005-08-01 04:53:44 +04:00
|
|
|
/* Does a pack index (.idx) for this pack exist on disk? */
int has_pack_index(const unsigned char *sha1)
{
	struct stat st;

	/* stat() returns 0 when the file exists */
	return !stat(sha1_pack_index_name(sha1), &st);
}
|
|
|
|
|
|
|
|
/* Does the pack data file (.pack) for this pack exist on disk? */
int has_pack_file(const unsigned char *sha1)
{
	struct stat st;

	/* stat() returns 0 when the file exists */
	return !stat(sha1_pack_name(sha1), &st);
}
|
|
|
|
|
2006-09-06 13:12:09 +04:00
|
|
|
/*
 * Is the object available from some pack?  Packs whose names appear
 * in the NULL-terminated ignore_packed list are skipped (semantics
 * live in find_pack_entry(); return value is passed through as-is).
 */
int has_sha1_pack(const unsigned char *sha1, const char **ignore_packed)
{
	struct pack_entry e;	/* throwaway: we only care whether it's found */
	return find_pack_entry(sha1, &e, ignore_packed);
}
|
|
|
|
|
2005-04-24 05:47:23 +04:00
|
|
|
int has_sha1_file(const unsigned char *sha1)
|
|
|
|
{
|
|
|
|
struct stat st;
|
2005-06-27 14:35:33 +04:00
|
|
|
struct pack_entry e;
|
|
|
|
|
2006-09-06 13:12:09 +04:00
|
|
|
if (find_pack_entry(sha1, &e, NULL))
|
2005-06-27 14:35:33 +04:00
|
|
|
return 1;
|
2005-07-11 11:00:55 +04:00
|
|
|
return find_sha1_file(sha1, &st) ? 1 : 0;
|
2005-04-24 05:47:23 +04:00
|
|
|
}
|
2005-05-02 10:45:49 +04:00
|
|
|
|
2006-05-23 22:19:04 +04:00
|
|
|
/*
 * Slurp fd to EOF into *return_buf, growing the buffer by doubling
 * (xrealloc) whenever it fills up.
 *
 * On return *return_buf holds the (possibly reallocated) buffer and
 * *return_size the number of bytes actually read.
 *
 * Returns 0 on success, -1 on read error.  Either way the caller
 * owns the buffer and must free() it.
 */
int read_pipe(int fd, char** return_buf, unsigned long* return_size)
{
	char *data = *return_buf;
	unsigned long capacity = *return_size;
	unsigned long filled = 0;
	int nread;

	for (;;) {
		nread = xread(fd, data + filled, capacity - filled);
		if (nread <= 0)
			break;	/* EOF (0) or error (<0) */
		filled += nread;
		if (filled == capacity) {
			capacity *= 2;
			data = xrealloc(data, capacity);
		}
	}

	*return_buf = data;
	*return_size = filled;

	return (nread < 0) ? -1 : 0;
}
|
|
|
|
|
|
|
|
int index_pipe(unsigned char *sha1, int fd, const char *type, int write_object)
|
|
|
|
{
|
|
|
|
unsigned long size = 4096;
|
2006-09-01 02:32:39 +04:00
|
|
|
char *buf = xmalloc(size);
|
2006-05-23 22:19:04 +04:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (read_pipe(fd, &buf, &size)) {
|
2005-12-11 01:25:24 +03:00
|
|
|
free(buf);
|
|
|
|
return -1;
|
|
|
|
}
|
2006-05-23 22:19:04 +04:00
|
|
|
|
2005-12-11 01:25:24 +03:00
|
|
|
if (!type)
|
2006-04-02 16:44:09 +04:00
|
|
|
type = blob_type;
|
2005-12-11 01:25:24 +03:00
|
|
|
if (write_object)
|
2006-05-23 22:19:04 +04:00
|
|
|
ret = write_sha1_file(buf, size, type, sha1);
|
2006-10-14 14:45:36 +04:00
|
|
|
else
|
|
|
|
ret = hash_sha1_file(buf, size, type, sha1);
|
2005-12-11 01:25:24 +03:00
|
|
|
free(buf);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-02-28 22:45:56 +03:00
|
|
|
int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
|
2007-02-28 22:52:04 +03:00
|
|
|
enum object_type type, const char *path)
|
2005-05-02 10:45:49 +04:00
|
|
|
{
|
2007-03-07 04:44:37 +03:00
|
|
|
size_t size = xsize_t(st->st_size);
|
2007-03-07 04:44:17 +03:00
|
|
|
void *buf = NULL;
|
Lazy man's auto-CRLF
It currently does NOT know about file attributes, so it does its
conversion purely based on content. Maybe that is more in the "git
philosophy" anyway, since content is king, but I think we should try to do
the file attributes to turn it off on demand.
Anyway, BY DEFAULT it is off regardless, because it requires a
[core]
AutoCRLF = true
in your config file to be enabled. We could make that the default for
Windows, of course, the same way we do some other things (filemode etc).
But you can actually enable it on UNIX, and it will cause:
- "git update-index" will write blobs without CRLF
- "git diff" will diff working tree files without CRLF
- "git checkout" will write files to the working tree _with_ CRLF
and things work fine.
Funnily, it actually shows an odd file in git itself:
git clone -n git test-crlf
cd test-crlf
git config core.autocrlf true
git checkout
git diff
shows a diff for "Documentation/docbook-xsl.css". Why? Because we have
actually checked in that file *with* CRLF! So when "core.autocrlf" is
true, we'll always generate a *different* hash for it in the index,
because the index hash will be for the content _without_ CRLF.
Is this complete? I dunno. It seems to work for me. It doesn't use the
filename at all right now, and that's probably a deficiency (we could
certainly make the "is_binary()" heuristics also take standard filename
heuristics into account).
I don't pass in the filename at all for the "index_fd()" case
(git-update-index), so that would need to be passed around, but this
actually works fine.
NOTE NOTE NOTE! The "is_binary()" heuristics are totally made-up by yours
truly. I will not guarantee that they work at all reasonable. Caveat
emptor. But it _is_ simple, and it _is_ safe, since it's all off by
default.
The patch is pretty simple - the biggest part is the new "convert.c" file,
but even that is really just basic stuff that anybody can write in
"Teaching C 101" as a final project for their first class in programming.
Not to say that it's bug-free, of course - but at least we're not talking
about rocket surgery here.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-02-13 22:07:23 +03:00
|
|
|
int ret, re_allocated = 0;
|
2005-05-02 10:45:49 +04:00
|
|
|
|
|
|
|
if (size)
|
2006-12-24 08:47:23 +03:00
|
|
|
buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
|
2005-05-02 10:45:49 +04:00
|
|
|
close(fd);
|
|
|
|
|
2005-07-09 03:51:55 +04:00
|
|
|
if (!type)
|
2007-02-28 22:45:56 +03:00
|
|
|
type = OBJ_BLOB;
|
Lazy man's auto-CRLF
It currently does NOT know about file attributes, so it does its
conversion purely based on content. Maybe that is more in the "git
philosophy" anyway, since content is king, but I think we should try to do
the file attributes to turn it off on demand.
Anyway, BY DEFAULT it is off regardless, because it requires a
[core]
AutoCRLF = true
in your config file to be enabled. We could make that the default for
Windows, of course, the same way we do some other things (filemode etc).
But you can actually enable it on UNIX, and it will cause:
- "git update-index" will write blobs without CRLF
- "git diff" will diff working tree files without CRLF
- "git checkout" will write files to the working tree _with_ CRLF
and things work fine.
Funnily, it actually shows an odd file in git itself:
git clone -n git test-crlf
cd test-crlf
git config core.autocrlf true
git checkout
git diff
shows a diff for "Documentation/docbook-xsl.css". Why? Because we have
actually checked in that file *with* CRLF! So when "core.autocrlf" is
true, we'll always generate a *different* hash for it in the index,
because the index hash will be for the content _without_ CRLF.
Is this complete? I dunno. It seems to work for me. It doesn't use the
filename at all right now, and that's probably a deficiency (we could
certainly make the "is_binary()" heuristics also take standard filename
heuristics into account).
I don't pass in the filename at all for the "index_fd()" case
(git-update-index), so that would need to be passed around, but this
actually works fine.
NOTE NOTE NOTE! The "is_binary()" heuristics are totally made-up by yours
truly. I will not guarantee that they work at all reasonable. Caveat
emptor. But it _is_ simple, and it _is_ safe, since it's all off by
default.
The patch is pretty simple - the biggest part is the new "convert.c" file,
but even that is really just basic stuff that anybody can write in
"Teaching C 101" as a final project for their first class in programming.
Not to say that it's bug-free, of course - but at least we're not talking
about rocket surgery here.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-02-13 22:07:23 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert blobs to git internal format
|
|
|
|
*/
|
2007-02-28 22:57:39 +03:00
|
|
|
if ((type == OBJ_BLOB) && S_ISREG(st->st_mode)) {
|
Lazy man's auto-CRLF
It currently does NOT know about file attributes, so it does its
conversion purely based on content. Maybe that is more in the "git
philosophy" anyway, since content is king, but I think we should try to do
the file attributes to turn it off on demand.
Anyway, BY DEFAULT it is off regardless, because it requires a
[core]
AutoCRLF = true
in your config file to be enabled. We could make that the default for
Windows, of course, the same way we do some other things (filemode etc).
But you can actually enable it on UNIX, and it will cause:
- "git update-index" will write blobs without CRLF
- "git diff" will diff working tree files without CRLF
- "git checkout" will write files to the working tree _with_ CRLF
and things work fine.
Funnily, it actually shows an odd file in git itself:
git clone -n git test-crlf
cd test-crlf
git config core.autocrlf true
git checkout
git diff
shows a diff for "Documentation/docbook-xsl.css". Why? Because we have
actually checked in that file *with* CRLF! So when "core.autocrlf" is
true, we'll always generate a *different* hash for it in the index,
because the index hash will be for the content _without_ CRLF.
Is this complete? I dunno. It seems to work for me. It doesn't use the
filename at all right now, and that's probably a deficiency (we could
certainly make the "is_binary()" heuristics also take standard filename
heuristics into account).
I don't pass in the filename at all for the "index_fd()" case
(git-update-index), so that would need to be passed around, but this
actually works fine.
NOTE NOTE NOTE! The "is_binary()" heuristics are totally made-up by yours
truly. I will not guarantee that they work at all reasonable. Caveat
emptor. But it _is_ simple, and it _is_ safe, since it's all off by
default.
The patch is pretty simple - the biggest part is the new "convert.c" file,
but even that is really just basic stuff that anybody can write in
"Teaching C 101" as a final project for their first class in programming.
Not to say that it's bug-free, of course - but at least we're not talking
about rocket surgery here.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-02-13 22:07:23 +03:00
|
|
|
unsigned long nsize = size;
|
|
|
|
char *nbuf = buf;
|
2007-02-28 22:52:04 +03:00
|
|
|
if (convert_to_git(path, &nbuf, &nsize)) {
|
Lazy man's auto-CRLF
It currently does NOT know about file attributes, so it does its
conversion purely based on content. Maybe that is more in the "git
philosophy" anyway, since content is king, but I think we should try to do
the file attributes to turn it off on demand.
Anyway, BY DEFAULT it is off regardless, because it requires a
[core]
AutoCRLF = true
in your config file to be enabled. We could make that the default for
Windows, of course, the same way we do some other things (filemode etc).
But you can actually enable it on UNIX, and it will cause:
- "git update-index" will write blobs without CRLF
- "git diff" will diff working tree files without CRLF
- "git checkout" will write files to the working tree _with_ CRLF
and things work fine.
Funnily, it actually shows an odd file in git itself:
git clone -n git test-crlf
cd test-crlf
git config core.autocrlf true
git checkout
git diff
shows a diff for "Documentation/docbook-xsl.css". Why? Because we have
actually checked in that file *with* CRLF! So when "core.autocrlf" is
true, we'll always generate a *different* hash for it in the index,
because the index hash will be for the content _without_ CRLF.
Is this complete? I dunno. It seems to work for me. It doesn't use the
filename at all right now, and that's probably a deficiency (we could
certainly make the "is_binary()" heuristics also take standard filename
heuristics into account).
I don't pass in the filename at all for the "index_fd()" case
(git-update-index), so that would need to be passed around, but this
actually works fine.
NOTE NOTE NOTE! The "is_binary()" heuristics are totally made-up by yours
truly. I will not guarantee that they work at all reasonably. Caveat
emptor. But it _is_ simple, and it _is_ safe, since it's all off by
default.
The patch is pretty simple - the biggest part is the new "convert.c" file,
but even that is really just basic stuff that anybody can write in
"Teaching C 101" as a final project for their first class in programming.
Not to say that it's bug-free, of course - but at least we're not talking
about rocket surgery here.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-02-13 22:07:23 +03:00
|
|
|
if (size)
|
|
|
|
munmap(buf, size);
|
|
|
|
size = nsize;
|
|
|
|
buf = nbuf;
|
|
|
|
re_allocated = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-07-09 03:51:55 +04:00
|
|
|
if (write_object)
|
2007-02-28 22:45:56 +03:00
|
|
|
ret = write_sha1_file(buf, size, typename(type), sha1);
|
2006-10-14 14:45:36 +04:00
|
|
|
else
|
2007-02-28 22:45:56 +03:00
|
|
|
ret = hash_sha1_file(buf, size, typename(type), sha1);
|
Lazy man's auto-CRLF
It currently does NOT know about file attributes, so it does its
conversion purely based on content. Maybe that is more in the "git
philosophy" anyway, since content is king, but I think we should try to do
the file attributes to turn it off on demand.
Anyway, BY DEFAULT it is off regardless, because it requires a
[core]
AutoCRLF = true
in your config file to be enabled. We could make that the default for
Windows, of course, the same way we do some other things (filemode etc).
But you can actually enable it on UNIX, and it will cause:
- "git update-index" will write blobs without CRLF
- "git diff" will diff working tree files without CRLF
- "git checkout" will write files to the working tree _with_ CRLF
and things work fine.
Funnily, it actually shows an odd file in git itself:
git clone -n git test-crlf
cd test-crlf
git config core.autocrlf true
git checkout
git diff
shows a diff for "Documentation/docbook-xsl.css". Why? Because we have
actually checked in that file *with* CRLF! So when "core.autocrlf" is
true, we'll always generate a *different* hash for it in the index,
because the index hash will be for the content _without_ CRLF.
Is this complete? I dunno. It seems to work for me. It doesn't use the
filename at all right now, and that's probably a deficiency (we could
certainly make the "is_binary()" heuristics also take standard filename
heuristics into account).
I don't pass in the filename at all for the "index_fd()" case
(git-update-index), so that would need to be passed around, but this
actually works fine.
NOTE NOTE NOTE! The "is_binary()" heuristics are totally made-up by yours
truly. I will not guarantee that they work at all reasonably. Caveat
emptor. But it _is_ simple, and it _is_ safe, since it's all off by
default.
The patch is pretty simple - the biggest part is the new "convert.c" file,
but even that is really just basic stuff that anybody can write in
"Teaching C 101" as a final project for their first class in programming.
Not to say that it's bug-free, of course - but at least we're not talking
about rocket surgery here.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-02-13 22:07:23 +03:00
|
|
|
if (re_allocated) {
|
|
|
|
free(buf);
|
|
|
|
return ret;
|
|
|
|
}
|
2005-05-03 22:46:16 +04:00
|
|
|
if (size)
|
|
|
|
munmap(buf, size);
|
|
|
|
return ret;
|
2005-05-02 10:45:49 +04:00
|
|
|
}
|
2005-10-07 14:42:00 +04:00
|
|
|
|
|
|
|
int index_path(unsigned char *sha1, const char *path, struct stat *st, int write_object)
|
|
|
|
{
|
|
|
|
int fd;
|
|
|
|
char *target;
|
2007-03-07 04:44:37 +03:00
|
|
|
size_t len;
|
2005-10-07 14:42:00 +04:00
|
|
|
|
|
|
|
switch (st->st_mode & S_IFMT) {
|
|
|
|
case S_IFREG:
|
|
|
|
fd = open(path, O_RDONLY);
|
|
|
|
if (fd < 0)
|
|
|
|
return error("open(\"%s\"): %s", path,
|
|
|
|
strerror(errno));
|
2007-02-28 22:52:04 +03:00
|
|
|
if (index_fd(sha1, fd, st, write_object, OBJ_BLOB, path) < 0)
|
2005-10-07 14:42:00 +04:00
|
|
|
return error("%s: failed to insert into database",
|
|
|
|
path);
|
|
|
|
break;
|
|
|
|
case S_IFLNK:
|
2007-03-07 04:44:37 +03:00
|
|
|
len = xsize_t(st->st_size);
|
|
|
|
target = xmalloc(len + 1);
|
|
|
|
if (readlink(path, target, len + 1) != st->st_size) {
|
2005-10-07 14:42:00 +04:00
|
|
|
char *errstr = strerror(errno);
|
|
|
|
free(target);
|
|
|
|
return error("readlink(\"%s\"): %s", path,
|
|
|
|
errstr);
|
|
|
|
}
|
2006-10-14 14:45:36 +04:00
|
|
|
if (!write_object)
|
2007-03-07 04:44:37 +03:00
|
|
|
hash_sha1_file(target, len, blob_type, sha1);
|
|
|
|
else if (write_sha1_file(target, len, blob_type, sha1))
|
2005-10-07 14:42:00 +04:00
|
|
|
return error("%s: failed to insert into database",
|
|
|
|
path);
|
|
|
|
free(target);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return error("%s: unsupported file type", path);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2007-01-23 08:55:18 +03:00
|
|
|
|
|
|
|
int read_pack_header(int fd, struct pack_header *header)
|
|
|
|
{
|
|
|
|
char *c = (char*)header;
|
|
|
|
ssize_t remaining = sizeof(struct pack_header);
|
|
|
|
do {
|
|
|
|
ssize_t r = xread(fd, c, remaining);
|
|
|
|
if (r <= 0)
|
|
|
|
/* "eof before pack header was fully read" */
|
|
|
|
return PH_ERROR_EOF;
|
|
|
|
remaining -= r;
|
|
|
|
c += r;
|
|
|
|
} while (remaining > 0);
|
|
|
|
if (header->hdr_signature != htonl(PACK_SIGNATURE))
|
|
|
|
/* "protocol error (pack signature mismatch detected)" */
|
|
|
|
return PH_ERROR_PACK_SIGNATURE;
|
|
|
|
if (!pack_version_ok(header->hdr_version))
|
|
|
|
/* "protocol error (pack version unsupported)" */
|
|
|
|
return PH_ERROR_PROTOCOL;
|
|
|
|
return 0;
|
|
|
|
}
|