#include "git-compat-util.h"
#include "environment.h"
#include "tag.h"
#include "object-name.h"
#include "object-store-ll.h"
#include "commit.h"
#include "tree.h"
#include "blob.h"
#include "alloc.h"
#include "gpg-interface.h"
#include "hex.h"
#include "packfile.h"

const char *tag_type = "tag";
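
/*
 * Verify the signature on a raw tag object buffer: split it into payload
 * and detached signature, check the signature, and (unless suppressed via
 * GPG_VERIFY_OMIT_STATUS) print the verification status.
 */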
static int run_gpg_verify(const char *buf, unsigned long size, unsigned flags)
{
	struct signature_check sigc;
	struct strbuf payload = STRBUF_INIT;
	struct strbuf signature = STRBUF_INIT;
	int ret;

	memset(&sigc, 0, sizeof(sigc));

	if (!parse_signature(buf, size, &payload, &signature)) {
		if (flags & GPG_VERIFY_VERBOSE)
			write_in_full(1, buf, size);
		return error("no signature found");
	}

	sigc.payload_type = SIGNATURE_PAYLOAD_TAG;
	sigc.payload = strbuf_detach(&payload, &sigc.payload_len);
	ret = check_signature(&sigc, signature.buf, signature.len);

	if (!(flags & GPG_VERIFY_OMIT_STATUS))
		print_signature_buffer(&sigc, flags);

	signature_check_clear(&sigc);
	strbuf_release(&payload);
	strbuf_release(&signature);
	return ret;
}
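
/*
 * Verify the signature on the tag object named by "oid". The object must
 * actually be a tag; errors are reported against "name_to_report" when
 * given, or an abbreviated object name otherwise.
 */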
int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
		unsigned flags)
{
	enum object_type type;
	char *buf;
	unsigned long size;
	int ret;

	type = oid_object_info(the_repository, oid, NULL);
	if (type != OBJ_TAG)
		return error("%s: cannot verify a non-tag object of type %s.",
				name_to_report ?
				name_to_report :
				repo_find_unique_abbrev(the_repository, oid, DEFAULT_ABBREV),
				type_name(type));

	buf = repo_read_object_file(the_repository, oid, &type, &size);
	if (!buf)
		return error("%s: unable to read file.",
				name_to_report ?
				name_to_report :
				repo_find_unique_abbrev(the_repository, oid, DEFAULT_ABBREV));

	ret = run_gpg_verify(buf, size, flags);

	free(buf);
	return ret;
}
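
/*
 * Peel a (possibly nested) tag until a non-tag object is reached. If the
 * chain ends at a missing object, report it against "warn" unless the
 * missing object is known to come from a promisor remote.
 */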
struct object *deref_tag(struct repository *r, struct object *o, const char *warn, int warnlen)
{
	struct object_id *last_oid = NULL;
	while (o && o->type == OBJ_TAG)
		if (((struct tag *)o)->tagged) {
			last_oid = &((struct tag *)o)->tagged->oid;
			o = parse_object(r, last_oid);
		} else {
			last_oid = NULL;
			o = NULL;
		}
	if (!o && warn) {
		if (last_oid && is_promisor_object(last_oid))
			return NULL;
		if (!warnlen)
			warnlen = strlen(warn);
		error("missing object referenced by '%.*s'", warnlen, warn);
	}
	return o;
}
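
/*
 * Like deref_tag(), but follow the recorded "tagged" pointer directly
 * instead of loading and parsing each pointed-to object, so the result is
 * not verified against the object database.
 */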
struct object *deref_tag_noverify(struct object *o)
{
	while (o && o->type == OBJ_TAG) {
		o = parse_object(the_repository, &o->oid);
		if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
			o = ((struct tag *)o)->tagged;
		else
			o = NULL;
	}
	return o;
}
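
/*
 * Return the struct tag for "oid", creating a not-yet-parsed placeholder
 * if the object has not been seen before.
 */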
struct tag *lookup_tag(struct repository *r, const struct object_id *oid)
{
	struct object *obj = lookup_object(r, oid);
	if (!obj)
		return create_object(r, oid, alloc_tag_node(r));
	return object_as_type(obj, OBJ_TAG, 0);
}
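
/*
 * Skip past the tagger name and email (up to the closing '>') and parse
 * the timestamp that follows; return 0 if the line is malformed.
 */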
static timestamp_t parse_tag_date(const char *buf, const char *tail)
{
	const char *dateptr;

	while (buf < tail && *buf++ != '>')
		/* nada */;
	if (buf >= tail)
		return 0;
	dateptr = buf;
	while (buf < tail && *buf++ != '\n')
		/* nada */;
	if (buf >= tail)
		return 0;
	/* dateptr < buf && buf[-1] == '\n', so parsing will stop at buf-1 */
	return parse_timestamp(dateptr, NULL, 10);
}
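
/*
 * Release the memory attached to a parsed tag and mark it unparsed so a
 * later parse_tag() can fill it in again.
 */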
void release_tag_memory(struct tag *t)
{
	free(t->tag);
	t->tagged = NULL;
	t->object.parsed = 0;
	t->date = 0;
}
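
/*
 * Parse the raw bytes of a tag object. The header layout this parser
 * expects is (hypothetical values shown for illustration):
 *
 *   object <hex object id>
 *   type commit
 *   tag v1.0
 *   tagger A U Thor <author@example.com> 1234567890 +0000
 *
 * followed by the tag message (and an optional signature), which this
 * parser does not examine further.
 */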
int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size)
{
	struct object_id oid;
	char type[20];
	const char *bufptr = data;
	const char *tail = bufptr + size;
	const char *nl;

	if (item->object.parsed)
		return 0;

	if (item->tag) {
		/*
		 * Presumably left over from a previous failed parse;
		 * clear it out in preparation for re-parsing (we'll probably
		 * hit the same error, which lets us tell our current caller
		 * about the problem).
		 */
		FREE_AND_NULL(item->tag);
	}

	if (size < the_hash_algo->hexsz + 24)
		return -1;
	if (memcmp("object ", bufptr, 7) || parse_oid_hex(bufptr + 7, &oid, &bufptr) || *bufptr++ != '\n')
		return -1;

	if (!starts_with(bufptr, "type "))
		return -1;
	bufptr += 5;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl || sizeof(type) <= (nl - bufptr))
		return -1;
	memcpy(type, bufptr, nl - bufptr);
	type[nl - bufptr] = '\0';
	bufptr = nl + 1;

	if (!strcmp(type, blob_type)) {
		item->tagged = (struct object *)lookup_blob(r, &oid);
	} else if (!strcmp(type, tree_type)) {
		item->tagged = (struct object *)lookup_tree(r, &oid);
	} else if (!strcmp(type, commit_type)) {
		item->tagged = (struct object *)lookup_commit(r, &oid);
	} else if (!strcmp(type, tag_type)) {
		item->tagged = (struct object *)lookup_tag(r, &oid);
	} else {
return error("unknown tag type '%s' in %s",
|
|
|
|
type, oid_to_hex(&item->object.oid));
|
2007-02-26 22:56:00 +03:00
|
|
|
}
|
|
|
|
|
	if (!item->tagged)
		return error("bad tag pointer to %s in %s",
			     oid_to_hex(&oid),
			     oid_to_hex(&item->object.oid));

	if (bufptr + 4 < tail && starts_with(bufptr, "tag "))
		; /* good */
	else
		return -1;
	bufptr += 4;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl)
		return -1;
	item->tag = xmemdupz(bufptr, nl - bufptr);
	bufptr = nl + 1;

	if (bufptr + 7 < tail && starts_with(bufptr, "tagger "))
		item->date = parse_tag_date(bufptr, tail);
	else
		item->date = 0;

	item->object.parsed = 1;
	return 0;
}
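
/*
 * Read the tag object named by item->object.oid from the object store and
 * parse it via parse_tag_buffer(); a no-op if it was already parsed.
 */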
int parse_tag(struct tag *item)
{
	enum object_type type;
	void *data;
	unsigned long size;
	int ret;

	if (item->object.parsed)
		return 0;
	data = repo_read_object_file(the_repository, &item->object.oid, &type,
				     &size);
	if (!data)
		return error("Could not read %s",
			     oid_to_hex(&item->object.oid));
	if (type != OBJ_TAG) {
		free(data);
		return error("Object %s not a tag",
			     oid_to_hex(&item->object.oid));
	}
	ret = parse_tag_buffer(the_repository, item, data, size);
	free(data);
	return ret;
}
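
/*
 * Return the object id a tag points at, dying if the tag is corrupt and
 * has no tagged object.
 */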
struct object_id *get_tagged_oid(struct tag *tag)
{
	if (!tag->tagged)
		die("bad tag");
	return &tag->tagged->oid;
}