Mirror of https://github.com/microsoft/git.git

Merge branch 'ls/filter-process-delayed' into jt/subprocess-handshake

* ls/filter-process-delayed:
  convert: add "status=delayed" to filter process protocol
  convert: refactor capabilities negotiation
  convert: move multiple file filter error handling to separate function
  convert: put the flags field before the flag itself for consistent style
  t0021: write "OUT <size>" only on success
  t0021: make debug log file name configurable
  t0021: keep filter log files on comparison

Commit 487fe1ffcd
Documentation/gitattributes.txt

@@ -425,8 +425,8 @@ packet: git< capability=clean
 packet: git< capability=smudge
 packet: git< 0000
 ------------------------
-Supported filter capabilities in version 2 are "clean" and
-"smudge".
+Supported filter capabilities in version 2 are "clean", "smudge",
+and "delay".
 
 Afterwards Git sends a list of "key=value" pairs terminated with
 a flush packet. The list will contain at least the filter command
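
Negotiation happens during the handshake: Git announces the capabilities it
knows, and the filter answers with the subset it actually implements. A filter
that supports all three would reply like this (illustrative sketch only, not
part of this commit; packet_txt_write() and packet_flush() are the pkt-line
helpers defined in t/t0021/rot13-filter.pl further down in this diff):
------------------------
packet_txt_write("capability=clean");
packet_txt_write("capability=smudge");
packet_txt_write("capability=delay");
packet_flush();
------------------------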
@@ -512,12 +512,73 @@ the protocol then Git will stop the filter process and restart it
 with the next file that needs to be processed. Depending on the
 `filter.<driver>.required` flag Git will interpret that as error.
 
-After the filter has processed a blob it is expected to wait for
-the next "key=value" list containing a command. Git will close
+After the filter has processed a command it is expected to wait for
+a "key=value" list containing the next command. Git will close
 the command pipe on exit. The filter is expected to detect EOF
 and exit gracefully on its own. Git will wait until the filter
 process has stopped.
 
+Delay
+^^^^^
+
+If the filter supports the "delay" capability, then Git can send the
+flag "can-delay" after the filter command and pathname. This flag
+denotes that the filter can delay filtering the current blob (e.g. to
+compensate network latencies) by responding with no content but with
+the status "delayed" and a flush packet.
+------------------------
+packet: git> command=smudge
+packet: git> pathname=path/testfile.dat
+packet: git> can-delay=1
+packet: git> 0000
+packet: git> CONTENT
+packet: git> 0000
+packet: git< status=delayed
+packet: git< 0000
+------------------------
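
A filter that uses this typically notes the pathname, answers immediately, and
fetches or computes the real content in the background. The following fragment
is an illustrative sketch only (not part of this commit): the packet_* helpers
are those defined in t/t0021/rot13-filter.pl further down, %DELAYED is a
hypothetical bookkeeping hash of paths the filter still owes Git, and
$command, $pathname, $can_delay and $smudged_content are assumed to have been
read earlier in the filter's main loop:
------------------------
if ( $can_delay ) {
	$DELAYED{$pathname} = 1;            # remember the path, resolve it later
	packet_txt_write("status=delayed"); # no content section follows
	packet_flush();
}
else {
	packet_txt_write("status=success"); # the usual non-delayed answer
	packet_flush();
	packet_bin_write($smudged_content); # one packet for brevity (max 65516 bytes per packet)
	packet_flush();
	packet_flush();                     # empty list keeps "status=success"
}
------------------------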
+
+If the filter supports the "delay" capability then it must support the
+"list_available_blobs" command. If Git sends this command, then the
+filter is expected to return a list of pathnames representing blobs
+that have been delayed earlier and are now available.
+The list must be terminated with a flush packet followed
+by a "success" status that is also terminated with a flush packet. If
+no blobs for the delayed paths are available, yet, then the filter is
+expected to block the response until at least one blob becomes
+available. The filter can tell Git that it has no more delayed blobs
+by sending an empty list. As soon as the filter responds with an empty
+list, Git stops asking. All blobs that Git has not received at this
+point are considered missing and will result in an error.
+
+------------------------
+packet: git> command=list_available_blobs
+packet: git> 0000
+packet: git< pathname=path/testfile.dat
+packet: git< pathname=path/otherfile.dat
+packet: git< 0000
+packet: git< status=success
+packet: git< 0000
+------------------------
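
For example, once a delayed blob has been fetched, the filter could move it
from the hypothetical %DELAYED bookkeeping into a %READY hash (pathname mapped
to smudged content) and drain %READY whenever Git asks. Again an illustrative
sketch only, with the same assumed helpers:
------------------------
if ( $command eq "list_available_blobs" ) {
	packet_bin_read();                  # consume the flush packet after the command
	# Block here until at least one delayed blob is ready, unless nothing
	# is delayed anymore; then the empty list below tells Git to stop asking.
	foreach my $path ( sort keys %READY ) {
		packet_txt_write("pathname=$path");
	}
	packet_flush();                     # terminates the (possibly empty) list
	packet_txt_write("status=success");
	packet_flush();
}
------------------------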
+
+After Git received the pathnames, it will request the corresponding
+blobs again. These requests contain a pathname and an empty content
+section. The filter is expected to respond with the smudged content
+in the usual way as explained above.
+------------------------
+packet: git> command=smudge
+packet: git> pathname=path/testfile.dat
+packet: git> 0000
+packet: git> 0000 # empty content!
+packet: git< status=success
+packet: git< 0000
+packet: git< SMUDGED_CONTENT
+packet: git< 0000
+packet: git< 0000 # empty list, keep "status=success" unchanged!
+------------------------
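
In the sketch above this amounts to replaying the stored content for the
re-requested path (illustrative only; %READY as before):
------------------------
if ( $command eq "smudge" and exists $READY{$pathname} ) {
	packet_bin_read();                   # empty content section: only a flush packet
	packet_txt_write("status=success");
	packet_flush();
	packet_bin_write($READY{$pathname}); # one packet for brevity
	packet_flush();
	packet_flush();                      # empty list keeps "status=success"
	delete $READY{$pathname};
}
------------------------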
+
 Example
 ^^^^^^^
 
 A long running filter demo implementation can be found in
 `contrib/long-running-filter/example.pl` located in the Git
 core repository. If you develop your own long running filter
builtin/checkout.c

@@ -358,6 +358,8 @@ static int checkout_paths(const struct checkout_opts *opts,
 	state.force = 1;
 	state.refresh_cache = 1;
 	state.istate = &the_index;
+
+	enable_delayed_checkout(&state);
 	for (pos = 0; pos < active_nr; pos++) {
 		struct cache_entry *ce = active_cache[pos];
 		if (ce->ce_flags & CE_MATCHED) {
@@ -372,6 +374,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 			pos = skip_same_name(ce, pos) - 1;
 		}
 	}
+	errs |= finish_delayed_checkout(&state);
 
 	if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
 		die(_("unable to write new index file"));
cache.h (3 changed lines)

@@ -1492,6 +1492,7 @@ struct checkout {
 	struct index_state *istate;
 	const char *base_dir;
 	int base_dir_len;
+	struct delayed_checkout *delayed_checkout;
 	unsigned force:1,
 		 quiet:1,
 		 not_new:1,
@@ -1501,6 +1502,8 @@ struct checkout {
 
 #define TEMPORARY_FILENAME_LENGTH 25
 extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath);
+extern void enable_delayed_checkout(struct checkout *state);
+extern int finish_delayed_checkout(struct checkout *state);
 
 struct cache_def {
 	struct strbuf path;
convert.c (202 changed lines)
@ -501,6 +501,7 @@ static int apply_single_file_filter(const char *path, const char *src, size_t le
|
|||
|
||||
#define CAP_CLEAN (1u<<0)
|
||||
#define CAP_SMUDGE (1u<<1)
|
||||
#define CAP_DELAY (1u<<2)
|
||||
|
||||
struct cmd2process {
|
||||
struct subprocess_entry subprocess; /* must be the first member! */
|
||||
|
@ -512,7 +513,7 @@ static struct hashmap subprocess_map;
|
|||
|
||||
static int start_multi_file_filter_fn(struct subprocess_entry *subprocess)
|
||||
{
|
||||
int err;
|
||||
int err, i;
|
||||
struct cmd2process *entry = (struct cmd2process *)subprocess;
|
||||
struct string_list cap_list = STRING_LIST_INIT_NODUP;
|
||||
char *cap_buf;
|
||||
|
@ -520,6 +521,15 @@ static int start_multi_file_filter_fn(struct subprocess_entry *subprocess)
|
|||
struct child_process *process = &subprocess->process;
|
||||
const char *cmd = subprocess->cmd;
|
||||
|
||||
static const struct {
|
||||
const char *name;
|
||||
unsigned int cap;
|
||||
} known_caps[] = {
|
||||
{ "clean", CAP_CLEAN },
|
||||
{ "smudge", CAP_SMUDGE },
|
||||
{ "delay", CAP_DELAY },
|
||||
};
|
||||
|
||||
sigchain_push(SIGPIPE, SIG_IGN);
|
||||
|
||||
err = packet_writel(process->in, "git-filter-client", "version=2", NULL);
|
||||
|
@ -538,7 +548,15 @@ static int start_multi_file_filter_fn(struct subprocess_entry *subprocess)
|
|||
if (err)
|
||||
goto done;
|
||||
|
||||
err = packet_writel(process->in, "capability=clean", "capability=smudge", NULL);
|
||||
for (i = 0; i < ARRAY_SIZE(known_caps); ++i) {
|
||||
err = packet_write_fmt_gently(
|
||||
process->in, "capability=%s\n", known_caps[i].name);
|
||||
if (err)
|
||||
goto done;
|
||||
}
|
||||
err = packet_flush_gently(process->in);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
for (;;) {
|
||||
cap_buf = packet_read_line(process->out, NULL);
|
||||
|
@ -550,16 +568,15 @@ static int start_multi_file_filter_fn(struct subprocess_entry *subprocess)
|
|||
continue;
|
||||
|
||||
cap_name = cap_list.items[1].string;
|
||||
if (!strcmp(cap_name, "clean")) {
|
||||
entry->supported_capabilities |= CAP_CLEAN;
|
||||
} else if (!strcmp(cap_name, "smudge")) {
|
||||
entry->supported_capabilities |= CAP_SMUDGE;
|
||||
} else {
|
||||
warning(
|
||||
"external filter '%s' requested unsupported filter capability '%s'",
|
||||
cmd, cap_name
|
||||
);
|
||||
}
|
||||
i = ARRAY_SIZE(known_caps) - 1;
|
||||
while (i >= 0 && strcmp(cap_name, known_caps[i].name))
|
||||
i--;
|
||||
|
||||
if (i >= 0)
|
||||
entry->supported_capabilities |= known_caps[i].cap;
|
||||
else
|
||||
warning("external filter '%s' requested unsupported filter capability '%s'",
|
||||
cmd, cap_name);
|
||||
|
||||
string_list_clear(&cap_list, 0);
|
||||
}
|
||||
|
@ -570,11 +587,36 @@ done:
|
|||
return err;
|
||||
}
|
||||
|
||||
static void handle_filter_error(const struct strbuf *filter_status,
|
||||
struct cmd2process *entry,
|
||||
const unsigned int wanted_capability) {
|
||||
if (!strcmp(filter_status->buf, "error"))
|
||||
; /* The filter signaled a problem with the file. */
|
||||
else if (!strcmp(filter_status->buf, "abort") && wanted_capability) {
|
||||
/*
|
||||
* The filter signaled a permanent problem. Don't try to filter
|
||||
* files with the same command for the lifetime of the current
|
||||
* Git process.
|
||||
*/
|
||||
entry->supported_capabilities &= ~wanted_capability;
|
||||
} else {
|
||||
/*
|
||||
* Something went wrong with the protocol filter.
|
||||
* Force shutdown and restart if another blob requires filtering.
|
||||
*/
|
||||
error("external filter '%s' failed", entry->subprocess.cmd);
|
||||
subprocess_stop(&subprocess_map, &entry->subprocess);
|
||||
free(entry);
|
||||
}
|
||||
}
|
||||
|
||||
static int apply_multi_file_filter(const char *path, const char *src, size_t len,
|
||||
int fd, struct strbuf *dst, const char *cmd,
|
||||
const unsigned int wanted_capability)
|
||||
const unsigned int wanted_capability,
|
||||
struct delayed_checkout *dco)
|
||||
{
|
||||
int err;
|
||||
int can_delay = 0;
|
||||
struct cmd2process *entry;
|
||||
struct child_process *process;
|
||||
struct strbuf nbuf = STRBUF_INIT;
|
||||
|
@ -603,12 +645,12 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
|
|||
}
|
||||
process = &entry->subprocess.process;
|
||||
|
||||
if (!(wanted_capability & entry->supported_capabilities))
|
||||
if (!(entry->supported_capabilities & wanted_capability))
|
||||
return 0;
|
||||
|
||||
if (CAP_CLEAN & wanted_capability)
|
||||
if (wanted_capability & CAP_CLEAN)
|
||||
filter_type = "clean";
|
||||
else if (CAP_SMUDGE & wanted_capability)
|
||||
else if (wanted_capability & CAP_SMUDGE)
|
||||
filter_type = "smudge";
|
||||
else
|
||||
die("unexpected filter type");
|
||||
|
@ -630,6 +672,14 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
|
|||
if (err)
|
||||
goto done;
|
||||
|
||||
if ((entry->supported_capabilities & CAP_DELAY) &&
|
||||
dco && dco->state == CE_CAN_DELAY) {
|
||||
can_delay = 1;
|
||||
err = packet_write_fmt_gently(process->in, "can-delay=1\n");
|
||||
if (err)
|
||||
goto done;
|
||||
}
|
||||
|
||||
err = packet_flush_gently(process->in);
|
||||
if (err)
|
||||
goto done;
|
||||
|
@ -645,14 +695,73 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
|
|||
if (err)
|
||||
goto done;
|
||||
|
||||
err = strcmp(filter_status.buf, "success");
|
||||
if (can_delay && !strcmp(filter_status.buf, "delayed")) {
|
||||
string_list_insert(&dco->filters, cmd);
|
||||
string_list_insert(&dco->paths, path);
|
||||
} else {
|
||||
/* The filter got the blob and wants to send us a response. */
|
||||
err = strcmp(filter_status.buf, "success");
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
err = read_packetized_to_strbuf(process->out, &nbuf) < 0;
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
err = subprocess_read_status(process->out, &filter_status);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
err = strcmp(filter_status.buf, "success");
|
||||
}
|
||||
|
||||
done:
|
||||
sigchain_pop(SIGPIPE);
|
||||
|
||||
if (err)
|
||||
handle_filter_error(&filter_status, entry, wanted_capability);
|
||||
else
|
||||
strbuf_swap(dst, &nbuf);
|
||||
strbuf_release(&nbuf);
|
||||
return !err;
|
||||
}
|
||||
|
||||
|
||||
int async_query_available_blobs(const char *cmd, struct string_list *available_paths)
|
||||
{
|
||||
int err;
|
||||
char *line;
|
||||
struct cmd2process *entry;
|
||||
struct child_process *process;
|
||||
struct strbuf filter_status = STRBUF_INIT;
|
||||
|
||||
assert(subprocess_map_initialized);
|
||||
entry = (struct cmd2process *)subprocess_find_entry(&subprocess_map, cmd);
|
||||
if (!entry) {
|
||||
error("external filter '%s' is not available anymore although "
|
||||
"not all paths have been filtered", cmd);
|
||||
return 0;
|
||||
}
|
||||
process = &entry->subprocess.process;
|
||||
sigchain_push(SIGPIPE, SIG_IGN);
|
||||
|
||||
err = packet_write_fmt_gently(
|
||||
process->in, "command=list_available_blobs\n");
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
err = read_packetized_to_strbuf(process->out, &nbuf) < 0;
|
||||
err = packet_flush_gently(process->in);
|
||||
if (err)
|
||||
goto done;
|
||||
|
||||
while ((line = packet_read_line(process->out, NULL))) {
|
||||
const char *path;
|
||||
if (skip_prefix(line, "pathname=", &path))
|
||||
string_list_insert(available_paths, xstrdup(path));
|
||||
else
|
||||
; /* ignore unknown keys */
|
||||
}
|
||||
|
||||
err = subprocess_read_status(process->out, &filter_status);
|
||||
if (err)
|
||||
goto done;
|
||||
|
@ -662,29 +771,8 @@ static int apply_multi_file_filter(const char *path, const char *src, size_t len
|
|||
done:
|
||||
sigchain_pop(SIGPIPE);
|
||||
|
||||
if (err) {
|
||||
if (!strcmp(filter_status.buf, "error")) {
|
||||
/* The filter signaled a problem with the file. */
|
||||
} else if (!strcmp(filter_status.buf, "abort")) {
|
||||
/*
|
||||
* The filter signaled a permanent problem. Don't try to filter
|
||||
* files with the same command for the lifetime of the current
|
||||
* Git process.
|
||||
*/
|
||||
entry->supported_capabilities &= ~wanted_capability;
|
||||
} else {
|
||||
/*
|
||||
* Something went wrong with the protocol filter.
|
||||
* Force shutdown and restart if another blob requires filtering.
|
||||
*/
|
||||
error("external filter '%s' failed", cmd);
|
||||
subprocess_stop(&subprocess_map, &entry->subprocess);
|
||||
free(entry);
|
||||
}
|
||||
} else {
|
||||
strbuf_swap(dst, &nbuf);
|
||||
}
|
||||
strbuf_release(&nbuf);
|
||||
if (err)
|
||||
handle_filter_error(&filter_status, entry, 0);
|
||||
return !err;
|
||||
}
|
||||
|
||||
|
@ -699,7 +787,8 @@ static struct convert_driver {
|
|||
|
||||
static int apply_filter(const char *path, const char *src, size_t len,
|
||||
int fd, struct strbuf *dst, struct convert_driver *drv,
|
||||
const unsigned int wanted_capability)
|
||||
const unsigned int wanted_capability,
|
||||
struct delayed_checkout *dco)
|
||||
{
|
||||
const char *cmd = NULL;
|
||||
|
||||
|
@ -709,15 +798,16 @@ static int apply_filter(const char *path, const char *src, size_t len,
|
|||
if (!dst)
|
||||
return 1;
|
||||
|
||||
if ((CAP_CLEAN & wanted_capability) && !drv->process && drv->clean)
|
||||
if ((wanted_capability & CAP_CLEAN) && !drv->process && drv->clean)
|
||||
cmd = drv->clean;
|
||||
else if ((CAP_SMUDGE & wanted_capability) && !drv->process && drv->smudge)
|
||||
else if ((wanted_capability & CAP_SMUDGE) && !drv->process && drv->smudge)
|
||||
cmd = drv->smudge;
|
||||
|
||||
if (cmd && *cmd)
|
||||
return apply_single_file_filter(path, src, len, fd, dst, cmd);
|
||||
else if (drv->process && *drv->process)
|
||||
return apply_multi_file_filter(path, src, len, fd, dst, drv->process, wanted_capability);
|
||||
return apply_multi_file_filter(path, src, len, fd, dst,
|
||||
drv->process, wanted_capability, dco);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1058,7 +1148,7 @@ int would_convert_to_git_filter_fd(const char *path)
|
|||
if (!ca.drv->required)
|
||||
return 0;
|
||||
|
||||
return apply_filter(path, NULL, 0, -1, NULL, ca.drv, CAP_CLEAN);
|
||||
return apply_filter(path, NULL, 0, -1, NULL, ca.drv, CAP_CLEAN, NULL);
|
||||
}
|
||||
|
||||
const char *get_convert_attr_ascii(const char *path)
|
||||
|
@ -1096,7 +1186,7 @@ int convert_to_git(const struct index_state *istate,
|
|||
|
||||
convert_attrs(&ca, path);
|
||||
|
||||
ret |= apply_filter(path, src, len, -1, dst, ca.drv, CAP_CLEAN);
|
||||
ret |= apply_filter(path, src, len, -1, dst, ca.drv, CAP_CLEAN, NULL);
|
||||
if (!ret && ca.drv && ca.drv->required)
|
||||
die("%s: clean filter '%s' failed", path, ca.drv->name);
|
||||
|
||||
|
@ -1122,7 +1212,7 @@ void convert_to_git_filter_fd(const struct index_state *istate,
|
|||
assert(ca.drv);
|
||||
assert(ca.drv->clean || ca.drv->process);
|
||||
|
||||
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN))
|
||||
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
|
||||
die("%s: clean filter '%s' failed", path, ca.drv->name);
|
||||
|
||||
crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
|
||||
|
@ -1131,7 +1221,7 @@ void convert_to_git_filter_fd(const struct index_state *istate,
|
|||
|
||||
static int convert_to_working_tree_internal(const char *path, const char *src,
|
||||
size_t len, struct strbuf *dst,
|
||||
int normalizing)
|
||||
int normalizing, struct delayed_checkout *dco)
|
||||
{
|
||||
int ret = 0, ret_filter = 0;
|
||||
struct conv_attrs ca;
|
||||
|
@ -1156,22 +1246,30 @@ static int convert_to_working_tree_internal(const char *path, const char *src,
|
|||
}
|
||||
}
|
||||
|
||||
ret_filter = apply_filter(path, src, len, -1, dst, ca.drv, CAP_SMUDGE);
|
||||
ret_filter = apply_filter(
|
||||
path, src, len, -1, dst, ca.drv, CAP_SMUDGE, dco);
|
||||
if (!ret_filter && ca.drv && ca.drv->required)
|
||||
die("%s: smudge filter %s failed", path, ca.drv->name);
|
||||
|
||||
return ret | ret_filter;
|
||||
}
|
||||
|
||||
int async_convert_to_working_tree(const char *path, const char *src,
|
||||
size_t len, struct strbuf *dst,
|
||||
void *dco)
|
||||
{
|
||||
return convert_to_working_tree_internal(path, src, len, dst, 0, dco);
|
||||
}
|
||||
|
||||
int convert_to_working_tree(const char *path, const char *src, size_t len, struct strbuf *dst)
|
||||
{
|
||||
return convert_to_working_tree_internal(path, src, len, dst, 0);
|
||||
return convert_to_working_tree_internal(path, src, len, dst, 0, NULL);
|
||||
}
|
||||
|
||||
int renormalize_buffer(const struct index_state *istate, const char *path,
|
||||
const char *src, size_t len, struct strbuf *dst)
|
||||
{
|
||||
int ret = convert_to_working_tree_internal(path, src, len, dst, 1);
|
||||
int ret = convert_to_working_tree_internal(path, src, len, dst, 1, NULL);
|
||||
if (ret) {
|
||||
src = dst->buf;
|
||||
len = dst->len;
|
||||
|
|
convert.h (26 changed lines)

@@ -4,6 +4,8 @@
 #ifndef CONVERT_H
 #define CONVERT_H
 
+#include "string-list.h"
+
 struct index_state;
 
 enum safe_crlf {
@@ -34,6 +36,26 @@ enum eol {
 #endif
 };
 
+enum ce_delay_state {
+	CE_NO_DELAY = 0,
+	CE_CAN_DELAY = 1,
+	CE_RETRY = 2
+};
+
+struct delayed_checkout {
+	/*
+	 * State of the currently processed cache entry. If the state is
+	 * CE_CAN_DELAY, then the filter can delay the current cache entry.
+	 * If the state is CE_RETRY, then this signals the filter that the
+	 * cache entry was requested before.
+	 */
+	enum ce_delay_state state;
+	/* List of filter drivers that signaled delayed blobs. */
+	struct string_list filters;
+	/* List of delayed blobs identified by their path. */
+	struct string_list paths;
+};
+
 extern enum eol core_eol;
 extern const char *get_cached_convert_stats_ascii(const struct index_state *istate,
 						   const char *path);
@@ -46,6 +68,10 @@ extern int convert_to_git(const struct index_state *istate,
 			  struct strbuf *dst, enum safe_crlf checksafe);
 extern int convert_to_working_tree(const char *path, const char *src,
 				   size_t len, struct strbuf *dst);
+extern int async_convert_to_working_tree(const char *path, const char *src,
+					 size_t len, struct strbuf *dst,
+					 void *dco);
+extern int async_query_available_blobs(const char *cmd, struct string_list *available_paths);
 extern int renormalize_buffer(const struct index_state *istate,
 			      const char *path, const char *src, size_t len,
 			      struct strbuf *dst);
entry.c (132 changed lines)
@ -137,6 +137,105 @@ static int streaming_write_entry(const struct cache_entry *ce, char *path,
|
|||
return result;
|
||||
}
|
||||
|
||||
void enable_delayed_checkout(struct checkout *state)
|
||||
{
|
||||
if (!state->delayed_checkout) {
|
||||
state->delayed_checkout = xmalloc(sizeof(*state->delayed_checkout));
|
||||
state->delayed_checkout->state = CE_CAN_DELAY;
|
||||
string_list_init(&state->delayed_checkout->filters, 0);
|
||||
string_list_init(&state->delayed_checkout->paths, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static int remove_available_paths(struct string_list_item *item, void *cb_data)
|
||||
{
|
||||
struct string_list *available_paths = cb_data;
|
||||
struct string_list_item *available;
|
||||
|
||||
available = string_list_lookup(available_paths, item->string);
|
||||
if (available)
|
||||
available->util = (void *)item->string;
|
||||
return !available;
|
||||
}
|
||||
|
||||
int finish_delayed_checkout(struct checkout *state)
|
||||
{
|
||||
int errs = 0;
|
||||
struct string_list_item *filter, *path;
|
||||
struct delayed_checkout *dco = state->delayed_checkout;
|
||||
|
||||
if (!state->delayed_checkout)
|
||||
return errs;
|
||||
|
||||
dco->state = CE_RETRY;
|
||||
while (dco->filters.nr > 0) {
|
||||
for_each_string_list_item(filter, &dco->filters) {
|
||||
struct string_list available_paths = STRING_LIST_INIT_NODUP;
|
||||
|
||||
if (!async_query_available_blobs(filter->string, &available_paths)) {
|
||||
/* Filter reported an error */
|
||||
errs = 1;
|
||||
filter->string = "";
|
||||
continue;
|
||||
}
|
||||
if (available_paths.nr <= 0) {
|
||||
/*
|
||||
* Filter responded with no entries. That means
|
||||
* the filter is done and we can remove the
|
||||
* filter from the list (see
|
||||
* "string_list_remove_empty_items" call below).
|
||||
*/
|
||||
filter->string = "";
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* In dco->paths we store a list of all delayed paths.
|
||||
* The filter just send us a list of available paths.
|
||||
* Remove them from the list.
|
||||
*/
|
||||
filter_string_list(&dco->paths, 0,
|
||||
&remove_available_paths, &available_paths);
|
||||
|
||||
for_each_string_list_item(path, &available_paths) {
|
||||
struct cache_entry* ce;
|
||||
|
||||
if (!path->util) {
|
||||
error("external filter '%s' signaled that '%s' "
|
||||
"is now available although it has not been "
|
||||
"delayed earlier",
|
||||
filter->string, path->string);
|
||||
errs |= 1;
|
||||
|
||||
/*
|
||||
* Do not ask the filter for available blobs,
|
||||
* again, as the filter is likely buggy.
|
||||
*/
|
||||
filter->string = "";
|
||||
continue;
|
||||
}
|
||||
ce = index_file_exists(state->istate, path->string,
|
||||
strlen(path->string), 0);
|
||||
errs |= (ce ? checkout_entry(ce, state, NULL) : 1);
|
||||
}
|
||||
}
|
||||
string_list_remove_empty_items(&dco->filters, 0);
|
||||
}
|
||||
string_list_clear(&dco->filters, 0);
|
||||
|
||||
/* At this point we should not have any delayed paths anymore. */
|
||||
errs |= dco->paths.nr;
|
||||
for_each_string_list_item(path, &dco->paths) {
|
||||
error("'%s' was not filtered properly", path->string);
|
||||
}
|
||||
string_list_clear(&dco->paths, 0);
|
||||
|
||||
free(dco);
|
||||
state->delayed_checkout = NULL;
|
||||
|
||||
return errs;
|
||||
}
|
||||
|
||||
static int write_entry(struct cache_entry *ce,
|
||||
char *path, const struct checkout *state, int to_tempfile)
|
||||
{
|
||||
|
@ -179,11 +278,34 @@ static int write_entry(struct cache_entry *ce,
|
|||
/*
|
||||
* Convert from git internal format to working tree format
|
||||
*/
|
||||
if (ce_mode_s_ifmt == S_IFREG &&
|
||||
convert_to_working_tree(ce->name, new, size, &buf)) {
|
||||
free(new);
|
||||
new = strbuf_detach(&buf, &newsize);
|
||||
size = newsize;
|
||||
if (ce_mode_s_ifmt == S_IFREG) {
|
||||
struct delayed_checkout *dco = state->delayed_checkout;
|
||||
if (dco && dco->state != CE_NO_DELAY) {
|
||||
/* Do not send the blob in case of a retry. */
|
||||
if (dco->state == CE_RETRY) {
|
||||
new = NULL;
|
||||
size = 0;
|
||||
}
|
||||
ret = async_convert_to_working_tree(
|
||||
ce->name, new, size, &buf, dco);
|
||||
if (ret && string_list_has_string(&dco->paths, ce->name)) {
|
||||
free(new);
|
||||
goto finish;
|
||||
}
|
||||
} else
|
||||
ret = convert_to_working_tree(
|
||||
ce->name, new, size, &buf);
|
||||
|
||||
if (ret) {
|
||||
free(new);
|
||||
new = strbuf_detach(&buf, &newsize);
|
||||
size = newsize;
|
||||
}
|
||||
/*
|
||||
* No "else" here as errors from convert are OK at this
|
||||
* point. If the error would have been fatal (e.g.
|
||||
* filter is required), then we would have died already.
|
||||
*/
|
||||
}
|
||||
|
||||
fd = open_output_fd(path, ce, to_tempfile);
|
||||
t/t0021-conversion.sh
@ -28,7 +28,7 @@ file_size () {
|
|||
}
|
||||
|
||||
filter_git () {
|
||||
rm -f rot13-filter.log &&
|
||||
rm -f *.log &&
|
||||
git "$@"
|
||||
}
|
||||
|
||||
|
@ -42,10 +42,10 @@ test_cmp_count () {
|
|||
for FILE in "$expect" "$actual"
|
||||
do
|
||||
sort "$FILE" | uniq -c |
|
||||
sed -e "s/^ *[0-9][0-9]*[ ]*IN: /x IN: /" >"$FILE.tmp" &&
|
||||
mv "$FILE.tmp" "$FILE" || return
|
||||
sed -e "s/^ *[0-9][0-9]*[ ]*IN: /x IN: /" >"$FILE.tmp"
|
||||
done &&
|
||||
test_cmp "$expect" "$actual"
|
||||
test_cmp "$expect.tmp" "$actual.tmp" &&
|
||||
rm "$expect.tmp" "$actual.tmp"
|
||||
}
|
||||
|
||||
# Compare two files but exclude all `clean` invocations because Git can
|
||||
|
@ -56,10 +56,10 @@ test_cmp_exclude_clean () {
|
|||
actual=$2
|
||||
for FILE in "$expect" "$actual"
|
||||
do
|
||||
grep -v "IN: clean" "$FILE" >"$FILE.tmp" &&
|
||||
mv "$FILE.tmp" "$FILE"
|
||||
grep -v "IN: clean" "$FILE" >"$FILE.tmp"
|
||||
done &&
|
||||
test_cmp "$expect" "$actual"
|
||||
test_cmp "$expect.tmp" "$actual.tmp" &&
|
||||
rm "$expect.tmp" "$actual.tmp"
|
||||
}
|
||||
|
||||
# Check that the contents of two files are equal and that their rot13 version
|
||||
|
@ -342,7 +342,7 @@ test_expect_success 'diff does not reuse worktree files that need cleaning' '
|
|||
'
|
||||
|
||||
test_expect_success PERL 'required process filter should filter data' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean smudge" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
|
||||
test_config_global filter.protocol.required true &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
|
@ -375,7 +375,7 @@ test_expect_success PERL 'required process filter should filter data' '
|
|||
IN: clean testsubdir/test3 '\''sq'\'',\$x=.r $S3 [OK] -- OUT: $S3 . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_count expected.log rot13-filter.log &&
|
||||
test_cmp_count expected.log debug.log &&
|
||||
|
||||
git commit -m "test commit 2" &&
|
||||
rm -f test2.r "testsubdir/test3 '\''sq'\'',\$x=.r" &&
|
||||
|
@ -388,7 +388,7 @@ test_expect_success PERL 'required process filter should filter data' '
|
|||
IN: smudge testsubdir/test3 '\''sq'\'',\$x=.r $S3 [OK] -- OUT: $S3 . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
filter_git checkout --quiet --no-progress empty-branch &&
|
||||
cat >expected.log <<-EOF &&
|
||||
|
@ -397,7 +397,7 @@ test_expect_success PERL 'required process filter should filter data' '
|
|||
IN: clean test.r $S [OK] -- OUT: $S . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
filter_git checkout --quiet --no-progress master &&
|
||||
cat >expected.log <<-EOF &&
|
||||
|
@ -409,7 +409,7 @@ test_expect_success PERL 'required process filter should filter data' '
|
|||
IN: smudge testsubdir/test3 '\''sq'\'',\$x=.r $S3 [OK] -- OUT: $S3 . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.r &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test2.o" test2.r &&
|
||||
|
@ -419,7 +419,7 @@ test_expect_success PERL 'required process filter should filter data' '
|
|||
|
||||
test_expect_success PERL 'required process filter takes precedence' '
|
||||
test_config_global filter.protocol.clean false &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" &&
|
||||
test_config_global filter.protocol.required true &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
|
@ -439,12 +439,12 @@ test_expect_success PERL 'required process filter takes precedence' '
|
|||
IN: clean test.r $S [OK] -- OUT: $S . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_count expected.log rot13-filter.log
|
||||
test_cmp_count expected.log debug.log
|
||||
)
|
||||
'
|
||||
|
||||
test_expect_success PERL 'required process filter should be used only for "clean" operation only' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean" &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
|
@ -462,7 +462,7 @@ test_expect_success PERL 'required process filter should be used only for "clean
|
|||
IN: clean test.r $S [OK] -- OUT: $S . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_count expected.log rot13-filter.log &&
|
||||
test_cmp_count expected.log debug.log &&
|
||||
|
||||
rm test.r &&
|
||||
|
||||
|
@ -474,12 +474,12 @@ test_expect_success PERL 'required process filter should be used only for "clean
|
|||
init handshake complete
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log
|
||||
test_cmp_exclude_clean expected.log debug.log
|
||||
)
|
||||
'
|
||||
|
||||
test_expect_success PERL 'required process filter should process multiple packets' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean smudge" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
|
||||
test_config_global filter.protocol.required true &&
|
||||
|
||||
rm -rf repo &&
|
||||
|
@ -514,7 +514,7 @@ test_expect_success PERL 'required process filter should process multiple packet
|
|||
IN: clean 3pkt_2+1.file $(($S*2+1)) [OK] -- OUT: $(($S*2+1)) ... [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_count expected.log rot13-filter.log &&
|
||||
test_cmp_count expected.log debug.log &&
|
||||
|
||||
rm -f *.file &&
|
||||
|
||||
|
@ -529,7 +529,7 @@ test_expect_success PERL 'required process filter should process multiple packet
|
|||
IN: smudge 3pkt_2+1.file $(($S*2+1)) [OK] -- OUT: $(($S*2+1)) ... [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
for FILE in *.file
|
||||
do
|
||||
|
@ -539,7 +539,7 @@ test_expect_success PERL 'required process filter should process multiple packet
|
|||
'
|
||||
|
||||
test_expect_success PERL 'required process filter with clean error should fail' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean smudge" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
|
||||
test_config_global filter.protocol.required true &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
|
@ -558,7 +558,7 @@ test_expect_success PERL 'required process filter with clean error should fail'
|
|||
'
|
||||
|
||||
test_expect_success PERL 'process filter should restart after unexpected write failure' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean smudge" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
|
@ -579,7 +579,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f
|
|||
git add . &&
|
||||
rm -f *.r &&
|
||||
|
||||
rm -f rot13-filter.log &&
|
||||
rm -f debug.log &&
|
||||
git checkout --quiet --no-progress . 2>git-stderr.log &&
|
||||
|
||||
grep "smudge write error at" git-stderr.log &&
|
||||
|
@ -588,14 +588,14 @@ test_expect_success PERL 'process filter should restart after unexpected write f
|
|||
cat >expected.log <<-EOF &&
|
||||
START
|
||||
init handshake complete
|
||||
IN: smudge smudge-write-fail.r $SF [OK] -- OUT: $SF [WRITE FAIL]
|
||||
IN: smudge smudge-write-fail.r $SF [OK] -- [WRITE FAIL]
|
||||
START
|
||||
init handshake complete
|
||||
IN: smudge test.r $S [OK] -- OUT: $S . [OK]
|
||||
IN: smudge test2.r $S2 [OK] -- OUT: $S2 . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.r &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test2.o" test2.r &&
|
||||
|
@ -609,7 +609,7 @@ test_expect_success PERL 'process filter should restart after unexpected write f
|
|||
'
|
||||
|
||||
test_expect_success PERL 'process filter should not be restarted if it signals an error' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean smudge" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
|
@ -634,12 +634,12 @@ test_expect_success PERL 'process filter should not be restarted if it signals a
|
|||
cat >expected.log <<-EOF &&
|
||||
START
|
||||
init handshake complete
|
||||
IN: smudge error.r $SE [OK] -- OUT: 0 [ERROR]
|
||||
IN: smudge error.r $SE [OK] -- [ERROR]
|
||||
IN: smudge test.r $S [OK] -- OUT: $S . [OK]
|
||||
IN: smudge test2.r $S2 [OK] -- OUT: $S2 . [OK]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.r &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test2.o" test2.r &&
|
||||
|
@ -648,7 +648,7 @@ test_expect_success PERL 'process filter should not be restarted if it signals a
|
|||
'
|
||||
|
||||
test_expect_success PERL 'process filter abort stops processing of all further files' '
|
||||
test_config_global filter.protocol.process "rot13-filter.pl clean smudge" &&
|
||||
test_config_global filter.protocol.process "rot13-filter.pl debug.log clean smudge" &&
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
|
@ -673,10 +673,10 @@ test_expect_success PERL 'process filter abort stops processing of all further f
|
|||
cat >expected.log <<-EOF &&
|
||||
START
|
||||
init handshake complete
|
||||
IN: smudge abort.r $SA [OK] -- OUT: 0 [ABORT]
|
||||
IN: smudge abort.r $SA [OK] -- [ABORT]
|
||||
STOP
|
||||
EOF
|
||||
test_cmp_exclude_clean expected.log rot13-filter.log &&
|
||||
test_cmp_exclude_clean expected.log debug.log &&
|
||||
|
||||
test_cmp "$TEST_ROOT/test.o" test.r &&
|
||||
test_cmp "$TEST_ROOT/test2.o" test2.r &&
|
||||
|
@ -701,4 +701,120 @@ test_expect_success PERL 'invalid process filter must fail (and not hang!)' '
|
|||
)
|
||||
'
|
||||
|
||||
test_expect_success PERL 'delayed checkout in process filter' '
|
||||
test_config_global filter.a.process "rot13-filter.pl a.log clean smudge delay" &&
|
||||
test_config_global filter.a.required true &&
|
||||
test_config_global filter.b.process "rot13-filter.pl b.log clean smudge delay" &&
|
||||
test_config_global filter.b.required true &&
|
||||
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
cd repo &&
|
||||
git init &&
|
||||
echo "*.a filter=a" >.gitattributes &&
|
||||
echo "*.b filter=b" >>.gitattributes &&
|
||||
cp "$TEST_ROOT/test.o" test.a &&
|
||||
cp "$TEST_ROOT/test.o" test-delay10.a &&
|
||||
cp "$TEST_ROOT/test.o" test-delay11.a &&
|
||||
cp "$TEST_ROOT/test.o" test-delay20.a &&
|
||||
cp "$TEST_ROOT/test.o" test-delay10.b &&
|
||||
git add . &&
|
||||
git commit -m "test commit"
|
||||
) &&
|
||||
|
||||
S=$(file_size "$TEST_ROOT/test.o") &&
|
||||
cat >a.exp <<-EOF &&
|
||||
START
|
||||
init handshake complete
|
||||
IN: smudge test.a $S [OK] -- OUT: $S . [OK]
|
||||
IN: smudge test-delay10.a $S [OK] -- [DELAYED]
|
||||
IN: smudge test-delay11.a $S [OK] -- [DELAYED]
|
||||
IN: smudge test-delay20.a $S [OK] -- [DELAYED]
|
||||
IN: list_available_blobs test-delay10.a test-delay11.a [OK]
|
||||
IN: smudge test-delay10.a 0 [OK] -- OUT: $S . [OK]
|
||||
IN: smudge test-delay11.a 0 [OK] -- OUT: $S . [OK]
|
||||
IN: list_available_blobs test-delay20.a [OK]
|
||||
IN: smudge test-delay20.a 0 [OK] -- OUT: $S . [OK]
|
||||
IN: list_available_blobs [OK]
|
||||
STOP
|
||||
EOF
|
||||
cat >b.exp <<-EOF &&
|
||||
START
|
||||
init handshake complete
|
||||
IN: smudge test-delay10.b $S [OK] -- [DELAYED]
|
||||
IN: list_available_blobs test-delay10.b [OK]
|
||||
IN: smudge test-delay10.b 0 [OK] -- OUT: $S . [OK]
|
||||
IN: list_available_blobs [OK]
|
||||
STOP
|
||||
EOF
|
||||
|
||||
rm -rf repo-cloned &&
|
||||
filter_git clone repo repo-cloned &&
|
||||
test_cmp_count a.exp repo-cloned/a.log &&
|
||||
test_cmp_count b.exp repo-cloned/b.log &&
|
||||
|
||||
(
|
||||
cd repo-cloned &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay10.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay11.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay20.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay10.b &&
|
||||
|
||||
rm *.a *.b &&
|
||||
filter_git checkout . &&
|
||||
test_cmp_count ../a.exp a.log &&
|
||||
test_cmp_count ../b.exp b.log &&
|
||||
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay10.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay11.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay20.a &&
|
||||
test_cmp_committed_rot13 "$TEST_ROOT/test.o" test-delay10.b
|
||||
)
|
||||
'
|
||||
|
||||
test_expect_success PERL 'missing file in delayed checkout' '
|
||||
test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" &&
|
||||
test_config_global filter.bug.required true &&
|
||||
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
cd repo &&
|
||||
git init &&
|
||||
echo "*.a filter=bug" >.gitattributes &&
|
||||
cp "$TEST_ROOT/test.o" missing-delay.a
|
||||
git add . &&
|
||||
git commit -m "test commit"
|
||||
) &&
|
||||
|
||||
rm -rf repo-cloned &&
|
||||
test_must_fail git clone repo repo-cloned 2>git-stderr.log &&
|
||||
cat git-stderr.log &&
|
||||
grep "error: .missing-delay\.a. was not filtered properly" git-stderr.log
|
||||
'
|
||||
|
||||
test_expect_success PERL 'invalid file in delayed checkout' '
|
||||
test_config_global filter.bug.process "rot13-filter.pl bug.log clean smudge delay" &&
|
||||
test_config_global filter.bug.required true &&
|
||||
|
||||
rm -rf repo &&
|
||||
mkdir repo &&
|
||||
(
|
||||
cd repo &&
|
||||
git init &&
|
||||
echo "*.a filter=bug" >.gitattributes &&
|
||||
cp "$TEST_ROOT/test.o" invalid-delay.a &&
|
||||
cp "$TEST_ROOT/test.o" unfiltered
|
||||
git add . &&
|
||||
git commit -m "test commit"
|
||||
) &&
|
||||
|
||||
rm -rf repo-cloned &&
|
||||
test_must_fail git clone repo repo-cloned 2>git-stderr.log &&
|
||||
grep "error: external filter .* signaled that .unfiltered. is now available although it has not been delayed earlier" git-stderr.log
|
||||
'
|
||||
|
||||
test_done
|
||||
t/t0021/rot13-filter.pl
@ -2,8 +2,9 @@
|
|||
# Example implementation for the Git filter protocol version 2
|
||||
# See Documentation/gitattributes.txt, section "Filter Protocol"
|
||||
#
|
||||
# The script takes the list of supported protocol capabilities as
|
||||
# arguments ("clean", "smudge", etc).
|
||||
# The first argument defines a debug log file that the script write to.
|
||||
# All remaining arguments define a list of supported protocol
|
||||
# capabilities ("clean", "smudge", etc).
|
||||
#
|
||||
# This implementation supports special test cases:
|
||||
# (1) If data with the pathname "clean-write-fail.r" is processed with
|
||||
|
@ -17,6 +18,16 @@
|
|||
# operation then the filter signals that it cannot or does not want
|
||||
# to process the file and any file after that is processed with the
|
||||
# same command.
|
||||
# (5) If data with a pathname that is a key in the DELAY hash is
|
||||
# requested (e.g. "test-delay10.a") then the filter responds with
|
||||
# a "delay" status and sets the "requested" field in the DELAY hash.
|
||||
# The filter will signal the availability of this object after
|
||||
# "count" (field in DELAY hash) "list_available_blobs" commands.
|
||||
# (6) If data with the pathname "missing-delay.a" is processed that the
|
||||
# filter will drop the path from the "list_available_blobs" response.
|
||||
# (7) If data with the pathname "invalid-delay.a" is processed that the
|
||||
# filter will add the path "unfiltered" which was not delayed before
|
||||
# to the "list_available_blobs" response.
|
||||
#
|
||||
|
||||
use strict;
|
||||
|
@ -24,9 +35,19 @@ use warnings;
|
|||
use IO::File;
|
||||
|
||||
my $MAX_PACKET_CONTENT_SIZE = 65516;
|
||||
my $log_file = shift @ARGV;
|
||||
my @capabilities = @ARGV;
|
||||
|
||||
open my $debug, ">>", "rot13-filter.log" or die "cannot open log file: $!";
|
||||
open my $debug, ">>", $log_file or die "cannot open log file: $!";
|
||||
|
||||
my %DELAY = (
|
||||
'test-delay10.a' => { "requested" => 0, "count" => 1 },
|
||||
'test-delay11.a' => { "requested" => 0, "count" => 1 },
|
||||
'test-delay20.a' => { "requested" => 0, "count" => 2 },
|
||||
'test-delay10.b' => { "requested" => 0, "count" => 1 },
|
||||
'missing-delay.a' => { "requested" => 0, "count" => 1 },
|
||||
'invalid-delay.a' => { "requested" => 0, "count" => 1 },
|
||||
);
|
||||
|
||||
sub rot13 {
|
||||
my $str = shift;
|
||||
|
@ -64,7 +85,7 @@ sub packet_bin_read {
|
|||
|
||||
sub packet_txt_read {
|
||||
my ( $res, $buf ) = packet_bin_read();
|
||||
unless ( $buf =~ s/\n$// ) {
|
||||
unless ( $buf eq '' or $buf =~ s/\n$// ) {
|
||||
die "A non-binary line MUST be terminated by an LF.";
|
||||
}
|
||||
return ( $res, $buf );
|
||||
|
@ -99,6 +120,7 @@ packet_flush();
|
|||
|
||||
( packet_txt_read() eq ( 0, "capability=clean" ) ) || die "bad capability";
|
||||
( packet_txt_read() eq ( 0, "capability=smudge" ) ) || die "bad capability";
|
||||
( packet_txt_read() eq ( 0, "capability=delay" ) ) || die "bad capability";
|
||||
( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end";
|
||||
|
||||
foreach (@capabilities) {
|
||||
|
@ -109,88 +131,142 @@ print $debug "init handshake complete\n";
|
|||
$debug->flush();
|
||||
|
||||
while (1) {
|
||||
my ($command) = packet_txt_read() =~ /^command=(.+)$/;
|
||||
my ( $command ) = packet_txt_read() =~ /^command=(.+)$/;
|
||||
print $debug "IN: $command";
|
||||
$debug->flush();
|
||||
|
||||
my ($pathname) = packet_txt_read() =~ /^pathname=(.+)$/;
|
||||
print $debug " $pathname";
|
||||
$debug->flush();
|
||||
if ( $command eq "list_available_blobs" ) {
|
||||
# Flush
|
||||
packet_bin_read();
|
||||
|
||||
if ( $pathname eq "" ) {
|
||||
die "bad pathname '$pathname'";
|
||||
}
|
||||
|
||||
# Flush
|
||||
packet_bin_read();
|
||||
|
||||
my $input = "";
|
||||
{
|
||||
binmode(STDIN);
|
||||
my $buffer;
|
||||
my $done = 0;
|
||||
while ( !$done ) {
|
||||
( $done, $buffer ) = packet_bin_read();
|
||||
$input .= $buffer;
|
||||
}
|
||||
print $debug " " . length($input) . " [OK] -- ";
|
||||
$debug->flush();
|
||||
}
|
||||
|
||||
my $output;
|
||||
if ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
|
||||
$output = "";
|
||||
}
|
||||
elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
|
||||
$output = rot13($input);
|
||||
}
|
||||
elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
|
||||
$output = rot13($input);
|
||||
}
|
||||
else {
|
||||
die "bad command '$command'";
|
||||
}
|
||||
|
||||
print $debug "OUT: " . length($output) . " ";
|
||||
$debug->flush();
|
||||
|
||||
if ( $pathname eq "error.r" ) {
|
||||
print $debug "[ERROR]\n";
|
||||
$debug->flush();
|
||||
packet_txt_write("status=error");
|
||||
packet_flush();
|
||||
}
|
||||
elsif ( $pathname eq "abort.r" ) {
|
||||
print $debug "[ABORT]\n";
|
||||
$debug->flush();
|
||||
packet_txt_write("status=abort");
|
||||
packet_flush();
|
||||
}
|
||||
else {
|
||||
packet_txt_write("status=success");
|
||||
packet_flush();
|
||||
|
||||
if ( $pathname eq "${command}-write-fail.r" ) {
|
||||
print $debug "[WRITE FAIL]\n";
|
||||
$debug->flush();
|
||||
die "${command} write error";
|
||||
}
|
||||
|
||||
while ( length($output) > 0 ) {
|
||||
my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE );
|
||||
packet_bin_write($packet);
|
||||
# dots represent the number of packets
|
||||
print $debug ".";
|
||||
if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
|
||||
$output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
|
||||
}
|
||||
else {
|
||||
$output = "";
|
||||
foreach my $pathname ( sort keys %DELAY ) {
|
||||
if ( $DELAY{$pathname}{"requested"} >= 1 ) {
|
||||
$DELAY{$pathname}{"count"} = $DELAY{$pathname}{"count"} - 1;
|
||||
if ( $pathname eq "invalid-delay.a" ) {
|
||||
# Send Git a pathname that was not delayed earlier
|
||||
packet_txt_write("pathname=unfiltered");
|
||||
}
|
||||
if ( $pathname eq "missing-delay.a" ) {
|
||||
# Do not signal Git that this file is available
|
||||
} elsif ( $DELAY{$pathname}{"count"} == 0 ) {
|
||||
print $debug " $pathname";
|
||||
packet_txt_write("pathname=$pathname");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
packet_flush();
|
||||
|
||||
print $debug " [OK]\n";
|
||||
$debug->flush();
|
||||
packet_txt_write("status=success");
|
||||
packet_flush();
|
||||
}
|
||||
else {
|
||||
my ( $pathname ) = packet_txt_read() =~ /^pathname=(.+)$/;
|
||||
print $debug " $pathname";
|
||||
$debug->flush();
|
||||
|
||||
if ( $pathname eq "" ) {
|
||||
die "bad pathname '$pathname'";
|
||||
}
|
||||
|
||||
# Read until flush
|
||||
my ( $done, $buffer ) = packet_txt_read();
|
||||
while ( $buffer ne '' ) {
|
||||
if ( $buffer eq "can-delay=1" ) {
|
||||
if ( exists $DELAY{$pathname} and $DELAY{$pathname}{"requested"} == 0 ) {
|
||||
$DELAY{$pathname}{"requested"} = 1;
|
||||
}
|
||||
} else {
|
||||
die "Unknown message '$buffer'";
|
||||
}
|
||||
|
||||
( $done, $buffer ) = packet_txt_read();
|
||||
}
|
||||
|
||||
my $input = "";
|
||||
{
|
||||
binmode(STDIN);
|
||||
my $buffer;
|
||||
my $done = 0;
|
||||
while ( !$done ) {
|
||||
( $done, $buffer ) = packet_bin_read();
|
||||
$input .= $buffer;
|
||||
}
|
||||
print $debug " " . length($input) . " [OK] -- ";
|
||||
$debug->flush();
|
||||
}
|
||||
|
||||
my $output;
|
||||
if ( exists $DELAY{$pathname} and exists $DELAY{$pathname}{"output"} ) {
|
||||
$output = $DELAY{$pathname}{"output"}
|
||||
}
|
||||
elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
|
||||
$output = "";
|
||||
}
|
||||
elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
|
||||
$output = rot13($input);
|
||||
}
|
||||
elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
|
||||
$output = rot13($input);
|
||||
}
|
||||
else {
|
||||
die "bad command '$command'";
|
||||
}
|
||||
|
||||
if ( $pathname eq "error.r" ) {
|
||||
print $debug "[ERROR]\n";
|
||||
$debug->flush();
|
||||
packet_txt_write("status=error");
|
||||
packet_flush();
|
||||
}
|
||||
elsif ( $pathname eq "abort.r" ) {
|
||||
print $debug "[ABORT]\n";
|
||||
$debug->flush();
|
||||
packet_txt_write("status=abort");
|
||||
packet_flush();
|
||||
}
|
||||
elsif ( $command eq "smudge" and
|
||||
exists $DELAY{$pathname} and
|
||||
$DELAY{$pathname}{"requested"} == 1
|
||||
) {
|
||||
print $debug "[DELAYED]\n";
|
||||
$debug->flush();
|
||||
packet_txt_write("status=delayed");
|
||||
packet_flush();
|
||||
$DELAY{$pathname}{"requested"} = 2;
|
||||
$DELAY{$pathname}{"output"} = $output;
|
||||
}
|
||||
else {
|
||||
packet_txt_write("status=success");
|
||||
packet_flush();
|
||||
|
||||
if ( $pathname eq "${command}-write-fail.r" ) {
|
||||
print $debug "[WRITE FAIL]\n";
|
||||
$debug->flush();
|
||||
die "${command} write error";
|
||||
}
|
||||
|
||||
print $debug "OUT: " . length($output) . " ";
|
||||
$debug->flush();
|
||||
|
||||
while ( length($output) > 0 ) {
|
||||
my $packet = substr( $output, 0, $MAX_PACKET_CONTENT_SIZE );
|
||||
packet_bin_write($packet);
|
||||
# dots represent the number of packets
|
||||
print $debug ".";
|
||||
if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
|
||||
$output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
|
||||
}
|
||||
else {
|
||||
$output = "";
|
||||
}
|
||||
}
|
||||
packet_flush();
|
||||
print $debug " [OK]\n";
|
||||
$debug->flush();
|
||||
packet_flush();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
unpack-trees.c

@@ -380,6 +380,7 @@ static int check_updates(struct unpack_trees_options *o)
 	if (should_update_submodules() && o->update && !o->dry_run)
 		reload_gitmodules_file(index, &state);
 
+	enable_delayed_checkout(&state);
 	for (i = 0; i < index->cache_nr; i++) {
 		struct cache_entry *ce = index->cache[i];
 
@@ -394,6 +395,7 @@ static int check_updates(struct unpack_trees_options *o)
 			}
 		}
 	}
+	errs |= finish_delayed_checkout(&state);
 	stop_progress(&progress);
 	if (o->update)
 		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);