run-command API: have "run_processes_parallel{,_tr2}()" return void
Change the "run_processes_parallel{,_tr2}()" functions to return void,
instead of int. Ever since c553c72eed (run-command: add an
asynchronous parallel child processor, 2015-12-15) they have
unconditionally returned 0.
To get a "real" return value out of this function the caller needs to
get it via the "task_finished_fn" callback, see the example in hook.c
added in 96e7225b31 (hook: add 'run' subcommand, 2021-12-22).
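
For illustration, a minimal sketch of that pattern (the
"task_finished_fn" signature is the one declared in run-command.h;
the "my_state" and "my_task_finished" names are made up here, this is
not the verbatim hook.c code):

	struct my_state {
		int result;	/* aggregated exit status */
	};

	static int my_task_finished(int result, struct strbuf *out,
				    void *pp_cb, void *pp_task_cb)
	{
		struct my_state *state = pp_cb;

		if (result)
			state->result = result;	/* remember any failure */
		return 0;	/* non-zero would abort remaining tasks */
	}

The caller then reads its "state.result" after the parallel run
returns, just as the callers adjusted below do.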
So the "result = " and "if (!result)" code added to "builtin/fetch.c"
d54dea77db (fetch: let --jobs=<n> parallelize --multiple, too,
2019-10-05) has always been redundant, we always took that "if"
path. Likewise the "ret =" in "t/helper/test-run-command.c" added in
be5d88e112 (test-tool run-command: learn to run (parts of) the
testsuite, 2019-10-04) wasn't used, instead we got the return value
from the "if (suite.failed.nr > 0)" block seen in the context.
Subsequent commits will alter this API; getting rid of the
always-zero return value makes it easier to understand those changes.
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
commit 7dd5762d9f
parent a083f94c21
committed by: Junio C Hamano
diff --git a/builtin/fetch.c b/builtin/fetch.c
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -1953,15 +1953,14 @@ static int fetch_multiple(struct string_list *list, int max_children)
 		struct parallel_fetch_state state = { argv.v, list, 0, 0 };
 
 		strvec_push(&argv, "--end-of-options");
-		result = run_processes_parallel_tr2(max_children,
-						    &fetch_next_remote,
-						    &fetch_failed_to_start,
-						    &fetch_finished,
-						    &state,
-						    "fetch", "parallel/fetch");
+		run_processes_parallel_tr2(max_children,
+					   &fetch_next_remote,
+					   &fetch_failed_to_start,
+					   &fetch_finished,
+					   &state,
+					   "fetch", "parallel/fetch");
 
-		if (!result)
-			result = state.result;
+		result = state.result;
 	} else
 		for (i = 0; i < list->nr; i++) {
 			const char *name = list->items[i].string;
diff --git a/run-command.c b/run-command.c
--- a/run-command.c
+++ b/run-command.c
@@ -1783,11 +1783,11 @@ static int pp_collect_finished(struct parallel_processes *pp)
 	return result;
 }
 
-int run_processes_parallel(int n,
-			   get_next_task_fn get_next_task,
-			   start_failure_fn start_failure,
-			   task_finished_fn task_finished,
-			   void *pp_cb)
+void run_processes_parallel(int n,
+			    get_next_task_fn get_next_task,
+			    start_failure_fn start_failure,
+			    task_finished_fn task_finished,
+			    void *pp_cb)
 {
 	int i, code;
 	int output_timeout = 100;
@@ -1834,25 +1834,20 @@ int run_processes_parallel(int n,
 	}
 
 	pp_cleanup(&pp);
-	return 0;
 }
 
-int run_processes_parallel_tr2(int n, get_next_task_fn get_next_task,
-			       start_failure_fn start_failure,
-			       task_finished_fn task_finished, void *pp_cb,
-			       const char *tr2_category, const char *tr2_label)
+void run_processes_parallel_tr2(int n, get_next_task_fn get_next_task,
+				start_failure_fn start_failure,
+				task_finished_fn task_finished, void *pp_cb,
+				const char *tr2_category, const char *tr2_label)
 {
-	int result;
-
 	trace2_region_enter_printf(tr2_category, tr2_label, NULL, "max:%d",
 				   ((n < 1) ? online_cpus() : n));
 
-	result = run_processes_parallel(n, get_next_task, start_failure,
-					task_finished, pp_cb);
+	run_processes_parallel(n, get_next_task, start_failure,
+			       task_finished, pp_cb);
 
 	trace2_region_leave(tr2_category, tr2_label, NULL);
-
-	return result;
 }
 
 int run_auto_maintenance(int quiet)
diff --git a/run-command.h b/run-command.h
--- a/run-command.h
+++ b/run-command.h
@@ -485,14 +485,14 @@ typedef int (*task_finished_fn)(int result,
  * API reads that setting.
  */
 extern int run_processes_parallel_ungroup;
-int run_processes_parallel(int n,
-			   get_next_task_fn,
-			   start_failure_fn,
-			   task_finished_fn,
-			   void *pp_cb);
-int run_processes_parallel_tr2(int n, get_next_task_fn, start_failure_fn,
-			       task_finished_fn, void *pp_cb,
-			       const char *tr2_category, const char *tr2_label);
+void run_processes_parallel(int n,
+			    get_next_task_fn,
+			    start_failure_fn,
+			    task_finished_fn,
+			    void *pp_cb);
+void run_processes_parallel_tr2(int n, get_next_task_fn, start_failure_fn,
+				task_finished_fn, void *pp_cb,
+				const char *tr2_category, const char *tr2_label);
 
 /**
  * Convenience function which prepares env for a command to be run in a
diff --git a/t/helper/test-run-command.c b/t/helper/test-run-command.c
--- a/t/helper/test-run-command.c
+++ b/t/helper/test-run-command.c
@@ -192,8 +192,8 @@ static int testsuite(int argc, const char **argv)
 	fprintf(stderr, "Running %"PRIuMAX" tests (%d at a time)\n",
 		(uintmax_t)suite.tests.nr, max_jobs);
 
-	ret = run_processes_parallel(max_jobs, next_test, test_failed,
-				     test_finished, &suite);
+	run_processes_parallel(max_jobs, next_test, test_failed,
+			       test_finished, &suite);
 
 	if (suite.failed.nr > 0) {
 		ret = 1;
@@ -428,16 +428,16 @@ int cmd__run_command(int argc, const char **argv)
 	strvec_pushv(&proc.args, (const char **)argv + 3);
 
 	if (!strcmp(argv[1], "run-command-parallel")) {
-		exit(run_processes_parallel(jobs, parallel_next,
-					    NULL, NULL, &proc));
+		run_processes_parallel(jobs, parallel_next, NULL, NULL, &proc);
 	} else if (!strcmp(argv[1], "run-command-abort")) {
-		exit(run_processes_parallel(jobs, parallel_next,
-					    NULL, task_finished, &proc));
+		run_processes_parallel(jobs, parallel_next, NULL,
+				       task_finished, &proc);
 	} else if (!strcmp(argv[1], "run-command-no-jobs")) {
-		exit(run_processes_parallel(jobs, no_job,
-					    NULL, task_finished, &proc));
+		run_processes_parallel(jobs, no_job, NULL, task_finished,
+				       &proc);
 	} else {
 		fprintf(stderr, "check usage\n");
 		return 1;
 	}
+	exit(0);
 }