Merge branch 'kn/the-repository' into kn/midx-wo-the-repository

* kn/the-repository:
  packfile.c: remove unnecessary prepare_packed_git() call
  midx: add repository to `multi_pack_index` struct
  config: make `packed_git_(limit|window_size)` non-global variables
  config: make `delta_base_cache_limit` a non-global variable
  packfile: pass down repository to `for_each_packed_object`
  packfile: pass down repository to `has_object[_kept]_pack`
  packfile: pass down repository to `odb_pack_name`
  packfile: pass `repository` to static function in the file
  packfile: use `repository` from `packed_git` directly
  packfile: add repository to struct `packed_git`
Junio C Hamano
2024-12-04 10:31:46 +09:00
36 changed files with 275 additions and 190 deletions
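Taken together, the series stops the packfile layer from reaching for `the_repository` and file-scope globals: `struct packed_git` and `struct multi_pack_index` now carry a `repo` pointer, the public helpers take an explicit `struct repository *`, and the old `packed_git_window_size` / `packed_git_limit` / `delta_base_cache_limit` globals move into the per-repository settings. A rough caller-side sketch follows; the counting callback and helper are hypothetical, and only the `for_each_packed_object()` signature is taken from the hunks below:

```c
#include "git-compat-util.h"
#include "object-store-ll.h"

/* Hypothetical callback: count every packed object in the given repository. */
static int count_packed(const struct object_id *oid UNUSED,
			struct packed_git *pack UNUSED,
			uint32_t pos UNUSED, void *data)
{
	unsigned long *nr = data;
	(*nr)++;
	return 0;
}

static unsigned long count_all_packed(struct repository *repo)
{
	unsigned long nr = 0;

	/* before this series: for_each_packed_object(count_packed, &nr, 0); */
	for_each_packed_object(repo, count_packed, &nr, 0);
	return nr;
}
```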

View File

@@ -827,15 +827,16 @@ static int batch_objects(struct batch_options *opt)
 cb.seen = &seen;
 for_each_loose_object(batch_unordered_loose, &cb, 0);
-for_each_packed_object(batch_unordered_packed, &cb,
-FOR_EACH_OBJECT_PACK_ORDER);
+for_each_packed_object(the_repository, batch_unordered_packed,
+&cb, FOR_EACH_OBJECT_PACK_ORDER);
 oidset_clear(&seen);
 } else {
 struct oid_array sa = OID_ARRAY_INIT;
 for_each_loose_object(collect_loose_object, &sa, 0);
-for_each_packed_object(collect_packed_object, &sa, 0);
+for_each_packed_object(the_repository, collect_packed_object,
+&sa, 0);
 oid_array_for_each_unique(&sa, batch_object_cb, &cb);

View File

@@ -67,7 +67,7 @@ static int count_loose(const struct object_id *oid, const char *path,
 else {
 loose_size += on_disk_bytes(st);
 loose++;
-if (verbose && has_object_pack(oid))
+if (verbose && has_object_pack(the_repository, oid))
 packed_loose++;
 }
 return 0;

View File

@@ -765,6 +765,7 @@ static void start_packfile(void)
 p->pack_fd = pack_fd;
 p->do_not_close = 1;
+p->repo = the_repository;
 pack_file = hashfd(pack_fd, p->pack_name);
 pack_data = p;
@@ -805,7 +806,7 @@ static char *keep_pack(const char *curr_index_name)
 struct strbuf name = STRBUF_INIT;
 int keep_fd;
-odb_pack_name(&name, pack_data->hash, "keep");
+odb_pack_name(pack_data->repo, &name, pack_data->hash, "keep");
 keep_fd = odb_pack_keep(name.buf);
 if (keep_fd < 0)
 die_errno("cannot create keep file");
@@ -813,11 +814,11 @@ static char *keep_pack(const char *curr_index_name)
 if (close(keep_fd))
 die_errno("failed to write keep file");
-odb_pack_name(&name, pack_data->hash, "pack");
+odb_pack_name(pack_data->repo, &name, pack_data->hash, "pack");
 if (finalize_object_file(pack_data->pack_name, name.buf))
 die("cannot store pack file");
-odb_pack_name(&name, pack_data->hash, "idx");
+odb_pack_name(pack_data->repo, &name, pack_data->hash, "idx");
 if (finalize_object_file(curr_index_name, name.buf))
 die("cannot store index file");
 free((void *)curr_index_name);
@@ -831,7 +832,7 @@ static void unkeep_all_packs(void)
 for (k = 0; k < pack_id; k++) {
 struct packed_git *p = all_packs[k];
-odb_pack_name(&name, p->hash, "keep");
+odb_pack_name(p->repo, &name, p->hash, "keep");
 unlink_or_warn(name.buf);
 }
 strbuf_release(&name);
@@ -888,7 +889,7 @@ static void end_packfile(void)
 idx_name = keep_pack(create_index());
 /* Register the packfile with core git's machinery. */
-new_p = add_packed_git(idx_name, strlen(idx_name), 1);
+new_p = add_packed_git(pack_data->repo, idx_name, strlen(idx_name), 1);
 if (!new_p)
 die("core git rejected index %s", idx_name);
 all_packs[pack_id] = new_p;
@@ -3538,7 +3539,7 @@ static void parse_argv(void)
 int cmd_fast_import(int argc,
 const char **argv,
 const char *prefix,
-struct repository *repo UNUSED)
+struct repository *repo)
 {
 unsigned int i;
@@ -3659,7 +3660,7 @@ int cmd_fast_import(int argc,
 fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
 fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
 fprintf(stderr, "---------------------------------------------------------------------\n");
-pack_report();
+pack_report(repo);
 fprintf(stderr, "---------------------------------------------------------------------\n");
 fprintf(stderr, "\n");
 }

View File

@@ -150,7 +150,7 @@ static int mark_object(struct object *obj, enum object_type type,
 return 0;
 obj->flags |= REACHABLE;
-if (is_promisor_object(&obj->oid))
+if (is_promisor_object(the_repository, &obj->oid))
 /*
 * Further recursion does not need to be performed on this
 * object since it is a promisor object (so it does not need to
@@ -270,9 +270,9 @@ static void check_reachable_object(struct object *obj)
 * do a full fsck
 */
 if (!(obj->flags & HAS_OBJ)) {
-if (is_promisor_object(&obj->oid))
+if (is_promisor_object(the_repository, &obj->oid))
 return;
-if (has_object_pack(&obj->oid))
+if (has_object_pack(the_repository, &obj->oid))
 return; /* it is in pack - forget about it */
 printf_ln(_("missing %s %s"),
 printable_type(&obj->oid, obj->type),
@@ -391,7 +391,10 @@ static void check_connectivity(void)
 * traversal.
 */
 for_each_loose_object(mark_loose_unreachable_referents, NULL, 0);
-for_each_packed_object(mark_packed_unreachable_referents, NULL, 0);
+for_each_packed_object(the_repository,
+mark_packed_unreachable_referents,
+NULL,
+0);
 }
 /* Look up all the requirements, warn about missing objects.. */
@@ -488,7 +491,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
 refname, timestamp);
 obj->flags |= USED;
 mark_object_reachable(obj);
-} else if (!is_promisor_object(oid)) {
+} else if (!is_promisor_object(the_repository, oid)) {
 error(_("%s: invalid reflog entry %s"),
 refname, oid_to_hex(oid));
 errors_found |= ERROR_REACHABLE;
@@ -531,7 +534,7 @@ static int fsck_handle_ref(const char *refname, const char *referent UNUSED, con
 obj = parse_object(the_repository, oid);
 if (!obj) {
-if (is_promisor_object(oid)) {
+if (is_promisor_object(the_repository, oid)) {
 /*
 * Increment default_refs anyway, because this is a
 * valid ref.
@@ -966,7 +969,8 @@ int cmd_fsck(int argc,
 if (connectivity_only) {
 for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
-for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
+for_each_packed_object(the_repository,
+mark_packed_for_connectivity, NULL, 0);
 } else {
 prepare_alt_odb(the_repository);
 for (odb = the_repository->objects->odb; odb; odb = odb->next)
@@ -1011,7 +1015,7 @@ int cmd_fsck(int argc,
 &oid);
 if (!obj || !(obj->flags & HAS_OBJ)) {
-if (is_promisor_object(&oid))
+if (is_promisor_object(the_repository, &oid))
 continue;
 error(_("%s: object missing"), oid_to_hex(&oid));
 errors_found |= ERROR_OBJECT;

View File

@@ -138,6 +138,11 @@ struct gc_config {
 char *repack_filter_to;
 unsigned long big_pack_threshold;
 unsigned long max_delta_cache_size;
+/*
+* Remove this member from gc_config once repo_settings is passed
+* through the callchain.
+*/
+size_t delta_base_cache_limit;
 };
 #define GC_CONFIG_INIT { \
@@ -153,6 +158,7 @@ struct gc_config {
 .prune_expire = xstrdup("2.weeks.ago"), \
 .prune_worktrees_expire = xstrdup("3.months.ago"), \
 .max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE, \
+.delta_base_cache_limit = DEFAULT_DELTA_BASE_CACHE_LIMIT, \
 }
 static void gc_config_release(struct gc_config *cfg)
@@ -168,6 +174,7 @@ static void gc_config(struct gc_config *cfg)
 {
 const char *value;
 char *owned = NULL;
+unsigned long ulongval;
 if (!git_config_get_value("gc.packrefs", &value)) {
 if (value && !strcmp(value, "notbare"))
@@ -206,6 +213,9 @@ static void gc_config(struct gc_config *cfg)
 git_config_get_ulong("gc.bigpackthreshold", &cfg->big_pack_threshold);
 git_config_get_ulong("pack.deltacachesize", &cfg->max_delta_cache_size);
+if (!git_config_get_ulong("core.deltabasecachelimit", &ulongval))
+cfg->delta_base_cache_limit = ulongval;
 if (!git_config_get_string("gc.repackfilter", &owned)) {
 free(cfg->repack_filter);
 cfg->repack_filter = owned;
@@ -416,7 +426,7 @@ static uint64_t estimate_repack_memory(struct gc_config *cfg,
 * read_sha1_file() (either at delta calculation phase, or
 * writing phase) also fills up the delta base cache
 */
-heap += delta_base_cache_limit;
+heap += cfg->delta_base_cache_limit;
 /* and of course pack-objects has its own delta cache */
 heap += cfg->max_delta_cache_size;

View File

@@ -1291,7 +1291,7 @@ static void parse_pack_objects(unsigned char *hash)
 * recursively checking if the resulting object is used as a base
 * for some more deltas.
 */
-static void resolve_deltas(void)
+static void resolve_deltas(struct pack_idx_option *opts)
 {
 int i;
@@ -1307,7 +1307,7 @@ static void resolve_deltas(void)
 nr_ref_deltas + nr_ofs_deltas);
 nr_dispatched = 0;
-base_cache_limit = delta_base_cache_limit * nr_threads;
+base_cache_limit = opts->delta_base_cache_limit * nr_threads;
 if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
 init_thread();
 work_lock();
@@ -1532,7 +1532,7 @@ static void write_special_file(const char *suffix, const char *msg,
 if (pack_name)
 filename = derive_filename(pack_name, "pack", suffix, &name_buf);
 else
-filename = odb_pack_name(&name_buf, hash, suffix);
+filename = odb_pack_name(the_repository, &name_buf, hash, suffix);
 fd = odb_pack_keep(filename);
 if (fd < 0) {
@@ -1560,7 +1560,7 @@ static void rename_tmp_packfile(const char **final_name,
 {
 if (!*final_name || strcmp(*final_name, curr_name)) {
 if (!*final_name)
-*final_name = odb_pack_name(name, hash, ext);
+*final_name = odb_pack_name(the_repository, name, hash, ext);
 if (finalize_object_file(curr_name, *final_name))
 die(_("unable to rename temporary '*.%s' file to '%s'"),
 ext, *final_name);
@@ -1605,7 +1605,8 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
 if (do_fsck_object) {
 struct packed_git *p;
-p = add_packed_git(final_index_name, strlen(final_index_name), 0);
+p = add_packed_git(the_repository, final_index_name,
+strlen(final_index_name), 0);
 if (p)
 install_packed_git(the_repository, p);
 }
@@ -1656,6 +1657,10 @@ static int git_index_pack_config(const char *k, const char *v,
 else
 opts->flags &= ~WRITE_REV;
 }
+if (!strcmp(k, "core.deltabasecachelimit")) {
+opts->delta_base_cache_limit = git_config_ulong(k, v, ctx->kvi);
+return 0;
+}
 return git_default_config(k, v, ctx, cb);
 }
@@ -1703,7 +1708,8 @@ static void read_v2_anomalous_offsets(struct packed_git *p,
 static void read_idx_option(struct pack_idx_option *opts, const char *pack_name)
 {
-struct packed_git *p = add_packed_git(pack_name, strlen(pack_name), 1);
+struct packed_git *p = add_packed_git(the_repository, pack_name,
+strlen(pack_name), 1);
 if (!p)
 die(_("Cannot open existing pack file '%s'"), pack_name);
@@ -2033,7 +2039,7 @@ int cmd_index_pack(int argc,
 parse_pack_objects(pack_hash);
 if (report_end_of_input)
 write_in_full(2, "\0", 1);
-resolve_deltas();
+resolve_deltas(&opts);
 conclude_pack(fix_thin_pack, curr_pack, pack_hash);
 free(ofs_deltas);
 free(ref_deltas);

View File

@@ -1530,7 +1530,7 @@ static int want_found_object(const struct object_id *oid, int exclude,
 return 0;
 if (ignore_packed_keep_in_core && p->pack_keep_in_core)
 return 0;
-if (has_object_kept_pack(oid, flags))
+if (has_object_kept_pack(p->repo, oid, flags))
 return 0;
 }
@@ -3628,7 +3628,7 @@ static void show_cruft_commit(struct commit *commit, void *data)
 static int cruft_include_check_obj(struct object *obj, void *data UNUSED)
 {
-return !has_object_kept_pack(&obj->oid, IN_CORE_KEEP_PACKS);
+return !has_object_kept_pack(to_pack.repo, &obj->oid, IN_CORE_KEEP_PACKS);
 }
 static int cruft_include_check(struct commit *commit, void *data)
@@ -3859,7 +3859,8 @@ static void show_object__ma_allow_promisor(struct object *obj, const char *name,
 * Quietly ignore EXPECTED missing objects. This avoids problems with
 * staging them now and getting an odd error later.
 */
-if (!has_object(the_repository, &obj->oid, 0) && is_promisor_object(&obj->oid))
+if (!has_object(the_repository, &obj->oid, 0) &&
+is_promisor_object(to_pack.repo, &obj->oid))
 return;
 show_object(obj, name, data);
@@ -3928,7 +3929,9 @@ static int add_object_in_unpacked_pack(const struct object_id *oid,
 static void add_objects_in_unpacked_packs(void)
 {
-if (for_each_packed_object(add_object_in_unpacked_pack, NULL,
+if (for_each_packed_object(to_pack.repo,
+add_object_in_unpacked_pack,
+NULL,
 FOR_EACH_OBJECT_PACK_ORDER |
 FOR_EACH_OBJECT_LOCAL_ONLY |
 FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS |

View File

@@ -690,7 +690,7 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix UNUSED, s
 pl = red = pack_list_difference(local_packs, min);
 while (pl) {
 printf("%s\n%s\n",
-odb_pack_name(&idx_name, pl->pack->hash, "idx"),
+odb_pack_name(pl->pack->repo, &idx_name, pl->pack->hash, "idx"),
 pl->pack->pack_name);
 pl = pl->next;
 }

View File

@@ -404,7 +404,7 @@ static void repack_promisor_objects(const struct pack_objects_args *args,
 * {type -> existing pack order} ordering when computing deltas instead
 * of a {type -> size} ordering, which may produce better deltas.
 */
-for_each_packed_object(write_oid, &cmd,
+for_each_packed_object(the_repository, write_oid, &cmd,
 FOR_EACH_OBJECT_PROMISOR_ONLY);
 if (cmd.in == -1) {

View File

@@ -121,7 +121,7 @@ static inline void finish_object__ma(struct object *obj)
 return;
 case MA_ALLOW_PROMISOR:
-if (is_promisor_object(&obj->oid))
+if (is_promisor_object(the_repository, &obj->oid))
 return;
 die("unexpected missing %s object '%s'",
 type_name(obj->type), oid_to_hex(&obj->oid));

View File

@@ -1914,7 +1914,7 @@ static int fill_oids_from_packs(struct write_commit_graph_context *ctx,
 struct packed_git *p;
 strbuf_setlen(&packname, dirlen);
 strbuf_addstr(&packname, pack_indexes->items[i].string);
-p = add_packed_git(packname.buf, packname.len, 1);
+p = add_packed_git(ctx->r, packname.buf, packname.len, 1);
 if (!p) {
 ret = error(_("error adding pack %s"), packname.buf);
 goto cleanup;
@@ -1960,7 +1960,7 @@ static void fill_oids_from_all_packs(struct write_commit_graph_context *ctx)
 ctx->progress = start_delayed_progress(
 _("Finding commits for commit graph among packed objects"),
 ctx->approx_nr_objects);
-for_each_packed_object(add_packed_commits, ctx,
+for_each_packed_object(ctx->r, add_packed_commits, ctx,
 FOR_EACH_OBJECT_PACK_ORDER);
 if (ctx->progress_done < ctx->approx_nr_objects)
 display_progress(ctx->progress, ctx->approx_nr_objects);

View File

@@ -1493,33 +1493,11 @@ static int git_default_core_config(const char *var, const char *value,
 return 0;
 }
-if (!strcmp(var, "core.packedgitwindowsize")) {
-int pgsz_x2 = getpagesize() * 2;
-packed_git_window_size = git_config_ulong(var, value, ctx->kvi);
-/* This value must be multiple of (pagesize * 2) */
-packed_git_window_size /= pgsz_x2;
-if (packed_git_window_size < 1)
-packed_git_window_size = 1;
-packed_git_window_size *= pgsz_x2;
-return 0;
-}
 if (!strcmp(var, "core.bigfilethreshold")) {
 big_file_threshold = git_config_ulong(var, value, ctx->kvi);
 return 0;
 }
-if (!strcmp(var, "core.packedgitlimit")) {
-packed_git_limit = git_config_ulong(var, value, ctx->kvi);
-return 0;
-}
-if (!strcmp(var, "core.deltabasecachelimit")) {
-delta_base_cache_limit = git_config_ulong(var, value, ctx->kvi);
-return 0;
-}
 if (!strcmp(var, "core.autocrlf")) {
 if (value && !strcasecmp(value, "input")) {
 auto_crlf = AUTO_CRLF_INPUT;
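With `core.packedGitWindowSize`, `core.packedGitLimit` and `core.deltaBaseCacheLimit` dropped from `git_default_core_config()`, the packfile code later in this diff reads these limits from `repo->settings` after calling `prepare_repo_settings()`; the corresponding config parsing presumably moves into the repo-settings setup, which is not shown here. A minimal sketch of the consumer side, assuming only the field name and the `prepare_repo_settings()` call that appear in the packfile.c hunks below:

```c
#include "git-compat-util.h"
#include "repository.h"

/*
 * Sketch only: how a packfile-layer helper would pick up the mmap
 * window size once the limits live in the per-repository settings.
 */
static size_t pack_window_align(struct repository *r)
{
	prepare_repo_settings(r);	/* lazily populate r->settings */
	return r->settings.packed_git_window_size / 2;
}
```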

View File

@@ -54,7 +54,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
 strbuf_add(&idx_file, transport->pack_lockfiles.items[0].string,
 base_len);
 strbuf_addstr(&idx_file, ".idx");
-new_pack = add_packed_git(idx_file.buf, idx_file.len, 1);
+new_pack = add_packed_git(the_repository, idx_file.buf,
+idx_file.len, 1);
 strbuf_release(&idx_file);
 }

diff.c
View File

@@ -4041,7 +4041,8 @@ static int reuse_worktree_file(struct index_state *istate,
 * objects however would tend to be slower as they need
 * to be individually opened and inflated.
 */
-if (!FAST_WORKING_DIRECTORY && !want_file && has_object_pack(oid))
+if (!FAST_WORKING_DIRECTORY && !want_file &&
+has_object_pack(istate->repo, oid))
 return 0;
 /*

View File

@@ -49,9 +49,6 @@ int fsync_object_files = -1;
 int use_fsync = -1;
 enum fsync_method fsync_method = FSYNC_METHOD_DEFAULT;
 enum fsync_component fsync_components = FSYNC_COMPONENTS_DEFAULT;
-size_t packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE;
-size_t packed_git_limit = DEFAULT_PACKED_GIT_LIMIT;
-size_t delta_base_cache_limit = 96 * 1024 * 1024;
 unsigned long big_file_threshold = 512 * 1024 * 1024;
 char *editor_program;
 char *askpass_program;

View File

@@ -165,7 +165,6 @@ extern int zlib_compression_level;
 extern int pack_compression_level;
 extern size_t packed_git_window_size;
 extern size_t packed_git_limit;
-extern size_t delta_base_cache_limit;
 extern unsigned long big_file_threshold;
 extern unsigned long pack_size_limit_cfg;
 extern int max_allowed_tree_depth;

fsck.c
View File

@@ -1295,7 +1295,7 @@ static int fsck_blobs(struct oidset *blobs_found, struct oidset *blobs_done,
 buf = repo_read_object_file(the_repository, oid, &type, &size);
 if (!buf) {
-if (is_promisor_object(oid))
+if (is_promisor_object(the_repository, oid))
 continue;
 ret |= report(options,
 oid, OBJ_BLOB, msg_missing,

http.c
View File

@@ -2439,7 +2439,7 @@ static int fetch_and_setup_pack_index(struct packed_git **packs_head,
 if (!tmp_idx)
 return -1;
-new_pack = parse_pack_index(sha1, tmp_idx);
+new_pack = parse_pack_index(the_repository, sha1, tmp_idx);
 if (!new_pack) {
 unlink(tmp_idx);
 free(tmp_idx);
@@ -2581,7 +2581,7 @@ struct http_pack_request *new_direct_http_pack_request(
 preq->url = url;
-odb_pack_name(&preq->tmpfile, packed_git_hash, "pack");
+odb_pack_name(the_repository, &preq->tmpfile, packed_git_hash, "pack");
 strbuf_addstr(&preq->tmpfile, ".temp");
 preq->packfile = fopen(preq->tmpfile.buf, "a");
 if (!preq->packfile) {

View File

@@ -41,7 +41,8 @@ static void show_object(struct traversal_context *ctx,
 {
 if (!ctx->show_object)
 return;
-if (ctx->revs->unpacked && has_object_pack(&object->oid))
+if (ctx->revs->unpacked && has_object_pack(ctx->revs->repo,
+&object->oid))
 return;
 ctx->show_object(object, name, ctx->show_data);
@@ -74,7 +75,7 @@ static void process_blob(struct traversal_context *ctx,
 */
 if (ctx->revs->exclude_promisor_objects &&
 !repo_has_object_file(the_repository, &obj->oid) &&
-is_promisor_object(&obj->oid))
+is_promisor_object(ctx->revs->repo, &obj->oid))
 return;
 pathlen = path->len;
@@ -179,7 +180,7 @@ static void process_tree(struct traversal_context *ctx,
 * an incomplete list of missing objects.
 */
 if (revs->exclude_promisor_objects &&
-is_promisor_object(&obj->oid))
+is_promisor_object(revs->repo, &obj->oid))
 return;
 if (!revs->do_not_die_on_missing_objects)

View File

@@ -154,7 +154,7 @@ static void add_pack_to_midx(const char *full_path, size_t full_path_len,
 return;
 ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
-p = add_packed_git(full_path, full_path_len, 0);
+p = add_packed_git(the_repository, full_path, full_path_len, 0);
 if (!p) {
 warning(_("failed to add packfile '%s'"),
 full_path);

midx.c
View File

@@ -131,6 +131,7 @@ static struct multi_pack_index *load_multi_pack_index_one(const char *object_dir
 m->data = midx_map;
 m->data_len = midx_size;
 m->local = local;
+m->repo = the_repository;
 m->signature = get_be32(m->data);
 if (m->signature != MIDX_SIGNATURE)
@@ -464,7 +465,7 @@ int prepare_midx_pack(struct repository *r, struct multi_pack_index *m,
 strhash(key.buf), key.buf,
 struct packed_git, packmap_ent);
 if (!p) {
-p = add_packed_git(pack_name.buf, pack_name.len, m->local);
+p = add_packed_git(r, pack_name.buf, pack_name.len, m->local);
 if (p) {
 install_packed_git(r, p);
 list_add_tail(&p->mru, &r->objects->packed_git_mru);

midx.h
View File

@@ -71,6 +71,9 @@ struct multi_pack_index {
 const char **pack_names;
 struct packed_git **packs;
+struct repository *repo;
 char object_dir[FLEX_ARRAY];
 };

View File

@@ -10,6 +10,7 @@
 struct oidmap;
 struct oidtree;
 struct strbuf;
+struct repository;
 struct object_directory {
 struct object_directory *next;
@@ -135,6 +136,10 @@ struct packed_git {
 */
 const uint32_t *mtimes_map;
 size_t mtimes_size;
+/* repo denotes the repository this packfile belongs to */
+struct repository *repo;
 /* something like ".git/objects/pack/xxxxx.pack" */
 char pack_name[FLEX_ARRAY]; /* more */
 };
@@ -545,7 +550,7 @@ typedef int each_packed_object_fn(const struct object_id *oid,
 int for_each_object_in_pack(struct packed_git *p,
 each_packed_object_fn, void *data,
 enum for_each_object_flags flags);
-int for_each_packed_object(each_packed_object_fn, void *,
-enum for_each_object_flags flags);
+int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
+void *data, enum for_each_object_flags flags);
 #endif /* OBJECT_STORE_LL_H */

View File

@@ -1,5 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #include "git-compat-util.h"
 #include "commit.h"
 #include "gettext.h"
@@ -177,12 +175,21 @@ static uint32_t bitmap_num_objects(struct bitmap_index *index)
 return index->pack->num_objects;
 }
+static struct repository *bitmap_repo(struct bitmap_index *bitmap_git)
+{
+if (bitmap_is_midx(bitmap_git))
+return bitmap_git->midx->repo;
+return bitmap_git->pack->repo;
+}
 static int load_bitmap_header(struct bitmap_index *index)
 {
 struct bitmap_disk_header *header = (void *)index->map;
-size_t header_size = sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz;
-if (index->map_size < header_size + the_hash_algo->rawsz)
+const struct git_hash_algo *hash_algo = bitmap_repo(index)->hash_algo;
+size_t header_size = sizeof(*header) - GIT_MAX_RAWSZ + hash_algo->rawsz;
+if (index->map_size < header_size + hash_algo->rawsz)
 return error(_("corrupted bitmap index (too small)"));
 if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
@@ -409,7 +416,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
 if (bitmap_git->pack || bitmap_git->midx) {
 struct strbuf buf = STRBUF_INIT;
 get_midx_filename(&buf, midx->object_dir);
-trace2_data_string("bitmap", the_repository,
+trace2_data_string("bitmap", bitmap_repo(bitmap_git),
 "ignoring extra midx bitmap file", buf.buf);
 close(fd);
 strbuf_release(&buf);
@@ -427,7 +434,7 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
 goto cleanup;
 if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum,
-the_repository->hash_algo)) {
+bitmap_repo(bitmap_git)->hash_algo)) {
 error(_("checksum doesn't match in MIDX and bitmap"));
 goto cleanup;
 }
@@ -438,7 +445,9 @@ static int open_midx_bitmap_1(struct bitmap_index *bitmap_git,
 }
 for (i = 0; i < bitmap_git->midx->num_packs; i++) {
-if (prepare_midx_pack(the_repository, bitmap_git->midx, i)) {
+if (prepare_midx_pack(bitmap_repo(bitmap_git),
+bitmap_git->midx,
+i)) {
 warning(_("could not open pack %s"),
 bitmap_git->midx->pack_names[i]);
 goto cleanup;
@@ -492,8 +501,9 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
 }
 if (bitmap_git->pack || bitmap_git->midx) {
-trace2_data_string("bitmap", the_repository,
-"ignoring extra bitmap file", packfile->pack_name);
+trace2_data_string("bitmap", bitmap_repo(bitmap_git),
+"ignoring extra bitmap file",
+packfile->pack_name);
 close(fd);
 return -1;
 }
@@ -518,8 +528,8 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
 return -1;
 }
-trace2_data_string("bitmap", the_repository, "opened bitmap file",
-packfile->pack_name);
+trace2_data_string("bitmap", bitmap_repo(bitmap_git),
+"opened bitmap file", packfile->pack_name);
 return 0;
 }
@@ -649,7 +659,7 @@ struct bitmap_index *prepare_bitmap_git(struct repository *r)
 struct bitmap_index *prepare_midx_bitmap_git(struct multi_pack_index *midx)
 {
-struct repository *r = the_repository;
+struct repository *r = midx->repo;
 struct bitmap_index *bitmap_git = xcalloc(1, sizeof(*bitmap_git));
 if (!open_midx_bitmap_1(bitmap_git, midx) && !load_bitmap(r, bitmap_git))
@@ -1213,6 +1223,7 @@ static struct bitmap *find_boundary_objects(struct bitmap_index *bitmap_git,
 {
 struct bitmap_boundary_cb cb;
 struct object_list *root;
+struct repository *repo;
 unsigned int i;
 unsigned int tmp_blobs, tmp_trees, tmp_tags;
 int any_missing = 0;
@@ -1222,6 +1233,8 @@ static struct bitmap *find_boundary_objects(struct bitmap_index *bitmap_git,
 cb.base = bitmap_new();
 object_array_init(&cb.boundary);
+repo = bitmap_repo(bitmap_git);
 revs->ignore_missing_links = 1;
 if (bitmap_git->pseudo_merges.nr) {
@@ -1280,19 +1293,19 @@ static struct bitmap *find_boundary_objects(struct bitmap_index *bitmap_git,
 * revision walk to (a) OR in any bitmaps that are UNINTERESTING
 * between the tips and boundary, and (b) record the boundary.
 */
-trace2_region_enter("pack-bitmap", "boundary-prepare", the_repository);
+trace2_region_enter("pack-bitmap", "boundary-prepare", repo);
 if (prepare_revision_walk(revs))
 die("revision walk setup failed");
-trace2_region_leave("pack-bitmap", "boundary-prepare", the_repository);
+trace2_region_leave("pack-bitmap", "boundary-prepare", repo);
-trace2_region_enter("pack-bitmap", "boundary-traverse", the_repository);
+trace2_region_enter("pack-bitmap", "boundary-traverse", repo);
 revs->boundary = 1;
 traverse_commit_list_filtered(revs,
 show_boundary_commit,
 show_boundary_object,
 &cb, NULL);
 revs->boundary = 0;
-trace2_region_leave("pack-bitmap", "boundary-traverse", the_repository);
+trace2_region_leave("pack-bitmap", "boundary-traverse", repo);
 revs->blob_objects = tmp_blobs;
 revs->tree_objects = tmp_trees;
@@ -1304,7 +1317,7 @@ static struct bitmap *find_boundary_objects(struct bitmap_index *bitmap_git,
 /*
 * Then add the boundary commit(s) as fill-in traversal tips.
 */
-trace2_region_enter("pack-bitmap", "boundary-fill-in", the_repository);
+trace2_region_enter("pack-bitmap", "boundary-fill-in", repo);
 for (i = 0; i < cb.boundary.nr; i++) {
 struct object *obj = cb.boundary.objects[i].item;
 if (bitmap_walk_contains(bitmap_git, cb.base, &obj->oid))
@@ -1314,7 +1327,7 @@ static struct bitmap *find_boundary_objects(struct bitmap_index *bitmap_git,
 }
 if (revs->pending.nr)
 cb.base = fill_in_bitmap(bitmap_git, revs, cb.base, NULL);
-trace2_region_leave("pack-bitmap", "boundary-fill-in", the_repository);
+trace2_region_leave("pack-bitmap", "boundary-fill-in", repo);
 cleanup:
 object_array_clear(&cb.boundary);
@@ -1718,7 +1731,8 @@ static unsigned long get_size_by_pos(struct bitmap_index *bitmap_git,
 ofs = pack_pos_to_offset(pack, pos);
 }
-if (packed_object_info(the_repository, pack, ofs, &oi) < 0) {
+if (packed_object_info(bitmap_repo(bitmap_git), pack, ofs,
+&oi) < 0) {
 struct object_id oid;
 nth_bitmap_object_oid(bitmap_git, &oid,
 pack_pos_to_index(pack, pos));
@@ -1727,7 +1741,8 @@ static unsigned long get_size_by_pos(struct bitmap_index *bitmap_git,
 } else {
 struct eindex *eindex = &bitmap_git->ext_index;
 struct object *obj = eindex->objects[pos - bitmap_num_objects(bitmap_git)];
-if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
+if (oid_object_info_extended(bitmap_repo(bitmap_git), &obj->oid,
+&oi, 0) < 0)
 die(_("unable to get size of %s"), oid_to_hex(&obj->oid));
 }
@@ -1889,7 +1904,8 @@ static void filter_packed_objects_from_bitmap(struct bitmap_index *bitmap_git,
 bitmap_unset(result, i);
 for (i = 0; i < eindex->count; ++i) {
-if (has_object_pack(&eindex->objects[i]->oid))
+if (has_object_pack(bitmap_repo(bitmap_git),
+&eindex->objects[i]->oid))
 bitmap_unset(result, objects_nr + i);
 }
 }
@@ -1907,6 +1923,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
 struct bitmap *haves_bitmap = NULL;
 struct bitmap_index *bitmap_git;
+struct repository *repo;
 /*
 * We can't do pathspec limiting with bitmaps, because we don't know
@@ -1980,18 +1997,20 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
 if (!use_boundary_traversal)
 object_array_clear(&revs->pending);
+repo = bitmap_repo(bitmap_git);
 if (haves) {
 if (use_boundary_traversal) {
-trace2_region_enter("pack-bitmap", "haves/boundary", the_repository);
+trace2_region_enter("pack-bitmap", "haves/boundary", repo);
 haves_bitmap = find_boundary_objects(bitmap_git, revs, haves);
-trace2_region_leave("pack-bitmap", "haves/boundary", the_repository);
+trace2_region_leave("pack-bitmap", "haves/boundary", repo);
 } else {
-trace2_region_enter("pack-bitmap", "haves/classic", the_repository);
+trace2_region_enter("pack-bitmap", "haves/classic", repo);
 revs->ignore_missing_links = 1;
 haves_bitmap = find_objects(bitmap_git, revs, haves, NULL);
 reset_revision_walk();
 revs->ignore_missing_links = 0;
-trace2_region_leave("pack-bitmap", "haves/classic", the_repository);
+trace2_region_leave("pack-bitmap", "haves/classic", repo);
 }
 if (!haves_bitmap)
@@ -2025,17 +2044,17 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs,
 object_list_free(&wants);
 object_list_free(&haves);
-trace2_data_intmax("bitmap", the_repository, "pseudo_merges_satisfied",
+trace2_data_intmax("bitmap", repo, "pseudo_merges_satisfied",
 pseudo_merges_satisfied_nr);
-trace2_data_intmax("bitmap", the_repository, "pseudo_merges_cascades",
+trace2_data_intmax("bitmap", repo, "pseudo_merges_cascades",
 pseudo_merges_cascades_nr);
-trace2_data_intmax("bitmap", the_repository, "bitmap/hits",
+trace2_data_intmax("bitmap", repo, "bitmap/hits",
 existing_bitmaps_hits_nr);
-trace2_data_intmax("bitmap", the_repository, "bitmap/misses",
+trace2_data_intmax("bitmap", repo, "bitmap/misses",
 existing_bitmaps_misses_nr);
-trace2_data_intmax("bitmap", the_repository, "bitmap/roots_with_bitmap",
+trace2_data_intmax("bitmap", repo, "bitmap/roots_with_bitmap",
 roots_with_bitmaps_nr);
-trace2_data_intmax("bitmap", the_repository, "bitmap/roots_without_bitmap",
+trace2_data_intmax("bitmap", repo, "bitmap/roots_without_bitmap",
 roots_without_bitmaps_nr);
 return bitmap_git;
@@ -2256,7 +2275,7 @@ void reuse_partial_packfile_from_bitmap(struct bitmap_index *bitmap_git,
 struct bitmap **reuse_out,
 int multi_pack_reuse)
 {
-struct repository *r = the_repository;
+struct repository *r = bitmap_repo(bitmap_git);
 struct bitmapped_pack *packs = NULL;
 struct bitmap *result = bitmap_git->result;
 struct bitmap *reuse;
@@ -2792,7 +2811,7 @@ int rebuild_bitmap(const uint32_t *reposition,
 uint32_t *create_bitmap_mapping(struct bitmap_index *bitmap_git,
 struct packing_data *mapping)
 {
-struct repository *r = the_repository;
+struct repository *r = bitmap_repo(bitmap_git);
 uint32_t i, num_objects;
 uint32_t *reposition;
@@ -2948,7 +2967,8 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git)
 st_add(bitmap_num_objects(bitmap_git), i)))
 continue;
-if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)
+if (oid_object_info_extended(bitmap_repo(bitmap_git), &obj->oid,
+&oi, 0) < 0)
 die(_("unable to get disk usage of '%s'"),
 oid_to_hex(&obj->oid));

View File

@@ -7,7 +7,8 @@
 struct repository;
 #define DEFAULT_DELTA_CACHE_SIZE (256 * 1024 * 1024)
+#define DEFAULT_DELTA_BASE_CACHE_LIMIT (96 * 1024 * 1024)
 #define OE_DFS_STATE_BITS 2
 #define OE_DEPTH_BITS 12

View File

@@ -21,6 +21,7 @@ void reset_pack_idx_option(struct pack_idx_option *opts)
 memset(opts, 0, sizeof(*opts));
 opts->version = 2;
 opts->off32_limit = 0x7fffffff;
+opts->delta_base_cache_limit = DEFAULT_DELTA_BASE_CACHE_LIMIT;
 }
 static int sha1_compare(const void *_a, const void *_b)

pack.h
View File

@@ -58,6 +58,8 @@ struct pack_idx_option {
 */
 int anomaly_alloc, anomaly_nr;
 uint32_t *anomaly;
+size_t delta_base_cache_limit;
 };
 void reset_pack_idx_option(struct pack_idx_option *);

View File

@@ -1,4 +1,3 @@
-#define USE_THE_REPOSITORY_VARIABLE
 #include "git-compat-util.h"
 #include "environment.h"
@@ -25,13 +24,12 @@
 #include "pack-revindex.h"
 #include "promisor-remote.h"
-char *odb_pack_name(struct strbuf *buf,
-const unsigned char *hash,
-const char *ext)
+char *odb_pack_name(struct repository *r, struct strbuf *buf,
+const unsigned char *hash, const char *ext)
 {
 strbuf_reset(buf);
-strbuf_addf(buf, "%s/pack/pack-%s.%s", repo_get_object_directory(the_repository),
-hash_to_hex(hash), ext);
+strbuf_addf(buf, "%s/pack/pack-%s.%s", repo_get_object_directory(r),
+hash_to_hex_algop(hash, r->hash_algo), ext);
 return buf->buf;
 }
@@ -47,15 +45,15 @@ static size_t pack_mapped;
 #define SZ_FMT PRIuMAX
 static inline uintmax_t sz_fmt(size_t s) { return s; }
-void pack_report(void)
+void pack_report(struct repository *repo)
 {
 fprintf(stderr,
 "pack_report: getpagesize() = %10" SZ_FMT "\n"
 "pack_report: core.packedGitWindowSize = %10" SZ_FMT "\n"
 "pack_report: core.packedGitLimit = %10" SZ_FMT "\n",
 sz_fmt(getpagesize()),
-sz_fmt(packed_git_window_size),
-sz_fmt(packed_git_limit));
+sz_fmt(repo->settings.packed_git_window_size),
+sz_fmt(repo->settings.packed_git_limit));
 fprintf(stderr,
 "pack_report: pack_used_ctr = %10u\n"
 "pack_report: pack_mmap_calls = %10u\n"
@@ -79,7 +77,7 @@ static int check_packed_git_idx(const char *path, struct packed_git *p)
 size_t idx_size;
 int fd = git_open(path), ret;
 struct stat st;
-const unsigned int hashsz = the_hash_algo->rawsz;
+const unsigned int hashsz = p->repo->hash_algo->rawsz;
 if (fd < 0)
 return -1;
@@ -217,11 +215,12 @@ uint32_t get_pack_fanout(struct packed_git *p, uint32_t value)
 return ntohl(level1_ofs[value]);
 }
-static struct packed_git *alloc_packed_git(int extra)
+static struct packed_git *alloc_packed_git(struct repository *r, int extra)
 {
 struct packed_git *p = xmalloc(st_add(sizeof(*p), extra));
 memset(p, 0, sizeof(*p));
 p->pack_fd = -1;
+p->repo = r;
 return p;
 }
@@ -233,15 +232,16 @@ static char *pack_path_from_idx(const char *idx_path)
 return xstrfmt("%.*s.pack", (int)len, idx_path);
 }
-struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path)
+struct packed_git *parse_pack_index(struct repository *r, unsigned char *sha1,
+const char *idx_path)
 {
 char *path = pack_path_from_idx(idx_path);
 size_t alloc = st_add(strlen(path), 1);
-struct packed_git *p = alloc_packed_git(alloc);
+struct packed_git *p = alloc_packed_git(r, alloc);
 memcpy(p->pack_name, path, alloc); /* includes NUL */
 free(path);
-hashcpy(p->hash, sha1, the_repository->hash_algo);
+hashcpy(p->hash, sha1, p->repo->hash_algo);
 if (check_packed_git_idx(idx_path, p)) {
 free(p);
 return NULL;
@@ -276,7 +276,7 @@ static int unuse_one_window(struct packed_git *current)
 if (current)
 scan_windows(current, &lru_p, &lru_w, &lru_l);
-for (p = the_repository->objects->packed_git; p; p = p->next)
+for (p = current->repo->objects->packed_git; p; p = p->next)
 scan_windows(p, &lru_p, &lru_w, &lru_l);
 if (lru_p) {
 munmap(lru_w->base, lru_w->len);
@@ -458,13 +458,13 @@ static void find_lru_pack(struct packed_git *p, struct packed_git **lru_p, struc
 *accept_windows_inuse = has_windows_inuse;
 }
-static int close_one_pack(void)
+static int close_one_pack(struct repository *r)
 {
 struct packed_git *p, *lru_p = NULL;
 struct pack_window *mru_w = NULL;
 int accept_windows_inuse = 1;
-for (p = the_repository->objects->packed_git; p; p = p->next) {
+for (p = r->objects->packed_git; p; p = p->next) {
 if (p->pack_fd == -1)
 continue;
 find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse);
@@ -538,7 +538,7 @@ static int open_packed_git_1(struct packed_git *p)
 unsigned char hash[GIT_MAX_RAWSZ];
 unsigned char *idx_hash;
 ssize_t read_result;
-const unsigned hashsz = the_hash_algo->rawsz;
+const unsigned hashsz = p->repo->hash_algo->rawsz;
 if (open_pack_index(p))
 return error("packfile %s index unavailable", p->pack_name);
@@ -553,7 +553,7 @@ static int open_packed_git_1(struct packed_git *p)
 pack_max_fds = 1;
 }
-while (pack_max_fds <= pack_open_fds && close_one_pack())
+while (pack_max_fds <= pack_open_fds && close_one_pack(p->repo))
 ; /* nothing */
 p->pack_fd = git_open(p->pack_name);
@@ -595,7 +595,7 @@ static int open_packed_git_1(struct packed_git *p)
 if (read_result != hashsz)
 return error("packfile %s signature is unavailable", p->pack_name);
 idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2;
-if (!hasheq(hash, idx_hash, the_repository->hash_algo))
+if (!hasheq(hash, idx_hash, p->repo->hash_algo))
 return error("packfile %s does not match index", p->pack_name);
 return 0;
 }
@@ -608,7 +608,8 @@ static int open_packed_git(struct packed_git *p)
 return -1;
 }
-static int in_window(struct pack_window *win, off_t offset)
+static int in_window(struct repository *r, struct pack_window *win,
+off_t offset)
 {
 /* We must promise at least one full hash after the
 * offset is available from this window, otherwise the offset
@@ -618,7 +619,7 @@ static int in_window(struct pack_window *win, off_t offset)
 */
 off_t win_off = win->offset;
 return win_off <= offset
-&& (offset + the_hash_algo->rawsz) <= (win_off + win->len);
+&& (offset + r->hash_algo->rawsz) <= (win_off + win->len);
 }
 unsigned char *use_pack(struct packed_git *p,
@@ -635,21 +636,28 @@ unsigned char *use_pack(struct packed_git *p,
 */
 if (!p->pack_size && p->pack_fd == -1 && open_packed_git(p))
 die("packfile %s cannot be accessed", p->pack_name);
-if (offset > (p->pack_size - the_hash_algo->rawsz))
+if (offset > (p->pack_size - p->repo->hash_algo->rawsz))
 die("offset beyond end of packfile (truncated pack?)");
 if (offset < 0)
 die(_("offset before end of packfile (broken .idx?)"));
-if (!win || !in_window(win, offset)) {
+if (!win || !in_window(p->repo, win, offset)) {
 if (win)
 win->inuse_cnt--;
 for (win = p->windows; win; win = win->next) {
-if (in_window(win, offset))
+if (in_window(p->repo, win, offset))
 break;
 }
 if (!win) {
-size_t window_align = packed_git_window_size / 2;
+size_t window_align;
 off_t len;
+struct repo_settings *settings;
+/* lazy load the settings in case it hasn't been setup */
+prepare_repo_settings(p->repo);
+settings = &p->repo->settings;
+window_align = settings->packed_git_window_size / 2;
 if (p->pack_fd == -1 && open_packed_git(p))
 die("packfile %s cannot be accessed", p->pack_name);
@@ -657,11 +665,12 @@ unsigned char *use_pack(struct packed_git *p,
 CALLOC_ARRAY(win, 1);
 win->offset = (offset / window_align) * window_align;
 len = p->pack_size - win->offset;
-if (len > packed_git_window_size)
-len = packed_git_window_size;
+if (len > settings->packed_git_window_size)
+len = settings->packed_git_window_size;
 win->len = (size_t)len;
 pack_mapped += win->len;
-while (packed_git_limit < pack_mapped
+while (settings->packed_git_limit < pack_mapped
 && unuse_one_window(p))
 ; /* nothing */
 win->base = xmmap_gently(NULL, win->len,
@@ -703,11 +712,13 @@ void unuse_pack(struct pack_window **w_cursor)
 }
 }
-struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
+struct packed_git *add_packed_git(struct repository *r, const char *path,
+size_t path_len, int local)
 {
 struct stat st;
 size_t alloc;
 struct packed_git *p;
+struct object_id oid;
 /*
 * Make sure a corresponding .pack file exists and that
@@ -721,7 +732,7 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
 * the use xsnprintf double-checks that)
 */
 alloc = st_add3(path_len, strlen(".promisor"), 1);
-p = alloc_packed_git(alloc);
+p = alloc_packed_git(r, alloc);
 memcpy(p->pack_name, path, path_len);
 xsnprintf(p->pack_name + path_len, alloc - path_len, ".keep");
@@ -748,9 +759,13 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
 p->pack_size = st.st_size;
 p->pack_local = local;
 p->mtime = st.st_mtime;
-if (path_len < the_hash_algo->hexsz ||
-get_hash_hex(path + path_len - the_hash_algo->hexsz, p->hash))
-hashclr(p->hash, the_repository->hash_algo);
+if (path_len < r->hash_algo->hexsz ||
+get_oid_hex_algop(path + path_len - r->hash_algo->hexsz, &oid,
+r->hash_algo))
+hashclr(p->hash, r->hash_algo);
+else
+hashcpy(p->hash, oid.hash, r->hash_algo);
 return p;
 }
@@ -877,7 +892,7 @@ static void prepare_pack(const char *full_name, size_t full_name_len,
 /* Don't reopen a pack we already have. */
 if (!hashmap_get(&data->r->objects->pack_map, &hent, pack_name)) {
-p = add_packed_git(full_name, full_name_len, data->local);
+p = add_packed_git(data->r, full_name, full_name_len, data->local);
 if (p)
 install_packed_git(data->r, p);
 }
@@ -1240,9 +1255,9 @@ off_t get_delta_base(struct packed_git *p,
 } else if (type == OBJ_REF_DELTA) {
 /* The base entry _must_ be in the same pack */
 struct object_id oid;
-oidread(&oid, base_info, the_repository->hash_algo);
+oidread(&oid, base_info, p->repo->hash_algo);
 base_offset = find_pack_entry_one(&oid, p);
-*curpos += the_hash_algo->rawsz;
+*curpos += p->repo->hash_algo->rawsz;
 } else
 die("I am totally screwed");
 return base_offset;
@@ -1263,7 +1278,7 @@ static int get_delta_base_oid(struct packed_git *p,
 {
 if (type == OBJ_REF_DELTA) {
 unsigned char *base = use_pack(p, w_curs, curpos, NULL);
-oidread(oid, base, the_repository->hash_algo);
+oidread(oid, base, p->repo->hash_algo);
 return 0;
 } else if (type == OBJ_OFS_DELTA) {
 uint32_t base_pos;
@ -1488,7 +1503,9 @@ void clear_delta_base_cache(void)
} }
static void add_delta_base_cache(struct packed_git *p, off_t base_offset, static void add_delta_base_cache(struct packed_git *p, off_t base_offset,
void *base, unsigned long base_size, enum object_type type) void *base, unsigned long base_size,
unsigned long delta_base_cache_limit,
enum object_type type)
{ {
struct delta_base_cache_entry *ent; struct delta_base_cache_entry *ent;
struct list_head *lru, *tmp; struct list_head *lru, *tmp;
@ -1605,7 +1622,7 @@ int packed_object_info(struct repository *r, struct packed_git *p,
goto out; goto out;
} }
} else } else
oidclr(oi->delta_base_oid, the_repository->hash_algo); oidclr(oi->delta_base_oid, p->repo->hash_algo);
} }
oi->whence = in_delta_base_cache(p, obj_offset) ? OI_DBCACHED : oi->whence = in_delta_base_cache(p, obj_offset) ? OI_DBCACHED :
@ -1690,6 +1707,8 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
int delta_stack_nr = 0, delta_stack_alloc = UNPACK_ENTRY_STACK_PREALLOC; int delta_stack_nr = 0, delta_stack_alloc = UNPACK_ENTRY_STACK_PREALLOC;
int base_from_cache = 0; int base_from_cache = 0;
prepare_repo_settings(p->repo);
write_pack_access_log(p, obj_offset); write_pack_access_log(p, obj_offset);
/* PHASE 1: drill down to the innermost base object */ /* PHASE 1: drill down to the innermost base object */
@ -1870,7 +1889,9 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
* before we are done using it. * before we are done using it.
*/ */
if (!external_base) if (!external_base)
add_delta_base_cache(p, base_obj_offset, base, base_size, type); add_delta_base_cache(p, base_obj_offset, base, base_size,
p->repo->settings.delta_base_cache_limit,
type);
free(delta_data); free(delta_data);
free(external_base); free(external_base);
@ -1894,7 +1915,7 @@ int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32
{ {
const unsigned char *index_fanout = p->index_data; const unsigned char *index_fanout = p->index_data;
const unsigned char *index_lookup; const unsigned char *index_lookup;
const unsigned int hashsz = the_hash_algo->rawsz; const unsigned int hashsz = p->repo->hash_algo->rawsz;
int index_lookup_width; int index_lookup_width;
if (!index_fanout) if (!index_fanout)
@ -1919,7 +1940,7 @@ int nth_packed_object_id(struct object_id *oid,
uint32_t n) uint32_t n)
{ {
const unsigned char *index = p->index_data; const unsigned char *index = p->index_data;
const unsigned int hashsz = the_hash_algo->rawsz; const unsigned int hashsz = p->repo->hash_algo->rawsz;
if (!index) { if (!index) {
if (open_pack_index(p)) if (open_pack_index(p))
return -1; return -1;
@ -1930,11 +1951,10 @@ int nth_packed_object_id(struct object_id *oid,
index += 4 * 256; index += 4 * 256;
if (p->index_version == 1) { if (p->index_version == 1) {
oidread(oid, index + st_add(st_mult(hashsz + 4, n), 4), oidread(oid, index + st_add(st_mult(hashsz + 4, n), 4),
the_repository->hash_algo); p->repo->hash_algo);
} else { } else {
index += 8; index += 8;
oidread(oid, index + st_mult(hashsz, n), oidread(oid, index + st_mult(hashsz, n), p->repo->hash_algo);
the_repository->hash_algo);
} }
return 0; return 0;
} }
@ -1956,7 +1976,7 @@ void check_pack_index_ptr(const struct packed_git *p, const void *vptr)
off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n) off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
{ {
const unsigned char *index = p->index_data; const unsigned char *index = p->index_data;
const unsigned int hashsz = the_hash_algo->rawsz; const unsigned int hashsz = p->repo->hash_algo->rawsz;
index += 4 * 256; index += 4 * 256;
if (p->index_version == 1) { if (p->index_version == 1) {
return ntohl(*((uint32_t *)(index + st_mult(hashsz + 4, n)))); return ntohl(*((uint32_t *)(index + st_mult(hashsz + 4, n))));
@ -2136,16 +2156,17 @@ int find_kept_pack_entry(struct repository *r,
return 0; return 0;
} }
int has_object_pack(const struct object_id *oid) int has_object_pack(struct repository *r, const struct object_id *oid)
{ {
struct pack_entry e; struct pack_entry e;
return find_pack_entry(the_repository, oid, &e); return find_pack_entry(r, oid, &e);
} }
int has_object_kept_pack(const struct object_id *oid, unsigned flags) int has_object_kept_pack(struct repository *r, const struct object_id *oid,
unsigned flags)
{ {
struct pack_entry e; struct pack_entry e;
return find_kept_pack_entry(the_repository, oid, flags, &e); return find_kept_pack_entry(r, oid, flags, &e);
} }
int for_each_object_in_pack(struct packed_git *p, int for_each_object_in_pack(struct packed_git *p,
@ -2156,7 +2177,7 @@ int for_each_object_in_pack(struct packed_git *p,
int r = 0; int r = 0;
if (flags & FOR_EACH_OBJECT_PACK_ORDER) { if (flags & FOR_EACH_OBJECT_PACK_ORDER) {
if (load_pack_revindex(the_repository, p)) if (load_pack_revindex(p->repo, p))
return -1; return -1;
} }
@ -2192,15 +2213,14 @@ int for_each_object_in_pack(struct packed_git *p,
return r; return r;
} }
int for_each_packed_object(each_packed_object_fn cb, void *data, int for_each_packed_object(struct repository *repo, each_packed_object_fn cb,
enum for_each_object_flags flags) void *data, enum for_each_object_flags flags)
{ {
struct packed_git *p; struct packed_git *p;
int r = 0; int r = 0;
int pack_errors = 0; int pack_errors = 0;
prepare_packed_git(the_repository); for (p = get_all_packs(repo); p; p = p->next) {
for (p = get_all_packs(the_repository); p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local) if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue; continue;
if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
@ -2224,7 +2244,7 @@ int for_each_packed_object(each_packed_object_fn cb, void *data,
} }
static int add_promisor_object(const struct object_id *oid, static int add_promisor_object(const struct object_id *oid,
struct packed_git *pack UNUSED, struct packed_git *pack,
uint32_t pos UNUSED, uint32_t pos UNUSED,
void *set_) void *set_)
{ {
@ -2232,12 +2252,12 @@ static int add_promisor_object(const struct object_id *oid,
struct object *obj; struct object *obj;
int we_parsed_object; int we_parsed_object;
obj = lookup_object(the_repository, oid); obj = lookup_object(pack->repo, oid);
if (obj && obj->parsed) { if (obj && obj->parsed) {
we_parsed_object = 0; we_parsed_object = 0;
} else { } else {
we_parsed_object = 1; we_parsed_object = 1;
obj = parse_object(the_repository, oid); obj = parse_object(pack->repo, oid);
} }
if (!obj) if (!obj)
@ -2278,14 +2298,14 @@ static int add_promisor_object(const struct object_id *oid,
return 0; return 0;
} }
int is_promisor_object(const struct object_id *oid) int is_promisor_object(struct repository *r, const struct object_id *oid)
{ {
static struct oidset promisor_objects; static struct oidset promisor_objects;
static int promisor_objects_prepared; static int promisor_objects_prepared;
if (!promisor_objects_prepared) { if (!promisor_objects_prepared) {
if (repo_has_promisor_remote(the_repository)) { if (repo_has_promisor_remote(r)) {
for_each_packed_object(add_promisor_object, for_each_packed_object(r, add_promisor_object,
&promisor_objects, &promisor_objects,
FOR_EACH_OBJECT_PROMISOR_ONLY | FOR_EACH_OBJECT_PROMISOR_ONLY |
FOR_EACH_OBJECT_PACK_ORDER); FOR_EACH_OBJECT_PACK_ORDER);
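The pattern running through these hunks is that iteration and lookup helpers now take the repository they operate on instead of reaching for the_repository. A rough caller-side sketch under the new signatures (the callback and wrapper names are invented for illustration; the include list is approximate):

    #include "git-compat-util.h"
    #include "hash.h"
    #include "packfile.h"
    #include "repository.h"

    /* Count packed objects local to one repository (sketch, not from the patch). */
    static int count_packed(const struct object_id *oid UNUSED,
                            struct packed_git *pack UNUSED,
                            uint32_t pos UNUSED, void *data)
    {
            unsigned long *n = data;
            (*n)++;
            return 0;
    }

    static unsigned long count_local_packed_objects(struct repository *r)
    {
            unsigned long n = 0;

            if (for_each_packed_object(r, count_packed, &n,
                                       FOR_EACH_OBJECT_LOCAL_ONLY) < 0)
                    return 0;
            return n;
    }

The same applies to point lookups: has_object_pack(r, oid) and has_object_kept_pack(r, oid, flags) now name the repository explicitly.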

View File

@ -29,7 +29,8 @@ struct pack_entry {
* *
* Example: odb_pack_name(out, sha1, "idx") => ".git/objects/pack/pack-1234..idx" * Example: odb_pack_name(out, sha1, "idx") => ".git/objects/pack/pack-1234..idx"
*/ */
char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, const char *ext); char *odb_pack_name(struct repository *r, struct strbuf *buf,
const unsigned char *hash, const char *ext);
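With the repository parameter added, the usage example in the comment above would be spelled along these lines (a sketch: repo and p stand for a repository and a packed_git the caller already has):

    struct strbuf buf = STRBUF_INIT;

    odb_pack_name(repo, &buf, p->hash, "idx"); /* ".git/objects/pack/pack-<hash>.idx" */
    strbuf_release(&buf);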
/* /*
* Return the basename of the packfile, omitting any containing directory * Return the basename of the packfile, omitting any containing directory
@ -46,7 +47,8 @@ const char *pack_basename(struct packed_git *p);
* and does not add the resulting packed_git struct to the internal list of * and does not add the resulting packed_git struct to the internal list of
* packs. You probably want add_packed_git() instead. * packs. You probably want add_packed_git() instead.
*/ */
struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path); struct packed_git *parse_pack_index(struct repository *r, unsigned char *sha1,
const char *idx_path);
typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len, typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len,
const char *file_name, void *data); const char *file_name, void *data);
@ -87,7 +89,7 @@ unsigned long repo_approximate_object_count(struct repository *r);
struct packed_git *find_oid_pack(const struct object_id *oid, struct packed_git *find_oid_pack(const struct object_id *oid,
struct packed_git *packs); struct packed_git *packs);
void pack_report(void); void pack_report(struct repository *repo);
/* /*
* mmap the index file for the specified packfile (if it is not * mmap the index file for the specified packfile (if it is not
@ -113,7 +115,8 @@ void close_pack(struct packed_git *);
void close_object_store(struct raw_object_store *o); void close_object_store(struct raw_object_store *o);
void unuse_pack(struct pack_window **); void unuse_pack(struct pack_window **);
void clear_delta_base_cache(void); void clear_delta_base_cache(void);
struct packed_git *add_packed_git(const char *path, size_t path_len, int local); struct packed_git *add_packed_git(struct repository *r, const char *path,
size_t path_len, int local);
/* /*
* Unlink the .pack and associated extension files. * Unlink the .pack and associated extension files.
@ -190,14 +193,15 @@ const struct packed_git *has_packed_and_bad(struct repository *, const struct ob
int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e); int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
int find_kept_pack_entry(struct repository *r, const struct object_id *oid, unsigned flags, struct pack_entry *e); int find_kept_pack_entry(struct repository *r, const struct object_id *oid, unsigned flags, struct pack_entry *e);
int has_object_pack(const struct object_id *oid); int has_object_pack(struct repository *r, const struct object_id *oid);
int has_object_kept_pack(const struct object_id *oid, unsigned flags); int has_object_kept_pack(struct repository *r, const struct object_id *oid,
unsigned flags);
/* /*
* Return 1 if an object in a promisor packfile is or refers to the given * Return 1 if an object in a promisor packfile is or refers to the given
* object, 0 otherwise. * object, 0 otherwise.
*/ */
int is_promisor_object(const struct object_id *oid); int is_promisor_object(struct repository *r, const struct object_id *oid);
/* /*
* Expose a function for fuzz testing. * Expose a function for fuzz testing.

View File

@ -283,7 +283,7 @@ void promisor_remote_get_direct(struct repository *repo,
} }
for (i = 0; i < remaining_nr; i++) { for (i = 0; i < remaining_nr; i++) {
if (is_promisor_object(&remaining_oids[i])) if (is_promisor_object(repo, &remaining_oids[i]))
die(_("could not fetch %s from promisor remote"), die(_("could not fetch %s from promisor remote"),
oid_to_hex(&remaining_oids[i])); oid_to_hex(&remaining_oids[i]));
} }

View File

@ -24,7 +24,7 @@ static int prune_object(const struct object_id *oid, const char *path,
{ {
int *opts = data; int *opts = data;
if (!has_object_pack(oid)) if (!has_object_pack(the_repository, oid))
return 0; return 0;
if (*opts & PRUNE_PACKED_DRY_RUN) if (*opts & PRUNE_PACKED_DRY_RUN)

View File

@ -239,7 +239,7 @@ static int want_recent_object(struct recent_data *data,
const struct object_id *oid) const struct object_id *oid)
{ {
if (data->ignore_in_core_kept_packs && if (data->ignore_in_core_kept_packs &&
has_object_kept_pack(oid, IN_CORE_KEEP_PACKS)) has_object_kept_pack(data->revs->repo, oid, IN_CORE_KEEP_PACKS))
return 0; return 0;
return 1; return 1;
} }
@ -324,7 +324,7 @@ int add_unseen_recent_objects_to_traversal(struct rev_info *revs,
if (ignore_in_core_kept_packs) if (ignore_in_core_kept_packs)
flags |= FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS; flags |= FOR_EACH_OBJECT_SKIP_IN_CORE_KEPT_PACKS;
r = for_each_packed_object(add_recent_packed, &data, flags); r = for_each_packed_object(revs->repo, add_recent_packed, &data, flags);
done: done:
oidset_clear(&data.extra_recent_oids); oidset_clear(&data.extra_recent_oids);

View File

@ -3,6 +3,7 @@
#include "repo-settings.h" #include "repo-settings.h"
#include "repository.h" #include "repository.h"
#include "midx.h" #include "midx.h"
#include "pack-objects.h"
static void repo_cfg_bool(struct repository *r, const char *key, int *dest, static void repo_cfg_bool(struct repository *r, const char *key, int *dest,
int def) int def)
@ -26,6 +27,7 @@ void prepare_repo_settings(struct repository *r)
const char *strval; const char *strval;
int manyfiles; int manyfiles;
int read_changed_paths; int read_changed_paths;
unsigned long ulongval;
if (!r->gitdir) if (!r->gitdir)
BUG("Cannot add settings for uninitialized repository"); BUG("Cannot add settings for uninitialized repository");
@ -123,6 +125,22 @@ void prepare_repo_settings(struct repository *r)
* removed. * removed.
*/ */
r->settings.command_requires_full_index = 1; r->settings.command_requires_full_index = 1;
if (!repo_config_get_ulong(r, "core.deltabasecachelimit", &ulongval))
r->settings.delta_base_cache_limit = ulongval;
if (!repo_config_get_ulong(r, "core.packedgitwindowsize", &ulongval)) {
int pgsz_x2 = getpagesize() * 2;
/* This value must be multiple of (pagesize * 2) */
ulongval /= pgsz_x2;
if (ulongval < 1)
ulongval = 1;
r->settings.packed_git_window_size = ulongval * pgsz_x2;
}
if (!repo_config_get_ulong(r, "core.packedgitlimit", &ulongval))
r->settings.packed_git_limit = ulongval;
} }
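Of the three values read here, core.packedgitwindowsize is the only one that is massaged rather than taken verbatim: it is rounded down to a whole multiple of twice the page size, with a floor of one such unit. A small self-contained sketch of just that arithmetic (plain C, not Git code):

    #include <stdio.h>
    #include <unistd.h>

    /* Round a configured byte count to a multiple of (pagesize * 2),
     * but never below one such unit. */
    static unsigned long round_window_size(unsigned long configured)
    {
            unsigned long pgsz_x2 = (unsigned long)getpagesize() * 2;
            unsigned long units = configured / pgsz_x2;

            if (units < 1)
                    units = 1;
            return units * pgsz_x2;
    }

    int main(void)
    {
            /* with 4 KiB pages: 10 MiB is already a multiple of 8 KiB */
            printf("%lu\n", round_window_size(10UL * 1024 * 1024));
            /* anything smaller than 8 KiB is bumped up to one unit */
            printf("%lu\n", round_window_size(1));
            return 0;
    }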
enum log_refs_config repo_settings_get_log_all_ref_updates(struct repository *repo) enum log_refs_config repo_settings_get_log_all_ref_updates(struct repository *repo)

View File

@ -57,12 +57,19 @@ struct repo_settings {
int core_multi_pack_index; int core_multi_pack_index;
int warn_ambiguous_refs; /* lazily loaded via accessor */ int warn_ambiguous_refs; /* lazily loaded via accessor */
size_t delta_base_cache_limit;
size_t packed_git_window_size;
size_t packed_git_limit;
}; };
#define REPO_SETTINGS_INIT { \ #define REPO_SETTINGS_INIT { \
.index_version = -1, \ .index_version = -1, \
.core_untracked_cache = UNTRACKED_CACHE_KEEP, \ .core_untracked_cache = UNTRACKED_CACHE_KEEP, \
.fetch_negotiation_algorithm = FETCH_NEGOTIATION_CONSECUTIVE, \ .fetch_negotiation_algorithm = FETCH_NEGOTIATION_CONSECUTIVE, \
.warn_ambiguous_refs = -1, \ .warn_ambiguous_refs = -1, \
.delta_base_cache_limit = DEFAULT_DELTA_BASE_CACHE_LIMIT, \
.packed_git_window_size = DEFAULT_PACKED_GIT_WINDOW_SIZE, \
.packed_git_limit = DEFAULT_PACKED_GIT_LIMIT, \
} }
void prepare_repo_settings(struct repository *r); void prepare_repo_settings(struct repository *r);
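Callers then consume these limits through the repository they already hold, as unpack_entry() above does with delta_base_cache_limit. A minimal sketch of the reader side (the wrapper name is illustrative):

    #include "git-compat-util.h"
    #include "repo-settings.h"
    #include "repository.h"

    /* Ensure the lazily-loaded settings are populated, then read the
     * per-repository limit instead of a global (sketch only). */
    static size_t delta_cache_limit_for(struct repository *r)
    {
            prepare_repo_settings(r);
            return r->settings.delta_base_cache_limit;
    }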

View File

@ -390,7 +390,8 @@ static struct object *get_reference(struct rev_info *revs, const char *name,
if (!object) { if (!object) {
if (revs->ignore_missing) if (revs->ignore_missing)
return NULL; return NULL;
if (revs->exclude_promisor_objects && is_promisor_object(oid)) if (revs->exclude_promisor_objects &&
is_promisor_object(revs->repo, oid))
return NULL; return NULL;
if (revs->do_not_die_on_missing_objects) { if (revs->do_not_die_on_missing_objects) {
oidset_insert(&revs->missing_commits, oid); oidset_insert(&revs->missing_commits, oid);
@ -432,7 +433,7 @@ static struct commit *handle_commit(struct rev_info *revs,
if (revs->ignore_missing_links || (flags & UNINTERESTING)) if (revs->ignore_missing_links || (flags & UNINTERESTING))
return NULL; return NULL;
if (revs->exclude_promisor_objects && if (revs->exclude_promisor_objects &&
is_promisor_object(&tag->tagged->oid)) is_promisor_object(revs->repo, &tag->tagged->oid))
return NULL; return NULL;
if (revs->do_not_die_on_missing_objects && oid) { if (revs->do_not_die_on_missing_objects && oid) {
oidset_insert(&revs->missing_commits, oid); oidset_insert(&revs->missing_commits, oid);
@ -1211,7 +1212,7 @@ static int process_parents(struct rev_info *revs, struct commit *commit,
revs->do_not_die_on_missing_objects; revs->do_not_die_on_missing_objects;
if (repo_parse_commit_gently(revs->repo, p, gently) < 0) { if (repo_parse_commit_gently(revs->repo, p, gently) < 0) {
if (revs->exclude_promisor_objects && if (revs->exclude_promisor_objects &&
is_promisor_object(&p->object.oid)) { is_promisor_object(revs->repo, &p->object.oid)) {
if (revs->first_parent_only) if (revs->first_parent_only)
break; break;
continue; continue;
@ -3920,7 +3921,7 @@ int prepare_revision_walk(struct rev_info *revs)
revs->treesame.name = "treesame"; revs->treesame.name = "treesame";
if (revs->exclude_promisor_objects) { if (revs->exclude_promisor_objects) {
for_each_packed_object(mark_uninteresting, revs, for_each_packed_object(revs->repo, mark_uninteresting, revs,
FOR_EACH_OBJECT_PROMISOR_ONLY); FOR_EACH_OBJECT_PROMISOR_ONLY);
} }
@ -4108,10 +4109,10 @@ enum commit_action get_commit_action(struct rev_info *revs, struct commit *commi
{ {
if (commit->object.flags & SHOWN) if (commit->object.flags & SHOWN)
return commit_ignore; return commit_ignore;
if (revs->unpacked && has_object_pack(&commit->object.oid)) if (revs->unpacked && has_object_pack(revs->repo, &commit->object.oid))
return commit_ignore; return commit_ignore;
if (revs->no_kept_objects) { if (revs->no_kept_objects) {
if (has_object_kept_pack(&commit->object.oid, if (has_object_kept_pack(revs->repo, &commit->object.oid,
revs->keep_pack_cache_flags)) revs->keep_pack_cache_flags))
return commit_ignore; return commit_ignore;
} }

tag.c
View File

@ -84,7 +84,7 @@ struct object *deref_tag(struct repository *r, struct object *o, const char *war
o = NULL; o = NULL;
} }
if (!o && warn) { if (!o && warn) {
if (last_oid && is_promisor_object(last_oid)) if (last_oid && is_promisor_object(r, last_oid))
return NULL; return NULL;
if (!warnlen) if (!warnlen)
warnlen = strlen(warn); warnlen = strlen(warn);