Merge branch 'nd/pack-objects-pack-struct'
"git pack-objects" needs to allocate tons of "struct object_entry" while doing its work, and shrinking its size helps the performance quite a bit. * nd/pack-objects-pack-struct: ci: exercise the whole test suite with uncommon code in pack-objects pack-objects: reorder members to shrink struct object_entry pack-objects: shrink delta_size field in struct object_entry pack-objects: shrink size field in struct object_entry pack-objects: clarify the use of object_entry::size pack-objects: don't check size when the object is bad pack-objects: shrink z_delta_size field in struct object_entry pack-objects: refer to delta objects by index instead of pointer pack-objects: move in_pack out of struct object_entry pack-objects: move in_pack_pos out of struct object_entry pack-objects: use bitfield for object_entry::depth pack-objects: use bitfield for object_entry::dfs_state pack-objects: turn type and in_pack_type to bitfields pack-objects: a bit of document about struct object_entry read-cache.c: make $GIT_TEST_SPLIT_INDEX boolean
@@ -32,6 +32,18 @@
#include "object-store.h"
#include "dir.h"

#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
#define DELTA(obj) oe_delta(&to_pack, obj)
#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)

static const char *pack_usage[] = {
N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
@@ -129,13 +141,14 @@ static void *get_delta(struct object_entry *entry)
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(&entry->idx.oid));
base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
&base_size);
if (!base_buf)
die("unable to read %s",
oid_to_hex(&entry->delta->idx.oid));
oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
if (!delta_buf || delta_size != entry->delta_size)
if (!delta_buf || delta_size != DELTA_SIZE(entry))
die("delta size changed");
free(buf);
free(base_buf);
@@ -268,8 +281,8 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
struct git_istream *st = NULL;

if (!usable_delta) {
if (entry->type == OBJ_BLOB &&
entry->size > big_file_threshold &&
if (oe_type(entry) == OBJ_BLOB &&
oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
(st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
@@ -285,15 +298,15 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
} else if (entry->delta_data) {
size = entry->delta_size;
size = DELTA_SIZE(entry);
buf = entry->delta_data;
entry->delta_data = NULL;
type = (allow_ofs_delta && entry->delta->idx.offset) ?
type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
buf = get_delta(entry);
size = entry->delta_size;
type = (allow_ofs_delta && entry->delta->idx.offset) ?
size = DELTA_SIZE(entry);
type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}

@@ -317,7 +330,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
* encoding of the relative offset for the delta
* base from this object's position in the pack.
*/
off_t ofs = entry->idx.offset - entry->delta->idx.offset;
off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
@@ -343,7 +356,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
return 0;
}
hashwrite(f, header, hdrlen);
hashwrite(f, entry->delta->idx.oid.hash, 20);
hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
@@ -369,21 +382,22 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
enum object_type type = entry->type;
enum object_type type = oe_type(entry);
off_t datalen;
unsigned char header[MAX_PACK_OBJECT_HEADER],
dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
unsigned long entry_size = SIZE(entry);

if (entry->delta)
type = (allow_ofs_delta && entry->delta->idx.offset) ?
if (DELTA(entry))
type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
hdrlen = encode_in_pack_object_header(header, sizeof(header),
type, entry->size);
type, entry_size);

offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
@@ -400,7 +414,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
datalen -= entry->in_pack_header_size;

if (!pack_to_stdout && p->index_version == 1 &&
check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
error("corrupt packed object for %s",
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
@@ -408,7 +422,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
}

if (type == OBJ_OFS_DELTA) {
off_t ofs = entry->idx.offset - entry->delta->idx.offset;
off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
@@ -427,7 +441,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
return 0;
}
hashwrite(f, header, hdrlen);
hashwrite(f, entry->delta->idx.oid.hash, 20);
hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
@@ -467,28 +481,29 @@ static off_t write_object(struct hashfile *f,
else
limit = pack_size_limit - write_offset;

if (!entry->delta)
if (!DELTA(entry))
usable_delta = 0; /* no delta */
else if (!pack_size_limit)
usable_delta = 1; /* unlimited packfile */
else if (entry->delta->idx.offset == (off_t)-1)
else if (DELTA(entry)->idx.offset == (off_t)-1)
usable_delta = 0; /* base was written to another pack */
else if (entry->delta->idx.offset)
else if (DELTA(entry)->idx.offset)
usable_delta = 1; /* base already exists in this pack */
else
usable_delta = 0; /* base could end up in another pack */

if (!reuse_object)
to_reuse = 0; /* explicit */
else if (!entry->in_pack)
else if (!IN_PACK(entry))
to_reuse = 0; /* can't reuse what we don't have */
else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
else if (oe_type(entry) == OBJ_REF_DELTA ||
oe_type(entry) == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
else if (entry->type != entry->in_pack_type)
else if (oe_type(entry) != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
else if (entry->delta)
else if (DELTA(entry))
to_reuse = 0; /* we want to pack afresh */
else
to_reuse = 1; /* we have it in-pack undeltified,
@@ -540,12 +555,12 @@ static enum write_one_status write_one(struct hashfile *f,
}

/* if we are deltified, write out base object first. */
if (e->delta) {
if (DELTA(e)) {
e->idx.offset = 1; /* now recurse */
switch (write_one(f, e->delta, offset)) {
switch (write_one(f, DELTA(e), offset)) {
case WRITE_ONE_RECURSIVE:
/* we cannot depend on this one */
e->delta = NULL;
SET_DELTA(e, NULL);
break;
default:
break;
@@ -607,34 +622,34 @@ static void add_descendants_to_write_order(struct object_entry **wo,
/* add this node... */
add_to_write_order(wo, endp, e);
/* all its siblings... */
for (s = e->delta_sibling; s; s = s->delta_sibling) {
for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
add_to_write_order(wo, endp, s);
}
}
/* drop down a level to add left subtree nodes if possible */
if (e->delta_child) {
if (DELTA_CHILD(e)) {
add_to_order = 1;
e = e->delta_child;
e = DELTA_CHILD(e);
} else {
add_to_order = 0;
/* our sibling might have some children, it is next */
if (e->delta_sibling) {
e = e->delta_sibling;
if (DELTA_SIBLING(e)) {
e = DELTA_SIBLING(e);
continue;
}
/* go back to our parent node */
e = e->delta;
while (e && !e->delta_sibling) {
e = DELTA(e);
while (e && !DELTA_SIBLING(e)) {
/* we're on the right side of a subtree, keep
* going up until we can go right again */
e = e->delta;
e = DELTA(e);
}
if (!e) {
/* done- we hit our original root node */
return;
}
/* pass it off to sibling at this level */
e = e->delta_sibling;
e = DELTA_SIBLING(e);
}
};
}
@@ -645,7 +660,7 @@ static void add_family_to_write_order(struct object_entry **wo,
{
struct object_entry *root;

for (root = e; root->delta; root = root->delta)
for (root = e; DELTA(root); root = DELTA(root))
; /* nothing */
add_descendants_to_write_order(wo, endp, root);
}
@@ -660,8 +675,8 @@ static struct object_entry **compute_write_order(void)
for (i = 0; i < to_pack.nr_objects; i++) {
objects[i].tagged = 0;
objects[i].filled = 0;
objects[i].delta_child = NULL;
objects[i].delta_sibling = NULL;
SET_DELTA_CHILD(&objects[i], NULL);
SET_DELTA_SIBLING(&objects[i], NULL);
}

/*
@@ -671,11 +686,11 @@ static struct object_entry **compute_write_order(void)
*/
for (i = to_pack.nr_objects; i > 0;) {
struct object_entry *e = &objects[--i];
if (!e->delta)
if (!DELTA(e))
continue;
/* Mark me as the first child */
e->delta_sibling = e->delta->delta_child;
e->delta->delta_child = e;
e->delta_sibling_idx = DELTA(e)->delta_child_idx;
SET_DELTA_CHILD(DELTA(e), e);
}

/*
@@ -707,8 +722,8 @@ static struct object_entry **compute_write_order(void)
* And then all remaining commits and tags.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
if (objects[i].type != OBJ_COMMIT &&
objects[i].type != OBJ_TAG)
if (oe_type(&objects[i]) != OBJ_COMMIT &&
oe_type(&objects[i]) != OBJ_TAG)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
@@ -717,7 +732,7 @@ static struct object_entry **compute_write_order(void)
* And then all the trees.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
if (objects[i].type != OBJ_TREE)
if (oe_type(&objects[i]) != OBJ_TREE)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
@@ -880,7 +895,8 @@ static void write_pack_file(void)

if (write_bitmap_index) {
bitmap_writer_set_checksum(oid.hash);
bitmap_writer_build_type_index(written_list, nr_written);
bitmap_writer_build_type_index(
&to_pack, written_list, nr_written);
}

finish_tmp_packfile(&tmpname, pack_tmp_name,
@@ -1071,14 +1087,13 @@ static void create_object_entry(const struct object_id *oid,

entry = packlist_alloc(&to_pack, oid->hash, index_pos);
entry->hash = hash;
if (type)
entry->type = type;
oe_set_type(entry, type);
if (exclude)
entry->preferred_base = 1;
else
nr_result++;
if (found_pack) {
entry->in_pack = found_pack;
oe_set_in_pack(&to_pack, entry, found_pack);
entry->in_pack_offset = found_offset;
}

@@ -1403,8 +1418,10 @@ static void cleanup_preferred_base(void)

static void check_object(struct object_entry *entry)
{
if (entry->in_pack) {
struct packed_git *p = entry->in_pack;
unsigned long canonical_size;

if (IN_PACK(entry)) {
struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
@@ -1412,6 +1429,8 @@ static void check_object(struct object_entry *entry)
unsigned long avail;
off_t ofs;
unsigned char *buf, c;
enum object_type type;
unsigned long in_pack_size;

buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

@@ -1420,11 +1439,15 @@ static void check_object(struct object_entry *entry)
* since non-delta representations could still be reused.
*/
used = unpack_object_header_buffer(buf, avail,
&entry->in_pack_type,
&entry->size);
&type,
&in_pack_size);
if (used == 0)
goto give_up;

if (type < 0)
BUG("invalid type %d", type);
entry->in_pack_type = type;

/*
* Determine if this is a delta and if so whether we can
* reuse it or not. Otherwise let's find out as cheaply as
@@ -1433,9 +1456,10 @@ static void check_object(struct object_entry *entry)
switch (entry->in_pack_type) {
default:
/* Not a delta hence we've already got all we need. */
entry->type = entry->in_pack_type;
oe_set_type(entry, entry->in_pack_type);
SET_SIZE(entry, in_pack_size);
entry->in_pack_header_size = used;
if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
goto give_up;
unuse_pack(&w_curs);
return;
@@ -1489,25 +1513,29 @@ static void check_object(struct object_entry *entry)
* deltify other objects against, in order to avoid
* circular deltas.
*/
entry->type = entry->in_pack_type;
entry->delta = base_entry;
entry->delta_size = entry->size;
entry->delta_sibling = base_entry->delta_child;
base_entry->delta_child = entry;
oe_set_type(entry, entry->in_pack_type);
SET_SIZE(entry, in_pack_size); /* delta size */
SET_DELTA(entry, base_entry);
SET_DELTA_SIZE(entry, in_pack_size);
entry->delta_sibling_idx = base_entry->delta_child_idx;
SET_DELTA_CHILD(base_entry, entry);
unuse_pack(&w_curs);
return;
}

if (entry->type) {
if (oe_type(entry)) {
off_t delta_pos;

/*
* This must be a delta and we already know what the
* final object type is. Let's extract the actual
* object size from the delta header.
*/
entry->size = get_size_from_delta(p, &w_curs,
entry->in_pack_offset + entry->in_pack_header_size);
if (entry->size == 0)
delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
if (canonical_size == 0)
goto give_up;
SET_SIZE(entry, canonical_size);
unuse_pack(&w_curs);
return;
}
@@ -1521,28 +1549,34 @@ static void check_object(struct object_entry *entry)
unuse_pack(&w_curs);
}

entry->type = oid_object_info(the_repository, &entry->idx.oid,
&entry->size);
/*
* The error condition is checked in prepare_pack(). This is
* to permit a missing preferred base object to be ignored
* as a preferred base. Doing so can result in a larger
* pack file, but the transfer will still take place.
*/
oe_set_type(entry,
oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
if (entry->type_valid) {
SET_SIZE(entry, canonical_size);
} else {
/*
* Bad object type is checked in prepare_pack(). This is
* to permit a missing preferred base object to be ignored
* as a preferred base. Doing so can result in a larger
* pack file, but the transfer will still take place.
*/
}
}

static int pack_offset_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
const struct packed_git *a_in_pack = IN_PACK(a);
const struct packed_git *b_in_pack = IN_PACK(b);

/* avoid filesystem trashing with loose objects */
if (!a->in_pack && !b->in_pack)
if (!a_in_pack && !b_in_pack)
return oidcmp(&a->idx.oid, &b->idx.oid);

if (a->in_pack < b->in_pack)
if (a_in_pack < b_in_pack)
return -1;
if (a->in_pack > b->in_pack)
if (a_in_pack > b_in_pack)
return 1;
return a->in_pack_offset < b->in_pack_offset ? -1 :
(a->in_pack_offset > b->in_pack_offset);
@@ -1563,31 +1597,37 @@ static int pack_offset_sort(const void *_a, const void *_b)
*/
static void drop_reused_delta(struct object_entry *entry)
{
struct object_entry **p = &entry->delta->delta_child;
unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
struct object_info oi = OBJECT_INFO_INIT;
enum object_type type;
unsigned long size;

while (*p) {
if (*p == entry)
*p = (*p)->delta_sibling;
while (*idx) {
struct object_entry *oe = &to_pack.objects[*idx - 1];

if (oe == entry)
*idx = oe->delta_sibling_idx;
else
p = &(*p)->delta_sibling;
idx = &oe->delta_sibling_idx;
}
entry->delta = NULL;
SET_DELTA(entry, NULL);
entry->depth = 0;

oi.sizep = &entry->size;
oi.typep = &entry->type;
if (packed_object_info(the_repository, entry->in_pack,
entry->in_pack_offset, &oi) < 0) {
oi.sizep = &size;
oi.typep = &type;
if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
* fall back to sha1_object_info, which may find another copy.
* And if that fails, the error will be recorded in entry->type
* And if that fails, the error will be recorded in oe_type(entry)
* and dealt with in prepare_pack().
*/
entry->type = oid_object_info(the_repository, &entry->idx.oid,
&entry->size);
oe_set_type(entry,
oid_object_info(the_repository, &entry->idx.oid, &size));
} else {
oe_set_type(entry, type);
}
SET_SIZE(entry, size);
}

/*
@@ -1611,7 +1651,7 @@ static void break_delta_chains(struct object_entry *entry)

for (cur = entry, total_depth = 0;
cur;
cur = cur->delta, total_depth++) {
cur = DELTA(cur), total_depth++) {
if (cur->dfs_state == DFS_DONE) {
/*
* We've already seen this object and know it isn't
@@ -1636,7 +1676,7 @@ static void break_delta_chains(struct object_entry *entry)
* it's not a delta, we're done traversing, but we'll mark it
* done to save time on future traversals.
*/
if (!cur->delta) {
if (!DELTA(cur)) {
cur->dfs_state = DFS_DONE;
break;
}
@@ -1659,7 +1699,7 @@ static void break_delta_chains(struct object_entry *entry)
* We keep all commits in the chain that we examined.
*/
cur->dfs_state = DFS_ACTIVE;
if (cur->delta->dfs_state == DFS_ACTIVE) {
if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
drop_reused_delta(cur);
cur->dfs_state = DFS_DONE;
break;
@@ -1674,7 +1714,7 @@ static void break_delta_chains(struct object_entry *entry)
* an extra "next" pointer to keep going after we reset cur->delta.
*/
for (cur = entry; cur; cur = next) {
next = cur->delta;
next = DELTA(cur);

/*
* We should have a chain of zero or more ACTIVE states down to
@@ -1731,7 +1771,8 @@ static void get_object_details(void)
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
if (big_file_threshold < entry->size)
if (entry->type_valid &&
oe_size_greater_than(&to_pack, entry, big_file_threshold))
entry->no_try_delta = 1;
display_progress(progress_state, i + 1);
}
@@ -1760,10 +1801,14 @@ static int type_size_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
enum object_type a_type = oe_type(a);
enum object_type b_type = oe_type(b);
unsigned long a_size = SIZE(a);
unsigned long b_size = SIZE(b);

if (a->type > b->type)
if (a_type > b_type)
return -1;
if (a->type < b->type)
if (a_type < b_type)
return 1;
if (a->hash > b->hash)
return -1;
@@ -1773,9 +1818,9 @@ static int type_size_sort(const void *_a, const void *_b)
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
if (a->size > b->size)
if (a_size > b_size)
return -1;
if (a->size < b->size)
if (a_size < b_size)
return 1;
return a < b ? -1 : (a > b); /* newest first */
}
@@ -1828,6 +1873,46 @@ static pthread_mutex_t progress_mutex;

#endif

/*
* Return the size of the object without doing any delta
* reconstruction (so non-deltas are true object sizes, but deltas
* return the size of the delta data).
*/
unsigned long oe_get_size_slow(struct packing_data *pack,
const struct object_entry *e)
{
struct packed_git *p;
struct pack_window *w_curs;
unsigned char *buf;
enum object_type type;
unsigned long used, avail, size;

if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
read_lock();
if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
die(_("unable to get size of %s"),
oid_to_hex(&e->idx.oid));
read_unlock();
return size;
}

p = oe_in_pack(pack, e);
if (!p)
BUG("when e->type is a delta, it must belong to a pack");

read_lock();
w_curs = NULL;
buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
used = unpack_object_header_buffer(buf, avail, &type, &size);
if (used == 0)
die(_("unable to parse object header of %s"),
oid_to_hex(&e->idx.oid));

unuse_pack(&w_curs);
read_unlock();
return size;
}

static int try_delta(struct unpacked *trg, struct unpacked *src,
unsigned max_depth, unsigned long *mem_usage)
{
@@ -1839,7 +1924,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
void *delta_buf;

/* Don't bother doing diffs between different types */
if (trg_entry->type != src_entry->type)
if (oe_type(trg_entry) != oe_type(src_entry))
return -1;

/*
@@ -1850,8 +1935,8 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
* it, we will still save the transfer cost, as we already know
* the other side has it and we won't send src_entry at all.
*/
if (reuse_delta && trg_entry->in_pack &&
trg_entry->in_pack == src_entry->in_pack &&
if (reuse_delta && IN_PACK(trg_entry) &&
IN_PACK(trg_entry) == IN_PACK(src_entry) &&
!src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
@@ -1862,19 +1947,19 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
return 0;

/* Now some size filtering heuristics. */
trg_size = trg_entry->size;
if (!trg_entry->delta) {
trg_size = SIZE(trg_entry);
if (!DELTA(trg_entry)) {
max_size = trg_size/2 - 20;
ref_depth = 1;
} else {
max_size = trg_entry->delta_size;
max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
(max_depth - ref_depth + 1);
if (max_size == 0)
return 0;
src_size = src_entry->size;
src_size = SIZE(src_entry);
sizediff = src_size < trg_size ? trg_size - src_size : 0;
if (sizediff >= max_size)
return 0;
@@ -1936,10 +2021,14 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
if (!delta_buf)
return 0;
if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
free(delta_buf);
return 0;
}

if (trg_entry->delta) {
if (DELTA(trg_entry)) {
/* Prefer only shallower same-sized deltas. */
if (delta_size == trg_entry->delta_size &&
if (delta_size == DELTA_SIZE(trg_entry) &&
src->depth + 1 >= trg->depth) {
free(delta_buf);
return 0;
@@ -1954,7 +2043,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
free(trg_entry->delta_data);
cache_lock();
if (trg_entry->delta_data) {
delta_cache_size -= trg_entry->delta_size;
delta_cache_size -= DELTA_SIZE(trg_entry);
trg_entry->delta_data = NULL;
}
if (delta_cacheable(src_size, trg_size, delta_size)) {
@@ -1966,8 +2055,8 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
free(delta_buf);
}

trg_entry->delta = src_entry;
trg_entry->delta_size = delta_size;
SET_DELTA(trg_entry, src_entry);
SET_DELTA_SIZE(trg_entry, delta_size);
trg->depth = src->depth + 1;

return 1;
@@ -1975,13 +2064,13 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,

static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
struct object_entry *child = me->delta_child;
struct object_entry *child = DELTA_CHILD(me);
unsigned int m = n;
while (child) {
unsigned int c = check_delta_limit(child, n + 1);
if (m < c)
m = c;
child = child->delta_sibling;
child = DELTA_SIBLING(child);
}
return m;
}
@@ -1992,7 +2081,7 @@ static unsigned long free_unpacked(struct unpacked *n)
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
freed_mem += n->entry->size;
freed_mem += SIZE(n->entry);
FREE_AND_NULL(n->data);
}
n->entry = NULL;
@@ -2050,7 +2139,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
* otherwise they would become too deep.
*/
max_depth = depth;
if (entry->delta_child) {
if (DELTA_CHILD(entry)) {
max_depth -= check_delta_limit(entry, 0);
if (max_depth <= 0)
goto next;
@@ -2088,19 +2177,26 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
* between writes at that moment.
*/
if (entry->delta_data && !pack_to_stdout) {
entry->z_delta_size = do_compress(&entry->delta_data,
entry->delta_size);
cache_lock();
delta_cache_size -= entry->delta_size;
delta_cache_size += entry->z_delta_size;
cache_unlock();
unsigned long size;

size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
if (size < (1U << OE_Z_DELTA_BITS)) {
entry->z_delta_size = size;
cache_lock();
delta_cache_size -= DELTA_SIZE(entry);
delta_cache_size += entry->z_delta_size;
cache_unlock();
} else {
FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
}
}

/* if we made n a delta, and if n is already at max
* depth, leaving it in the window is pointless. we
* should evict it first.
*/
if (entry->delta && max_depth <= n->depth)
if (DELTA(entry) && max_depth <= n->depth)
continue;

/*
@@ -2108,7 +2204,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
* currently deltified object, to keep it longer. It will
* be the first base object to be attempted next.
*/
if (entry->delta) {
if (DELTA(entry)) {
struct unpacked swap = array[best_base];
int dist = (window + idx - best_base) % window;
int dst = best_base;
@@ -2429,13 +2525,14 @@ static void prepare_pack(int window, int depth)
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;

if (entry->delta)
if (DELTA(entry))
/* This happens if we decided to reuse existing
* delta from a pack. "reuse_delta &&" is implied.
*/
continue;

if (entry->size < 50)
if (!entry->type_valid ||
oe_size_less_than(&to_pack, entry, 50))
continue;

if (entry->no_try_delta)
@@ -2443,11 +2540,11 @@ static void prepare_pack(int window, int depth)

if (!entry->preferred_base) {
nr_deltas++;
if (entry->type < 0)
if (oe_type(entry) < 0)
die("unable to get type of object %s",
oid_to_hex(&entry->idx.oid));
} else {
if (entry->type < 0) {
if (oe_type(entry) < 0) {
/*
* This object is not found, but we
* don't have to include it anyway.
@@ -2556,7 +2653,7 @@ static void read_object_list_from_stdin(void)
die("expected object ID, got garbage:\n %s", line);

add_preferred_base_object(p + 1);
add_object_entry(&oid, 0, p + 1, 0);
add_object_entry(&oid, OBJ_NONE, p + 1, 0);
}
}

@@ -3083,6 +3180,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
OPT_END(),
};

if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
BUG("too many dfs states, increase OE_DFS_STATE_BITS");

check_replace_refs = 0;

reset_pack_idx_option(&pack_idx_opts);
@@ -3099,6 +3199,17 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (pack_to_stdout != !base_name || argc)
usage_with_options(pack_usage, pack_objects_options);

if (depth >= (1 << OE_DEPTH_BITS)) {
warning(_("delta chain depth %d is too deep, forcing %d"),
depth, (1 << OE_DEPTH_BITS) - 1);
depth = (1 << OE_DEPTH_BITS) - 1;
}
if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
warning(_("pack.deltaCacheLimit is too high, forcing %d"),
(1U << OE_Z_DELTA_BITS) - 1);
cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
}

argv_array_push(&rp, "pack-objects");
if (thin) {
use_internal_rev_list = 1;
@@ -3217,6 +3328,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
}
}

prepare_packing_data(&to_pack);

if (progress)
progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
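For context, the DELTA()/SET_DELTA() macros added at the top of this diff wrap
index-based accessors from pack-objects.h; storing a 1-based index instead of a
pointer lets 0 mean "no delta" while keeping lookups O(1). A simplified, hypothetical
sketch of such accessors follows (minimal field set, not the real declarations).

    /* Hypothetical sketch of index-based delta accessors; the real
     * oe_delta()/oe_set_delta() in pack-objects.h operate on a richer
     * struct object_entry. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct object_entry {
    	uint32_t delta_idx; /* 1-based index into packing_data.objects; 0 = none */
    };

    struct packing_data {
    	struct object_entry *objects;
    	uint32_t nr_objects;
    };

    static struct object_entry *oe_delta(const struct packing_data *pack,
    				     const struct object_entry *e)
    {
    	return e->delta_idx ? &pack->objects[e->delta_idx - 1] : NULL;
    }

    static void oe_set_delta(struct packing_data *pack, struct object_entry *e,
    			 const struct object_entry *delta)
    {
    	e->delta_idx = delta ? (uint32_t)(delta - pack->objects) + 1 : 0;
    }

    int main(void)
    {
    	struct object_entry objs[2] = { {0}, {0} };
    	struct packing_data pack = { objs, 2 };

    	oe_set_delta(&pack, &objs[1], &objs[0]);
    	assert(oe_delta(&pack, &objs[1]) == &objs[0]);
    	oe_set_delta(&pack, &objs[1], NULL);
    	assert(oe_delta(&pack, &objs[1]) == NULL);
    	return 0;
    }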