Merge branch 'nd/split-index'

An experiment to use two files (the base file and incremental
changes relative to it) to represent the index, to reduce the I/O
cost of rewriting a large index when only a small part of the
working tree changes.
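
As a rough usage sketch (the option and variable names come from the
commit subjects listed below; see the individual commits for details),
the new mode is driven from the command line like this:

    git update-index --split-index      # write the index as a small delta on top of a shared base index
    git rev-parse --shared-index-path   # print the path of the shared (base) index file
    git update-index --no-split-index   # fold everything back into a single index file

Setting GIT_TEST_SPLIT_INDEX while running the test suite forces the
mode on, per the read-cache commit below.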

* nd/split-index: (32 commits)
  t1700: new tests for split-index mode
  t2104: make sure split index mode is off for the version test
  read-cache: force split index mode with GIT_TEST_SPLIT_INDEX
  read-tree: note about dropping split-index mode or index version
  read-tree: force split-index mode off on --index-output
  rev-parse: add --shared-index-path to get shared index path
  update-index --split-index: do not split if $GIT_DIR is read only
  update-index: new options to enable/disable split index mode
  split-index: strip pathname of on-disk replaced entries
  split-index: do not invalidate cache-tree at read time
  split-index: the reading part
  split-index: the writing part
  read-cache: mark updated entries for split index
  read-cache: save deleted entries in split index
  read-cache: mark new entries for split index
  read-cache: split-index mode
  read-cache: save index SHA-1 after reading
  entry.c: update cache_changed if refresh_cache is set in checkout_entry()
  cache-tree: mark istate->cache_changed on prime_cache_tree()
  cache-tree: mark istate->cache_changed on cache tree update
  ...

Merged by Junio C Hamano, 2014-07-16 11:25:40 -07:00
41 changed files with 1088 additions and 193 deletions

builtin/checkout.c

@@ -225,7 +225,6 @@ static int checkout_paths(const struct checkout_opts *opts,
 	int flag;
 	struct commit *head;
 	int errs = 0;
-	int newfd;
 	struct lock_file *lock_file;
 	if (opts->track != BRANCH_TRACK_UNSPECIFIED)
@@ -256,7 +255,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 	lock_file = xcalloc(1, sizeof(struct lock_file));
-	newfd = hold_locked_index(lock_file, 1);
+	hold_locked_index(lock_file, 1);
 	if (read_cache_preload(&opts->pathspec) < 0)
 		return error(_("corrupt index file"));
@@ -337,6 +336,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 	memset(&state, 0, sizeof(state));
 	state.force = 1;
 	state.refresh_cache = 1;
+	state.istate = &the_index;
 	for (pos = 0; pos < active_nr; pos++) {
 		struct cache_entry *ce = active_cache[pos];
 		if (ce->ce_flags & CE_MATCHED) {
@@ -352,8 +352,7 @@ static int checkout_paths(const struct checkout_opts *opts,
 		}
 	}
-	if (write_cache(newfd, active_cache, active_nr) ||
-	    commit_locked_index(lock_file))
+	if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
 		die(_("unable to write new index file"));
 	read_ref_full("HEAD", rev, 0, &flag);
@@ -444,8 +443,8 @@ static int merge_working_tree(const struct checkout_opts *opts,
 {
 	int ret;
 	struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
-	int newfd = hold_locked_index(lock_file, 1);
+	hold_locked_index(lock_file, 1);
 	if (read_cache_preload(NULL) < 0)
 		return error(_("corrupt index file"));
@@ -553,8 +552,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
 		}
 	}
-	if (write_cache(newfd, active_cache, active_nr) ||
-	    commit_locked_index(lock_file))
+	if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
 		die(_("unable to write new index file"));
 	if (!opts->force && !opts->quiet)