 09751bf1b2
			
		
	
	09751bf1b2
	
	
	
		
			
			cache_entry contains an object_id, and compare_ce_content() would
include that field when calling memcmp on a subset of the cache_entry.
Depending on which hashing algorithm is being used, only part of
object_id.hash is actually being used, therefore including it in a
memcmp() is incorrect. Instead we choose to exclude the object_id when
calling memcmp(), and call oideq() separately.
This issue was found when running t1700-split-index with MSAN, see MSAN
output below (on my machine, offset 76 corresponds to 4 bytes after the
start of object_id.hash).
Uninitialized bytes in MemcmpInterceptorCommon at offset 76 inside [0x7f60e7c00118, 92)
==27914==WARNING: MemorySanitizer: use-of-uninitialized-value
    #0 0x4524ee in memcmp /home/abuild/rpmbuild/BUILD/llvm-11.0.0.src/build/../projects/compiler-rt/lib/msan/../sanitizer_common/sanitizer_common_interceptors.inc:873:10
    #1 0xc867ae in compare_ce_content /home/ahunt/git/git/split-index.c:208:8
    #2 0xc859fb in prepare_to_write_split_index /home/ahunt/git/git/split-index.c:336:9
    #3 0xb4bbca in write_split_index /home/ahunt/git/git/read-cache.c:3107:2
    #4 0xb42b4d in write_locked_index /home/ahunt/git/git/read-cache.c:3295:8
    #5 0x638058 in try_merge_strategy /home/ahunt/git/git/builtin/merge.c:758:7
    #6 0x63057f in cmd_merge /home/ahunt/git/git/builtin/merge.c:1663:9
    #7 0x4a1e76 in run_builtin /home/ahunt/git/git/git.c:461:11
    #8 0x49e1e7 in handle_builtin /home/ahunt/git/git/git.c:714:3
    #9 0x4a0c08 in run_argv /home/ahunt/git/git/git.c:781:4
    #10 0x49d5a8 in cmd_main /home/ahunt/git/git/git.c:912:19
    #11 0x7974da in main /home/ahunt/git/git/common-main.c:52:11
    #12 0x7f60e928e349 in __libc_start_main (/lib64/libc.so.6+0x24349)
    #13 0x421bd9 in _start /home/abuild/rpmbuild/BUILD/glibc-2.26/csu/../sysdeps/x86_64/start.S:120
  Uninitialized value was stored to memory at
    #0 0x447eb9 in __msan_memcpy /home/abuild/rpmbuild/BUILD/llvm-11.0.0.src/build/../projects/compiler-rt/lib/msan/msan_interceptors.cpp:1558:3
    #1 0xb4d1e6 in dup_cache_entry /home/ahunt/git/git/read-cache.c:3457:2
    #2 0xd214fa in add_entry /home/ahunt/git/git/unpack-trees.c:215:18
    #3 0xd1fae0 in keep_entry /home/ahunt/git/git/unpack-trees.c:2276:2
    #4 0xd1ff9e in twoway_merge /home/ahunt/git/git/unpack-trees.c:2504:11
    #5 0xd27028 in call_unpack_fn /home/ahunt/git/git/unpack-trees.c:593:12
    #6 0xd2443d in unpack_nondirectories /home/ahunt/git/git/unpack-trees.c:1106:12
    #7 0xd19435 in unpack_callback /home/ahunt/git/git/unpack-trees.c:1306:6
    #8 0xd0d7ff in traverse_trees /home/ahunt/git/git/tree-walk.c:532:17
    #9 0xd1773a in unpack_trees /home/ahunt/git/git/unpack-trees.c:1683:9
    #10 0xdc6370 in checkout /home/ahunt/git/git/merge-ort.c:3590:8
    #11 0xdc51c3 in merge_switch_to_result /home/ahunt/git/git/merge-ort.c:3728:7
    #12 0xa195a9 in merge_ort_recursive /home/ahunt/git/git/merge-ort-wrappers.c:58:2
    #13 0x637fff in try_merge_strategy /home/ahunt/git/git/builtin/merge.c:751:12
    #14 0x63057f in cmd_merge /home/ahunt/git/git/builtin/merge.c:1663:9
    #15 0x4a1e76 in run_builtin /home/ahunt/git/git/git.c:461:11
    #16 0x49e1e7 in handle_builtin /home/ahunt/git/git/git.c:714:3
    #17 0x4a0c08 in run_argv /home/ahunt/git/git/git.c:781:4
    #18 0x49d5a8 in cmd_main /home/ahunt/git/git/git.c:912:19
    #19 0x7974da in main /home/ahunt/git/git/common-main.c:52:11
  Uninitialized value was created by a heap allocation
    #0 0x44e73d in malloc /home/abuild/rpmbuild/BUILD/llvm-11.0.0.src/build/../projects/compiler-rt/lib/msan/msan_interceptors.cpp:901:3
    #1 0xd592f6 in do_xmalloc /home/ahunt/git/git/wrapper.c:41:8
    #2 0xd59248 in xmalloc /home/ahunt/git/git/wrapper.c:62:9
    #3 0xa17088 in mem_pool_alloc_block /home/ahunt/git/git/mem-pool.c:22:6
    #4 0xa16f78 in mem_pool_init /home/ahunt/git/git/mem-pool.c:44:3
    #5 0xb481b8 in load_all_cache_entries /home/ahunt/git/git/read-cache.c
    #6 0xb44d40 in do_read_index /home/ahunt/git/git/read-cache.c:2298:17
    #7 0xb48a1b in read_index_from /home/ahunt/git/git/read-cache.c:2389:8
    #8 0xbd5a0b in repo_read_index /home/ahunt/git/git/repository.c:276:8
    #9 0xb4bcaf in repo_read_index_unmerged /home/ahunt/git/git/read-cache.c:3326:2
    #10 0x62ed26 in cmd_merge /home/ahunt/git/git/builtin/merge.c:1362:6
    #11 0x4a1e76 in run_builtin /home/ahunt/git/git/git.c:461:11
    #12 0x49e1e7 in handle_builtin /home/ahunt/git/git/git.c:714:3
    #13 0x4a0c08 in run_argv /home/ahunt/git/git/git.c:781:4
    #14 0x49d5a8 in cmd_main /home/ahunt/git/git/git.c:912:19
    #15 0x7974da in main /home/ahunt/git/git/common-main.c:52:11
    #16 0x7f60e928e349 in __libc_start_main (/lib64/libc.so.6+0x24349)
SUMMARY: MemorySanitizer: use-of-uninitialized-value /home/abuild/rpmbuild/BUILD/llvm-11.0.0.src/build/../projects/compiler-rt/lib/msan/../sanitizer_common/sanitizer_common_interceptors.inc:873:10 in memcmp
Exiting
Signed-off-by: Andrzej Hunt <andrzej@ahunt.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
		
	
		
			
				
	
	
		
			477 lines
		
	
	
		
			14 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			477 lines
		
	
	
		
			14 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| #include "cache.h"
 | |
| #include "split-index.h"
 | |
| #include "ewah/ewok.h"
 | |
| 
 | |
| struct split_index *init_split_index(struct index_state *istate)
 | |
| {
 | |
| 	if (!istate->split_index) {
 | |
| 		CALLOC_ARRAY(istate->split_index, 1);
 | |
| 		istate->split_index->refcount = 1;
 | |
| 	}
 | |
| 	return istate->split_index;
 | |
| }
 | |
| 
 | |
/*
 * Parse the "link" index extension payload: the base index hash,
 * optionally followed by two EWAH bitmaps (entries deleted from the
 * base, then entries replaced in the base).
 *
 * Returns 0 on success, or the negative result of error() when the
 * payload is truncated or otherwise malformed.
 */
int read_link_extension(struct index_state *istate,
			 const void *data_, unsigned long sz)
{
	const unsigned char *data = data_;
	struct split_index *si;
	int ret;

	/* The payload must at least contain the base index hash. */
	if (sz < the_hash_algo->rawsz)
		return error("corrupt link extension (too short)");
	si = init_split_index(istate);
	oidread(&si->base_oid, data);
	data += the_hash_algo->rawsz;
	sz -= the_hash_algo->rawsz;
	/* The bitmaps are optional: a hash-only extension is valid. */
	if (!sz)
		return 0;
	si->delete_bitmap = ewah_new();
	ret = ewah_read_mmap(si->delete_bitmap, data, sz);
	if (ret < 0)
		return error("corrupt delete bitmap in link extension");
	data += ret;
	sz -= ret;
	si->replace_bitmap = ewah_new();
	ret = ewah_read_mmap(si->replace_bitmap, data, sz);
	if (ret < 0)
		return error("corrupt replace bitmap in link extension");
	/* The replace bitmap must consume the remaining bytes exactly. */
	if (ret != sz)
		return error("garbage at the end of link extension");
	return 0;
}
 | |
| 
 | |
| int write_link_extension(struct strbuf *sb,
 | |
| 			 struct index_state *istate)
 | |
| {
 | |
| 	struct split_index *si = istate->split_index;
 | |
| 	strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
 | |
| 	if (!si->delete_bitmap && !si->replace_bitmap)
 | |
| 		return 0;
 | |
| 	ewah_serialize_strbuf(si->delete_bitmap, sb);
 | |
| 	ewah_serialize_strbuf(si->replace_bitmap, sb);
 | |
| 	return 0;
 | |
| }
 | |
| 
 | |
| static void mark_base_index_entries(struct index_state *base)
 | |
| {
 | |
| 	int i;
 | |
| 	/*
 | |
| 	 * To keep track of the shared entries between
 | |
| 	 * istate->base->cache[] and istate->cache[], base entry
 | |
| 	 * position is stored in each base entry. All positions start
 | |
| 	 * from 1 instead of 0, which is reserved to say "this is a new
 | |
| 	 * entry".
 | |
| 	 */
 | |
| 	for (i = 0; i < base->cache_nr; i++)
 | |
| 		base->cache[i]->index = i + 1;
 | |
| }
 | |
| 
 | |
/*
 * Demote the current istate->cache[] into a freshly allocated base
 * index (si->base), so that later changes accumulate in the split
 * index on top of that shared base.
 */
void move_cache_to_base_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	int i;

	/*
	 * If there was a previous base index, then transfer ownership of allocated
	 * entries to the parent index.
	 */
	if (si->base &&
		si->base->ce_mem_pool) {

		if (!istate->ce_mem_pool) {
			istate->ce_mem_pool = xmalloc(sizeof(struct mem_pool));
			mem_pool_init(istate->ce_mem_pool, 0);
		}

		mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
	}

	CALLOC_ARRAY(si->base, 1);
	si->base->version = istate->version;
	/* zero timestamp disables racy test in ce_write_index() */
	si->base->timestamp = istate->timestamp;
	ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
	si->base->cache_nr = istate->cache_nr;

	/*
	 * The mem_pool needs to move with the allocated entries.
	 */
	si->base->ce_mem_pool = istate->ce_mem_pool;
	istate->ce_mem_pool = NULL;

	/* base and istate now share the same cache_entry pointers */
	COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
	mark_base_index_entries(si->base);
	/* freshly demoted entries start out "clean" w.r.t. the base */
	for (i = 0; i < si->base->cache_nr; i++)
		si->base->cache[i]->ce_flags &= ~CE_UPDATE_IN_BASE;
}
 | |
| 
 | |
| static void mark_entry_for_delete(size_t pos, void *data)
 | |
| {
 | |
| 	struct index_state *istate = data;
 | |
| 	if (pos >= istate->cache_nr)
 | |
| 		die("position for delete %d exceeds base index size %d",
 | |
| 		    (int)pos, istate->cache_nr);
 | |
| 	istate->cache[pos]->ce_flags |= CE_REMOVE;
 | |
| 	istate->split_index->nr_deletions++;
 | |
| }
 | |
| 
 | |
/*
 * ewah_each_bit() callback for the replace bitmap: overwrite the base
 * entry at "pos" with the next replacement entry from the split index.
 * Replacement entries are stored with a zero-length name; the name is
 * taken over from the base entry being replaced.
 */
static void replace_entry(size_t pos, void *data)
{
	struct index_state *istate = data;
	struct split_index *si = istate->split_index;
	struct cache_entry *dst, *src;

	if (pos >= istate->cache_nr)
		die("position for replacement %d exceeds base index size %d",
		    (int)pos, istate->cache_nr);
	if (si->nr_replacements >= si->saved_cache_nr)
		die("too many replacements (%d vs %d)",
		    si->nr_replacements, si->saved_cache_nr);
	dst = istate->cache[pos];
	if (dst->ce_flags & CE_REMOVE)
		die("entry %d is marked as both replaced and deleted",
		    (int)pos);
	/* replacements are consumed in bitmap (i.e. position) order */
	src = si->saved_cache[si->nr_replacements];
	if (ce_namelen(src))
		die("corrupt link extension, entry %d should have "
		    "zero length name", (int)pos);
	/* base positions are stored 1-based; 0 means "new entry" */
	src->index = pos + 1;
	src->ce_flags |= CE_UPDATE_IN_BASE;
	/* adopt the name length of the entry being replaced */
	src->ce_namelen = dst->ce_namelen;
	copy_cache_entry(dst, src);
	discard_cache_entry(src);
	si->nr_replacements++;
}
 | |
| 
 | |
/*
 * Combine the shared (base) index with what was read from the split
 * index: start from a copy of base->cache[], apply the replacement
 * bitmap, then the deletion bitmap, and finally add the remaining
 * saved entries (those with non-empty names) as new entries.
 */
void merge_base_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	unsigned int i;

	mark_base_index_entries(si->base);

	/* set the split-index entries aside; start from the base entries */
	si->saved_cache	    = istate->cache;
	si->saved_cache_nr  = istate->cache_nr;
	istate->cache_nr    = si->base->cache_nr;
	istate->cache	    = NULL;
	istate->cache_alloc = 0;
	ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
	COPY_ARRAY(istate->cache, si->base->cache, istate->cache_nr);

	si->nr_deletions = 0;
	si->nr_replacements = 0;
	ewah_each_bit(si->replace_bitmap, replace_entry, istate);
	/* deletions are only marked here; actual removal is one pass below */
	ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
	if (si->nr_deletions)
		remove_marked_cache_entries(istate, 0);

	/* entries past the replacements are genuinely new; add them */
	for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
		if (!ce_namelen(si->saved_cache[i]))
			die("corrupt link extension, entry %d should "
			    "have non-zero length name", i);
		add_index_entry(istate, si->saved_cache[i],
				ADD_CACHE_OK_TO_ADD |
				ADD_CACHE_KEEP_CACHE_TREE |
				/*
				 * we may have to replay what
				 * merge-recursive.c:update_stages()
				 * does, which has this flag on
				 */
				ADD_CACHE_SKIP_DFCHECK);
		si->saved_cache[i] = NULL;
	}

	ewah_free(si->delete_bitmap);
	ewah_free(si->replace_bitmap);
	FREE_AND_NULL(si->saved_cache);
	si->delete_bitmap  = NULL;
	si->replace_bitmap = NULL;
	si->saved_cache_nr = 0;
}
 | |
| 
 | |
| /*
 | |
|  * Compare most of the fields in two cache entries, i.e. all except the
 | |
|  * hashmap_entry and the name.
 | |
|  */
 | |
| static int compare_ce_content(struct cache_entry *a, struct cache_entry *b)
 | |
| {
 | |
| 	const unsigned int ondisk_flags = CE_STAGEMASK | CE_VALID |
 | |
| 					  CE_EXTENDED_FLAGS;
 | |
| 	unsigned int ce_flags = a->ce_flags;
 | |
| 	unsigned int base_flags = b->ce_flags;
 | |
| 	int ret;
 | |
| 
 | |
| 	/* only on-disk flags matter */
 | |
| 	a->ce_flags &= ondisk_flags;
 | |
| 	b->ce_flags &= ondisk_flags;
 | |
| 	ret = memcmp(&a->ce_stat_data, &b->ce_stat_data,
 | |
| 		     offsetof(struct cache_entry, name) -
 | |
| 		     offsetof(struct cache_entry, oid)) ||
 | |
| 		!oideq(&a->oid, &b->oid);
 | |
| 	a->ce_flags = ce_flags;
 | |
| 	b->ce_flags = base_flags;
 | |
| 
 | |
| 	return ret;
 | |
| }
 | |
| 
 | |
/*
 * Build the delete/replace bitmaps and the list of entries that must
 * be written into the split index, then temporarily swap that list
 * into istate->cache[] so do_write_index() writes only those entries.
 * finish_writing_split_index() undoes the swap afterwards.
 */
void prepare_to_write_split_index(struct index_state *istate)
{
	struct split_index *si = init_split_index(istate);
	struct cache_entry **entries = NULL, *ce;
	int i, nr_entries = 0, nr_alloc = 0;

	si->delete_bitmap = ewah_new();
	si->replace_bitmap = ewah_new();

	if (si->base) {
		/* Go through istate->cache[] and mark CE_MATCHED to
		 * entry with positive index. We'll go through
		 * base->cache[] later to delete all entries in base
		 * that are not marked with either CE_MATCHED or
		 * CE_UPDATE_IN_BASE. If istate->cache[i] is a
		 * duplicate, deduplicate it.
		 */
		for (i = 0; i < istate->cache_nr; i++) {
			struct cache_entry *base;
			ce = istate->cache[i];
			if (!ce->index) {
				/*
				 * During simple update index operations this
				 * is a cache entry that is not present in
				 * the shared index.  It will be added to the
				 * split index.
				 *
				 * However, it might also represent a file
				 * that already has a cache entry in the
				 * shared index, but a new index has just
				 * been constructed by unpack_trees(), and
				 * this entry now refers to different content
				 * than what was recorded in the original
				 * index, e.g. during 'read-tree -m HEAD^' or
				 * 'checkout HEAD^'.  In this case the
				 * original entry in the shared index will be
				 * marked as deleted, and this entry will be
				 * added to the split index.
				 */
				continue;
			}
			if (ce->index > si->base->cache_nr) {
				BUG("ce refers to a shared ce at %d, which is beyond the shared index size %d",
				    ce->index, si->base->cache_nr);
			}
			ce->ce_flags |= CE_MATCHED; /* or "shared" */
			base = si->base->cache[ce->index - 1];
			if (ce == base) {
				/* The entry is present in the shared index. */
				if (ce->ce_flags & CE_UPDATE_IN_BASE) {
					/*
					 * Already marked for inclusion in
					 * the split index, either because
					 * the corresponding file was
					 * modified and the cached stat data
					 * was refreshed, or because there
					 * is already a replacement entry in
					 * the split index.
					 * Nothing more to do here.
					 */
				} else if (!ce_uptodate(ce) &&
					   is_racy_timestamp(istate, ce)) {
					/*
					 * A racily clean cache entry stored
					 * only in the shared index: it must
					 * be added to the split index, so
					 * the subsequent do_write_index()
					 * can smudge its stat data.
					 */
					ce->ce_flags |= CE_UPDATE_IN_BASE;
				} else {
					/*
					 * The entry is only present in the
					 * shared index and it was not
					 * refreshed.
					 * Just leave it there.
					 */
				}
				continue;
			}
			if (ce->ce_namelen != base->ce_namelen ||
			    strcmp(ce->name, base->name)) {
				/* names differ: treat ce as a brand-new entry */
				ce->index = 0;
				continue;
			}
			/*
			 * This is the copy of a cache entry that is present
			 * in the shared index, created by unpack_trees()
			 * while it constructed a new index.
			 */
			if (ce->ce_flags & CE_UPDATE_IN_BASE) {
				/*
				 * Already marked for inclusion in the split
				 * index, either because the corresponding
				 * file was modified and the cached stat data
				 * was refreshed, or because the original
				 * entry already had a replacement entry in
				 * the split index.
				 * Nothing to do.
				 */
			} else if (!ce_uptodate(ce) &&
				   is_racy_timestamp(istate, ce)) {
				/*
				 * A copy of a racily clean cache entry from
				 * the shared index.  It must be added to
				 * the split index, so the subsequent
				 * do_write_index() can smudge its stat data.
				 */
				ce->ce_flags |= CE_UPDATE_IN_BASE;
			} else {
				/*
				 * Thoroughly compare the cached data to see
				 * whether it should be marked for inclusion
				 * in the split index.
				 *
				 * This comparison might be unnecessary, as
				 * code paths modifying the cached data do
				 * set CE_UPDATE_IN_BASE as well.
				 */
				if (compare_ce_content(ce, base))
					ce->ce_flags |= CE_UPDATE_IN_BASE;
			}
			/* deduplicate: keep ce, drop the stale base copy */
			discard_cache_entry(base);
			si->base->cache[ce->index - 1] = ce;
		}
		for (i = 0; i < si->base->cache_nr; i++) {
			ce = si->base->cache[i];
			if ((ce->ce_flags & CE_REMOVE) ||
			    !(ce->ce_flags & CE_MATCHED))
				ewah_set(si->delete_bitmap, i);
			else if (ce->ce_flags & CE_UPDATE_IN_BASE) {
				/* replacements are written with an empty name */
				ewah_set(si->replace_bitmap, i);
				ce->ce_flags |= CE_STRIP_NAME;
				ALLOC_GROW(entries, nr_entries+1, nr_alloc);
				entries[nr_entries++] = ce;
			}
			if (is_null_oid(&ce->oid))
				istate->drop_cache_tree = 1;
		}
	}

	/* collect brand-new entries (and everything when there is no base) */
	for (i = 0; i < istate->cache_nr; i++) {
		ce = istate->cache[i];
		if ((!si->base || !ce->index) && !(ce->ce_flags & CE_REMOVE)) {
			assert(!(ce->ce_flags & CE_STRIP_NAME));
			ALLOC_GROW(entries, nr_entries+1, nr_alloc);
			entries[nr_entries++] = ce;
		}
		ce->ce_flags &= ~CE_MATCHED;
	}

	/*
	 * take cache[] out temporarily, put entries[] in its place
	 * for writing
	 */
	si->saved_cache = istate->cache;
	si->saved_cache_nr = istate->cache_nr;
	istate->cache = entries;
	istate->cache_nr = nr_entries;
}
 | |
| 
 | |
| void finish_writing_split_index(struct index_state *istate)
 | |
| {
 | |
| 	struct split_index *si = init_split_index(istate);
 | |
| 
 | |
| 	ewah_free(si->delete_bitmap);
 | |
| 	ewah_free(si->replace_bitmap);
 | |
| 	si->delete_bitmap = NULL;
 | |
| 	si->replace_bitmap = NULL;
 | |
| 	free(istate->cache);
 | |
| 	istate->cache = si->saved_cache;
 | |
| 	istate->cache_nr = si->saved_cache_nr;
 | |
| }
 | |
| 
 | |
| void discard_split_index(struct index_state *istate)
 | |
| {
 | |
| 	struct split_index *si = istate->split_index;
 | |
| 	if (!si)
 | |
| 		return;
 | |
| 	istate->split_index = NULL;
 | |
| 	si->refcount--;
 | |
| 	if (si->refcount)
 | |
| 		return;
 | |
| 	if (si->base) {
 | |
| 		discard_index(si->base);
 | |
| 		free(si->base);
 | |
| 	}
 | |
| 	free(si);
 | |
| }
 | |
| 
 | |
| void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce)
 | |
| {
 | |
| 	if (ce->index &&
 | |
| 	    istate->split_index &&
 | |
| 	    istate->split_index->base &&
 | |
| 	    ce->index <= istate->split_index->base->cache_nr &&
 | |
| 	    ce == istate->split_index->base->cache[ce->index - 1])
 | |
| 		ce->ce_flags |= CE_REMOVE;
 | |
| 	else
 | |
| 		discard_cache_entry(ce);
 | |
| }
 | |
| 
 | |
| void replace_index_entry_in_base(struct index_state *istate,
 | |
| 				 struct cache_entry *old_entry,
 | |
| 				 struct cache_entry *new_entry)
 | |
| {
 | |
| 	if (old_entry->index &&
 | |
| 	    istate->split_index &&
 | |
| 	    istate->split_index->base &&
 | |
| 	    old_entry->index <= istate->split_index->base->cache_nr) {
 | |
| 		new_entry->index = old_entry->index;
 | |
| 		if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
 | |
| 			discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
 | |
| 		istate->split_index->base->cache[new_entry->index - 1] = new_entry;
 | |
| 	}
 | |
| }
 | |
| 
 | |
| void add_split_index(struct index_state *istate)
 | |
| {
 | |
| 	if (!istate->split_index) {
 | |
| 		init_split_index(istate);
 | |
| 		istate->cache_changed |= SPLIT_INDEX_ORDERED;
 | |
| 	}
 | |
| }
 | |
| 
 | |
/*
 * Turn the split index off for "istate": fold the base's entry
 * allocations back into the main index and discard the split index
 * bookkeeping.
 */
void remove_split_index(struct index_state *istate)
{
	if (istate->split_index) {
		if (istate->split_index->base) {
			/*
			 * When removing the split index, we need to move
			 * ownership of the mem_pool associated with the
			 * base index to the main index. There may be cache entries
			 * allocated from the base's memory pool that are shared with
			 * the_index.cache[].
			 */
			mem_pool_combine(istate->ce_mem_pool,
					 istate->split_index->base->ce_mem_pool);

			/*
			 * The split index no longer owns the mem_pool backing
			 * its cache array. As we are discarding this index,
			 * mark the index as having no cache entries, so it
			 * will not attempt to clean up the cache entries or
			 * validate them.
			 */
			istate->split_index->base->cache_nr = 0;
		}

		/*
		 * We can discard the split index because its
		 * memory pool has been incorporated into the
		 * memory pool associated with the_index.
		 */
		discard_split_index(istate);

		istate->cache_changed |= SOMETHING_CHANGED;
	}
}
 |