reftable/stack: simplify tracking of table locks
When compacting tables, we store the locks of all tables we are about to compact in the `table_locks` array. As we currently only ever compact all tables in the user-provided range or none, we simply track those locks via the indices of the respective tables in the merged stack.

This is about to change though: we will introduce a mode where auto compaction gracefully handles the case of already-locked files. With that mode it may happen that we only compact a subset of the user-supplied range of tables, and the indices into the lock array will then no longer match the table indices.

Refactor the code such that we track the number of locks via a separate variable. The resulting code is expected to perform the same, but will make it easier to perform the described change.

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
committed by Junio C Hamano
parent 5f0ed603a1
commit 558f6fbeb1
diff --git a/reftable/stack.c b/reftable/stack.c
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -1016,7 +1016,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	struct lock_file *table_locks = NULL;
 	struct tempfile *new_table = NULL;
 	int is_empty_table = 0, err = 0;
-	size_t i;
+	size_t i, nlocks = 0;
 
 	if (first > last || (!expiry && first == last)) {
 		err = 0;
@@ -1051,7 +1051,7 @@ static int stack_compact_range(struct reftable_stack *st,
 	for (i = first; i <= last; i++) {
 		stack_filename(&table_name, st, reader_name(st->readers[i]));
 
-		err = hold_lock_file_for_update(&table_locks[i - first],
+		err = hold_lock_file_for_update(&table_locks[nlocks],
 						table_name.buf, LOCK_NO_DEREF);
 		if (err < 0) {
 			if (errno == EEXIST)
@@ -1066,7 +1066,7 @@ static int stack_compact_range(struct reftable_stack *st,
 		 * run into file descriptor exhaustion when we compress a lot
 		 * of tables.
 		 */
-		err = close_lock_file_gently(&table_locks[i - first]);
+		err = close_lock_file_gently(&table_locks[nlocks++]);
 		if (err < 0) {
 			err = REFTABLE_IO_ERROR;
 			goto done;
@@ -1183,8 +1183,8 @@ static int stack_compact_range(struct reftable_stack *st,
 	 * Delete the old tables. They may still be in use by concurrent
 	 * readers, so it is expected that unlinking tables may fail.
 	 */
-	for (i = first; i <= last; i++) {
-		struct lock_file *table_lock = &table_locks[i - first];
+	for (i = 0; i < nlocks; i++) {
+		struct lock_file *table_lock = &table_locks[i];
 		char *table_path = get_locked_file_path(table_lock);
 		unlink(table_path);
 		free(table_path);
@@ -1192,8 +1192,8 @@ static int stack_compact_range(struct reftable_stack *st,
 
 done:
 	rollback_lock_file(&tables_list_lock);
-	for (i = first; table_locks && i <= last; i++)
-		rollback_lock_file(&table_locks[i - first]);
+	for (i = 0; table_locks && i < nlocks; i++)
+		rollback_lock_file(&table_locks[i]);
 	reftable_free(table_locks);
 
 	delete_tempfile(&new_table);
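To illustrate the pattern the diff switches to, here is a minimal, self-contained C sketch. It is not the reftable implementation: `lock_table()` and `release_table()` are hypothetical stand-ins for the real lockfile API, and the table range is made up.

#include <stddef.h>
#include <stdio.h>

struct lock_file { size_t table_index; };

static void lock_table(struct lock_file *lock, size_t table_index)
{
	lock->table_index = table_index;
}

static void release_table(struct lock_file *lock)
{
	printf("releasing lock for table %zu\n", lock->table_index);
}

int main(void)
{
	struct lock_file table_locks[8];
	size_t first = 2, last = 5;
	size_t i, nlocks = 0;

	/*
	 * Old scheme: index with `i - first`, which assumes every table
	 * in [first, last] ends up locked. New scheme: fill the array
	 * densely via a separate counter, so it stays valid even once a
	 * subset of tables may be skipped.
	 */
	for (i = first; i <= last; i++)
		lock_table(&table_locks[nlocks++], i);

	/* Cleanup iterates over the locks actually taken, not the range. */
	for (i = 0; i < nlocks; i++)
		release_table(&table_locks[i]);

	return 0;
}

The point of the dense `nlocks` indexing is that cleanup only ever touches locks that were actually taken, which is what keeps the code correct once some tables in the range may be skipped.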