treewide: prefer lockfiles on the stack
There is no longer any need to allocate and leak a `struct lock_file`. The previous patch addressed an instance where we needed a minor tweak alongside the trivial changes. Deal with the remaining instances where we allocate and leak a struct within a single function. Change them to have the `struct lock_file` on the stack instead.

These instances were identified by running `git grep "^\s*struct lock_file\s*\*"`.

Signed-off-by: Martin Ågren <martin.agren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
commit 837e34eba4 (parent f132a127ee)
committed by Junio C Hamano

builtin/am.c | 24 changed lines (11 additions, 13 deletions)
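To make the pattern concrete, the first rewritten function reads as follows once the change is applied. This is simply the post-image of the first hunk below, shown as plain code rather than diff fragments; it uses git's own index and lockfile APIs (LOCK_INIT, hold_locked_index(), write_locked_index()), so it only builds inside the git tree:

    static void refresh_and_write_cache(void)
    {
            /* The lock now lives on the stack; nothing is xcalloc()'d or leaked. */
            struct lock_file lock_file = LOCK_INIT;

            /* Take index.lock, refresh the in-core index, then write and commit it. */
            hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
            refresh_cache(REFRESH_QUIET);
            if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
                    die(_("unable to write index file"));
    }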
diff --git a/builtin/am.c b/builtin/am.c
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1134,11 +1134,11 @@ static const char *msgnum(const struct am_state *state)
  */
 static void refresh_and_write_cache(void)
 {
-        struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+        struct lock_file lock_file = LOCK_INIT;
 
-        hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+        hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
         refresh_cache(REFRESH_QUIET);
-        if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+        if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
                 die(_("unable to write index file"));
 }
 
@@ -1946,15 +1946,14 @@ next:
  */
 static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
 {
-        struct lock_file *lock_file;
+        struct lock_file lock_file = LOCK_INIT;
         struct unpack_trees_options opts;
         struct tree_desc t[2];
 
         if (parse_tree(head) || parse_tree(remote))
                 return -1;
 
-        lock_file = xcalloc(1, sizeof(struct lock_file));
-        hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+        hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
 
         refresh_cache(REFRESH_QUIET);
 
@@ -1970,11 +1969,11 @@ static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
         init_tree_desc(&t[1], remote->buffer, remote->size);
 
         if (unpack_trees(2, t, &opts)) {
-                rollback_lock_file(lock_file);
+                rollback_lock_file(&lock_file);
                 return -1;
         }
 
-        if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+        if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
                 die(_("unable to write new index file"));
 
         return 0;
@@ -1986,15 +1985,14 @@ static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
  */
 static int merge_tree(struct tree *tree)
 {
-        struct lock_file *lock_file;
+        struct lock_file lock_file = LOCK_INIT;
         struct unpack_trees_options opts;
         struct tree_desc t[1];
 
         if (parse_tree(tree))
                 return -1;
 
-        lock_file = xcalloc(1, sizeof(struct lock_file));
-        hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+        hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
 
         memset(&opts, 0, sizeof(opts));
         opts.head_idx = 1;
@@ -2005,11 +2003,11 @@ static int merge_tree(struct tree *tree)
         init_tree_desc(&t[0], tree->buffer, tree->size);
 
         if (unpack_trees(1, t, &opts)) {
-                rollback_lock_file(lock_file);
+                rollback_lock_file(&lock_file);
                 return -1;
         }
 
-        if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+        if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
                 die(_("unable to write new index file"));
 
         return 0;