reftable/block: reuse zstream when writing log blocks
While most reftable blocks are written to disk as-is, blocks for log
records are compressed with zlib. To compress them we use `compress2()`,
which is a simple wrapper around the more complex `zstream` interface
that would require multiple function invocations.

One downside of this interface is that `compress2()` will reallocate
internal state of the `zstream` interface on every single invocation.
Consequently, as we call `compress2()` for every single log block which
we are about to write, this can lead to quite some memory allocation
churn.

Refactor the code so that the block writer reuses a `zstream`. This
significantly reduces the number of bytes allocated when writing many
refs in a single transaction, as demonstrated by the following benchmark
that writes 100k refs in a single transaction.

Before:

    HEAP SUMMARY:
        in use at exit: 671,931 bytes in 151 blocks
      total heap usage: 22,631,887 allocs, 22,631,736 frees, 1,854,670,793 bytes allocated

After:

    HEAP SUMMARY:
        in use at exit: 671,931 bytes in 151 blocks
      total heap usage: 22,620,528 allocs, 22,620,377 frees, 1,245,549,984 bytes allocated

Signed-off-by: Patrick Steinhardt <ps@pks.im>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
commit a155ab2bf4
parent 8aaeffe3b5
committed by Junio C Hamano
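
To see why this matters, compare the two zlib usage patterns outside the
patch. This is a minimal sketch assuming only zlib; the function names
`compress_once()` and `compress_reuse()` are hypothetical and not part of
the patch:

    #include <zlib.h>

    /*
     * Per-call pattern: compress2() allocates and tears down the internal
     * deflate state on every invocation, so calling it once per log block
     * re-allocates that state each time.
     */
    static int compress_once(unsigned char *dst, uLongf *dst_len,
                             const unsigned char *src, uLong src_len)
    {
        return compress2(dst, dst_len, src, src_len, 9);
    }

    /*
     * Reuse pattern: the caller initializes one z_stream up front with
     * deflateInit(zs, 9) and resets it for each block, so the internal
     * state survives across invocations. Teardown happens once via
     * deflateEnd(zs).
     */
    static int compress_reuse(z_stream *zs, unsigned char *dst, uLong dst_cap,
                              const unsigned char *src, uLong src_len)
    {
        if (deflateReset(zs) != Z_OK)
            return -1;

        zs->next_out = dst;
        zs->avail_out = dst_cap;
        zs->next_in = (unsigned char *)src;
        zs->avail_in = src_len;

        /*
         * With dst_cap sized via deflateBound(), a single Z_FINISH call
         * is guaranteed to return Z_STREAM_END.
         */
        if (deflate(zs, Z_FINISH) != Z_STREAM_END)
            return -1;
        return 0;
    }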
diff --git a/reftable/block.c b/reftable/block.c
--- a/reftable/block.c
+++ b/reftable/block.c
@@ -76,6 +76,10 @@ void block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
 	bw->entries = 0;
 	bw->restart_len = 0;
 	bw->last_key.len = 0;
+	if (!bw->zstream) {
+		REFTABLE_CALLOC_ARRAY(bw->zstream, 1);
+		deflateInit(bw->zstream, 9);
+	}
 }
 
 uint8_t block_writer_type(struct block_writer *bw)
@@ -139,39 +143,56 @@ int block_writer_finish(struct block_writer *w)
 	w->next += 2;
 	put_be24(w->buf + 1 + w->header_off, w->next);
 
+	/*
+	 * Log records are stored zlib-compressed. Note that the compression
+	 * also spans over the restart points we have just written.
+	 */
 	if (block_writer_type(w) == BLOCK_TYPE_LOG) {
 		int block_header_skip = 4 + w->header_off;
-		uLongf src_len = w->next - block_header_skip;
-		uLongf dest_cap = src_len * 1.001 + 12;
-		uint8_t *compressed;
+		uLongf src_len = w->next - block_header_skip, compressed_len;
+		unsigned char *compressed;
+		int ret;
 
-		REFTABLE_ALLOC_ARRAY(compressed, dest_cap);
+		ret = deflateReset(w->zstream);
+		if (ret != Z_OK)
+			return REFTABLE_ZLIB_ERROR;
 
-		while (1) {
-			uLongf out_dest_len = dest_cap;
-			int zresult = compress2(compressed, &out_dest_len,
-						w->buf + block_header_skip,
-						src_len, 9);
-			if (zresult == Z_BUF_ERROR && dest_cap < LONG_MAX) {
-				dest_cap *= 2;
-				compressed =
-					reftable_realloc(compressed, dest_cap);
-				if (compressed)
-					continue;
-			}
+		/*
+		 * Precompute the upper bound of how many bytes the compressed
+		 * data may end up with. Combined with `Z_FINISH`, `deflate()`
+		 * is guaranteed to return `Z_STREAM_END`.
+		 */
+		compressed_len = deflateBound(w->zstream, src_len);
+		REFTABLE_ALLOC_ARRAY(compressed, compressed_len);
 
-			if (Z_OK != zresult) {
-				reftable_free(compressed);
-				return REFTABLE_ZLIB_ERROR;
-			}
+		w->zstream->next_out = compressed;
+		w->zstream->avail_out = compressed_len;
+		w->zstream->next_in = w->buf + block_header_skip;
+		w->zstream->avail_in = src_len;
 
-			memcpy(w->buf + block_header_skip, compressed,
-			       out_dest_len);
-			w->next = out_dest_len + block_header_skip;
-			reftable_free(compressed);
-			break;
+		/*
+		 * We want to perform all decompression in a single step, which
+		 * is why we can pass Z_FINISH here. As we have precomputed the
+		 * deflated buffer's size via `deflateBound()` this function is
+		 * guaranteed to succeed according to the zlib documentation.
+		 */
+		ret = deflate(w->zstream, Z_FINISH);
+		if (ret != Z_STREAM_END) {
+			reftable_free(compressed);
+			return REFTABLE_ZLIB_ERROR;
 		}
+
+		/*
+		 * Overwrite the uncompressed data we have already written and
+		 * adjust the `next` pointer to point right after the
+		 * compressed data.
+		 */
+		memcpy(w->buf + block_header_skip, compressed,
+		       w->zstream->total_out);
+		w->next = w->zstream->total_out + block_header_skip;
+
+		reftable_free(compressed);
 	}
 
 	return w->next;
 }
@@ -425,6 +447,8 @@ done:
 
 void block_writer_release(struct block_writer *bw)
 {
+	deflateEnd(bw->zstream);
+	FREE_AND_NULL(bw->zstream);
 	FREE_AND_NULL(bw->restarts);
 	strbuf_release(&bw->last_key);
 	/* the block is not owned. */
diff --git a/reftable/block.h b/reftable/block.h
--- a/reftable/block.h
+++ b/reftable/block.h
@@ -18,6 +18,7 @@ https://developers.google.com/open-source/licenses/bsd
  * allocation overhead.
  */
 struct block_writer {
+	z_stream *zstream;
 	uint8_t *buf;
 	uint32_t block_size;
 
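
The one-shot `deflateBound()` + `Z_FINISH` pattern the patch adopts can be
exercised end to end with a small standalone program. Again a hedged
sketch rather than code from the patch, it compresses many identical
blocks through a single reused `z_stream`, mirroring what
`block_writer_init()`, `block_writer_finish()` and
`block_writer_release()` now do between them:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        z_stream zs = { 0 }; /* zalloc/zfree/opaque default to Z_NULL */
        unsigned char block[4096];
        int i;

        /* Allocate the deflate state once, as block_writer_init() now does. */
        if (deflateInit(&zs, 9) != Z_OK)
            return 1;

        memset(block, 'x', sizeof(block));

        /*
         * Compress many blocks: only the output buffer is reallocated per
         * block, while the deflate state itself is merely reset.
         */
        for (i = 0; i < 1000; i++) {
            uLong cap = deflateBound(&zs, sizeof(block));
            unsigned char *out = malloc(cap);
            int ret;

            assert(out);
            ret = deflateReset(&zs);
            assert(ret == Z_OK);

            zs.next_out = out;
            zs.avail_out = cap;
            zs.next_in = block;
            zs.avail_in = sizeof(block);

            /* Guaranteed to finish in one step given the bound. */
            ret = deflate(&zs, Z_FINISH);
            assert(ret == Z_STREAM_END);

            free(out);
        }

        printf("compressed %d blocks with one z_stream\n", i);

        /* Tear down once, as block_writer_release() now does. */
        deflateEnd(&zs);
        return 0;
    }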