Represent bucket as object instead of []byte name.

Thanks to this change:
  - all the bucket -> buffer maps are indexed by ints instead of
    strings, so there is no []byte -> string -> hash conversion on
    each access.
  - buckets are strongly typed in the backend/mvcc API.
Piotr Tabor
2021-05-17 23:05:27 +02:00
parent 8bddbdc1d6
commit e6baf6d751
25 changed files with 359 additions and 289 deletions
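
For context on the API change the diff below exercises: buckets.Test and buckets.Key are typed bucket values that replace the raw []byte names. The following is a minimal sketch of what such a strongly typed bucket could look like; the interface shape, field names, and ID values are assumptions for illustration, not the exact definitions from the etcd backend/mvcc packages.

package buckets

// Sketch only: names, IDs and layout are assumed, not copied from etcd.

// BucketID is a small integer identifier, so bucket -> buffer maps can be
// keyed by int instead of converting []byte -> string on every access.
type BucketID int

// Bucket is the strongly typed handle the backend/mvcc API accepts in place
// of a raw []byte bucket name.
type Bucket interface {
	// ID returns the in-memory identifier used as a map key.
	ID() BucketID
	// Name returns the bucket name as persisted in bolt.
	Name() []byte
}

// bucket is a trivial value implementation of Bucket.
type bucket struct {
	id   BucketID
	name []byte
}

func (b bucket) ID() BucketID { return b.id }
func (b bucket) Name() []byte { return b.name }

// Typed values like those the tests below pass to UnsafeCreateBucket,
// UnsafePut, UnsafeRange and UnsafeForEach (the IDs here are made up).
var (
	Key  Bucket = bucket{id: 1, name: []byte("key")}
	Test Bucket = bucket{id: 100, name: []byte("test")}
)

With an int ID available, the write-back buffer can be kept in something like a map[BucketID]*bucketBuffer rather than a map[string]*bucketBuffer, which is what the first bullet of the commit message refers to (the exact map and buffer types are an assumption here).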


@@ -25,6 +25,7 @@ import (
 	bolt "go.etcd.io/bbolt"
 	"go.etcd.io/etcd/server/v3/mvcc/backend"
 	betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+	"go.etcd.io/etcd/server/v3/mvcc/buckets"
 )
 func TestBackendClose(t *testing.T) {
@@ -52,8 +53,8 @@ func TestBackendSnapshot(t *testing.T) {
 	tx := b.BatchTx()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("test"))
-	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
+	tx.UnsafeCreateBucket(buckets.Test)
+	tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
 	tx.Unlock()
 	b.ForceCommit()
@@ -77,7 +78,7 @@ func TestBackendSnapshot(t *testing.T) {
 	newTx := nb.BatchTx()
 	newTx.Lock()
-	ks, _ := newTx.UnsafeRange([]byte("test"), []byte("foo"), []byte("goo"), 0)
+	ks, _ := newTx.UnsafeRange(buckets.Test, []byte("foo"), []byte("goo"), 0)
 	if len(ks) != 1 {
 		t.Errorf("len(kvs) = %d, want 1", len(ks))
 	}
@@ -94,8 +95,8 @@ func TestBackendBatchIntervalCommit(t *testing.T) {
 	tx := b.BatchTx()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("test"))
-	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
+	tx.UnsafeCreateBucket(buckets.Test)
+	tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
 	tx.Unlock()
 	for i := 0; i < 10; i++ {
@@ -126,9 +127,9 @@ func TestBackendDefrag(t *testing.T) {
 	tx := b.BatchTx()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("test"))
+	tx.UnsafeCreateBucket(buckets.Test)
 	for i := 0; i < backend.DefragLimitForTest()+100; i++ {
-		tx.UnsafePut([]byte("test"), []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
+		tx.UnsafePut(buckets.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
 	}
 	tx.Unlock()
 	b.ForceCommit()
@@ -137,7 +138,7 @@ func TestBackendDefrag(t *testing.T) {
 	tx = b.BatchTx()
 	tx.Lock()
 	for i := 0; i < 50; i++ {
-		tx.UnsafeDelete([]byte("test"), []byte(fmt.Sprintf("foo_%d", i)))
+		tx.UnsafeDelete(buckets.Test, []byte(fmt.Sprintf("foo_%d", i)))
 	}
 	tx.Unlock()
 	b.ForceCommit()
@@ -171,8 +172,8 @@ func TestBackendDefrag(t *testing.T) {
 	// try put more keys after shrink.
 	tx = b.BatchTx()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("test"))
-	tx.UnsafePut([]byte("test"), []byte("more"), []byte("bar"))
+	tx.UnsafeCreateBucket(buckets.Test)
+	tx.UnsafePut(buckets.Test, []byte("more"), []byte("bar"))
 	tx.Unlock()
 	b.ForceCommit()
 }
@@ -184,15 +185,15 @@ func TestBackendWriteback(t *testing.T) {
 	tx := b.BatchTx()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("key"))
-	tx.UnsafePut([]byte("key"), []byte("abc"), []byte("bar"))
-	tx.UnsafePut([]byte("key"), []byte("def"), []byte("baz"))
-	tx.UnsafePut([]byte("key"), []byte("overwrite"), []byte("1"))
+	tx.UnsafeCreateBucket(buckets.Key)
+	tx.UnsafePut(buckets.Key, []byte("abc"), []byte("bar"))
+	tx.UnsafePut(buckets.Key, []byte("def"), []byte("baz"))
+	tx.UnsafePut(buckets.Key, []byte("overwrite"), []byte("1"))
 	tx.Unlock()
 	// overwrites should be propagated too
 	tx.Lock()
-	tx.UnsafePut([]byte("key"), []byte("overwrite"), []byte("2"))
+	tx.UnsafePut(buckets.Key, []byte("overwrite"), []byte("2"))
 	tx.Unlock()
 	keys := []struct {
@@ -242,12 +243,14 @@ func TestBackendWriteback(t *testing.T) {
 	}
 	rtx := b.ReadTx()
 	for i, tt := range keys {
-		rtx.RLock()
-		k, v := rtx.UnsafeRange([]byte("key"), tt.key, tt.end, tt.limit)
-		rtx.RUnlock()
-		if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
-			t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
-		}
+		func() {
+			rtx.RLock()
+			defer rtx.RUnlock()
+			k, v := rtx.UnsafeRange(buckets.Key, tt.key, tt.end, tt.limit)
+			if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
+				t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
+			}
+		}()
 	}
 }
@@ -258,20 +261,20 @@ func TestConcurrentReadTx(t *testing.T) {
 	wtx1 := b.BatchTx()
 	wtx1.Lock()
-	wtx1.UnsafeCreateBucket([]byte("key"))
-	wtx1.UnsafePut([]byte("key"), []byte("abc"), []byte("ABC"))
-	wtx1.UnsafePut([]byte("key"), []byte("overwrite"), []byte("1"))
+	wtx1.UnsafeCreateBucket(buckets.Key)
+	wtx1.UnsafePut(buckets.Key, []byte("abc"), []byte("ABC"))
+	wtx1.UnsafePut(buckets.Key, []byte("overwrite"), []byte("1"))
 	wtx1.Unlock()
 	wtx2 := b.BatchTx()
 	wtx2.Lock()
-	wtx2.UnsafePut([]byte("key"), []byte("def"), []byte("DEF"))
-	wtx2.UnsafePut([]byte("key"), []byte("overwrite"), []byte("2"))
+	wtx2.UnsafePut(buckets.Key, []byte("def"), []byte("DEF"))
+	wtx2.UnsafePut(buckets.Key, []byte("overwrite"), []byte("2"))
 	wtx2.Unlock()
 	rtx := b.ConcurrentReadTx()
 	rtx.RLock() // no-op
-	k, v := rtx.UnsafeRange([]byte("key"), []byte("abc"), []byte("\xff"), 0)
+	k, v := rtx.UnsafeRange(buckets.Key, []byte("abc"), []byte("\xff"), 0)
 	rtx.RUnlock()
 	wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}
 	wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")}
@@ -288,10 +291,10 @@ func TestBackendWritebackForEach(t *testing.T) {
 	tx := b.BatchTx()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("key"))
+	tx.UnsafeCreateBucket(buckets.Key)
 	for i := 0; i < 5; i++ {
 		k := []byte(fmt.Sprintf("%04d", i))
-		tx.UnsafePut([]byte("key"), k, []byte("bar"))
+		tx.UnsafePut(buckets.Key, k, []byte("bar"))
 	}
 	tx.Unlock()
@@ -299,10 +302,10 @@ func TestBackendWritebackForEach(t *testing.T) {
 	b.ForceCommit()
 	tx.Lock()
-	tx.UnsafeCreateBucket([]byte("key"))
+	tx.UnsafeCreateBucket(buckets.Key)
 	for i := 5; i < 20; i++ {
 		k := []byte(fmt.Sprintf("%04d", i))
-		tx.UnsafePut([]byte("key"), k, []byte("bar"))
+		tx.UnsafePut(buckets.Key, k, []byte("bar"))
 	}
 	tx.Unlock()
@@ -313,7 +316,7 @@ func TestBackendWritebackForEach(t *testing.T) {
 	}
 	rtx := b.ReadTx()
 	rtx.RLock()
-	assert.NoError(t, rtx.UnsafeForEach([]byte("key"), getSeq))
+	assert.NoError(t, rtx.UnsafeForEach(buckets.Key, getSeq))
 	rtx.RUnlock()
 	partialSeq := seq
@@ -322,7 +325,7 @@ func TestBackendWritebackForEach(t *testing.T) {
 	b.ForceCommit()
 	tx.Lock()
-	assert.NoError(t, tx.UnsafeForEach([]byte("key"), getSeq))
+	assert.NoError(t, tx.UnsafeForEach(buckets.Key, getSeq))
 	tx.Unlock()
 	if seq != partialSeq {