chore: use testify instead of testing in tests/integration
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
parent f07e2ae4ed
commit d05b8b7611
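The conversion is mechanical across the package: every hand-rolled "if err != nil { t.Fatalf(...) }" block collapses into the matching testify require assertion (require.NoErrorf, require.Truef, require.Lenf, require.Equalf, and friends), which fails the test immediately with the same message. A minimal runnable sketch of the core pattern follows; the example package and the doWork helper are illustrative stand-ins, not files touched by this commit.

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doWork is a hypothetical stand-in for any call whose error the tests check.
func doWork() error { return nil }

// Before: manual error check plus t.Fatalf.
func TestOldStyle(t *testing.T) {
	if err := doWork(); err != nil {
		t.Fatalf("doWork returned non nil err: %s", err)
	}
}

// After: the equivalent single-line require assertion used throughout this commit.
func TestNewStyle(t *testing.T) {
	require.NoErrorf(t, doWork(), "doWork returned non nil err")
}
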
@@ -21,6 +21,8 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"

clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
@@ -49,16 +51,12 @@ func TestResumeElection(t *testing.T) {
defer cancel()

// become leader
if err = e.Campaign(ctx, "candidate1"); err != nil {
t.Fatalf("Campaign() returned non nil err: %s", err)
}
require.NoErrorf(t, e.Campaign(ctx, "candidate1"), "Campaign() returned non nil err")

// get the leadership details of the current election
var leader *clientv3.GetResponse
leader, err = e.Leader(ctx)
if err != nil {
t.Fatalf("Leader() returned non nil err: %s", err)
}
require.NoErrorf(t, err, "Leader() returned non nil err")

// Recreate the election
e = concurrency.ResumeElection(s, prefix,
@@ -86,19 +84,13 @@ func TestResumeElection(t *testing.T) {
// put some random data to generate a change event, this put should be
// ignored by Observe() because it is not under the election prefix.
_, err = cli.Put(ctx, "foo", "bar")
if err != nil {
t.Fatalf("Put('foo') returned non nil err: %s", err)
}
require.NoErrorf(t, err, "Put('foo') returned non nil err")

// resign as leader
if err := e.Resign(ctx); err != nil {
t.Fatalf("Resign() returned non nil err: %s", err)
}
require.NoErrorf(t, e.Resign(ctx), "Resign() returned non nil err")

// elect a different candidate
if err := e.Campaign(ctx, "candidate2"); err != nil {
t.Fatalf("Campaign() returned non nil err: %s", err)
}
require.NoErrorf(t, e.Campaign(ctx, "candidate2"), "Campaign() returned non nil err")

// wait for observed leader change
resp := <-respChan

@@ -67,9 +67,8 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
defer cli.Close()

wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify())
if _, ok := <-wch; !ok {
t.Fatalf("watch failed on creation")
}
_, ok := <-wch
require.Truef(t, ok, "watch failed on creation")

// endpoint can switch to eps[1] when it detects the failure of eps[0]
cli.SetEndpoints(eps...)

@@ -63,9 +63,7 @@ func TestDialTLSExpired(t *testing.T) {
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: tls,
})
if !clientv3test.IsClientTimeout(err) {
t.Fatalf("expected dial timeout error, got %v", err)
}
require.Truef(t, clientv3test.IsClientTimeout(err), "expected dial timeout error")
}

// TestDialTLSNoConfig ensures the client fails to dial / times out
@@ -85,9 +83,7 @@ func TestDialTLSNoConfig(t *testing.T) {
c.Close()
}
}()
if !clientv3test.IsClientTimeout(err) {
t.Fatalf("expected dial timeout error, got %v", err)
}
require.Truef(t, clientv3test.IsClientTimeout(err), "expected dial timeout error")
}

// TestDialSetEndpointsBeforeFail ensures SetEndpoints can replace unavailable

@@ -260,9 +260,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
if len(ev.Events) != 0 {
t.Fatal("expected no event")
}
if err = ev.Err(); !errors.Is(err, rpctypes.ErrNoLeader) {
t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
}
require.ErrorIs(t, ev.Err(), rpctypes.ErrNoLeader)
case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost
t.Fatal("took too long to detect leader lost")
}
@@ -302,9 +300,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
_, err = kvc.Get(ctx, "a")
cancel()
if !errors.Is(err, rpctypes.ErrLeaderChanged) {
t.Fatalf("expected %v, got %v", rpctypes.ErrLeaderChanged, err)
}
require.ErrorIsf(t, err, rpctypes.ErrLeaderChanged, "expected %v, got %v", rpctypes.ErrLeaderChanged, err)

for i := 0; i < 5; i++ {
ctx, cancel = context.WithTimeout(context.TODO(), 10*time.Second)

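The hunks above also show the second recurring rewrite in this commit: a manual errors.Is comparison followed by t.Fatalf becomes a single require.ErrorIs or require.ErrorIsf call. A minimal runnable sketch of that conversion; errNoLeader and fetch are hypothetical stand-ins, not identifiers from this commit.

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// errNoLeader stands in for a sentinel error such as rpctypes.ErrNoLeader.
var errNoLeader = errors.New("no leader")

func fetch() error { return errNoLeader }

// Before: compare against the sentinel by hand and fail explicitly.
func TestSentinelOld(t *testing.T) {
	if err := fetch(); !errors.Is(err, errNoLeader) {
		t.Fatalf("expected %v, got %v", errNoLeader, err)
	}
}

// After: require.ErrorIsf performs the errors.Is check and fails the test for us.
func TestSentinelNew(t *testing.T) {
	require.ErrorIsf(t, fetch(), errNoLeader, "expected %v", errNoLeader)
}
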
@@ -19,6 +19,8 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"

clientv3 "go.etcd.io/etcd/client/v3"
recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
@@ -40,12 +42,8 @@ func TestBarrierMultiNode(t *testing.T) {

func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
b := recipe.NewBarrier(chooseClient(), "test-barrier")
if err := b.Hold(); err != nil {
t.Fatalf("could not hold barrier (%v)", err)
}
if err := b.Hold(); err == nil {
t.Fatalf("able to double-hold barrier")
}
require.NoErrorf(t, b.Hold(), "could not hold barrier")
require.Errorf(t, b.Hold(), "able to double-hold barrier")

// put a random key to move the revision forward
if _, err := chooseClient().Put(context.Background(), "x", ""); err != nil {
@@ -75,9 +73,7 @@ func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client
default:
}

if err := b.Release(); err != nil {
t.Fatalf("could not release barrier (%v)", err)
}
require.NoErrorf(t, b.Release(), "could not release barrier")

timerC := time.After(time.Duration(waiters*100) * time.Millisecond)
for i := 0; i < waiters; i++ {

@@ -73,9 +73,7 @@ func TestDoubleBarrier(t *testing.T) {
default:
}

if err := b.Enter(); err != nil {
t.Fatalf("could not enter last barrier (%v)", err)
}
require.NoErrorf(t, b.Enter(), "could not enter last barrier")

timerC := time.After(time.Duration(waiters*100) * time.Millisecond)
for i := 0; i < waiters-1; i++ {

@@ -93,9 +93,7 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
t.Fatalf("lock %d followers did not wait", i)
default:
}
if err := m.Unlock(context.TODO()); err != nil {
t.Fatalf("could not release lock (%v)", err)
}
require.NoErrorf(t, m.Unlock(context.TODO()), "could not release lock")
}
}
wg.Wait()
@@ -233,9 +231,7 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
t.Fatal("failed to receive watch response")
}
}
if putCounts != 2 {
t.Fatalf("expect 2 put events, but got %v", putCounts)
}
require.Equalf(t, 2, putCounts, "expect 2 put events, but got %v", putCounts)

newOwnerSession, err := concurrency.NewSession(cli)
if err != nil {
@@ -250,12 +246,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {

select {
case wrp := <-wch:
if len(wrp.Events) != 1 {
t.Fatalf("expect a event, but got %v events", len(wrp.Events))
}
if e := wrp.Events[0]; e.Type != mvccpb.PUT {
t.Fatalf("expect a put event on prefix test-mutex, but got event type %v", e.Type)
}
require.Lenf(t, wrp.Events, 1, "expect a event, but got %v events", len(wrp.Events))
e := wrp.Events[0]
require.Equalf(t, mvccpb.PUT, e.Type, "expect a put event on prefix test-mutex, but got event type %v", e.Type)
case <-time.After(time.Second):
t.Fatalf("failed to receive a watch response")
}
@@ -266,12 +259,9 @@ func TestMutexWaitsOnCurrentHolder(t *testing.T) {
// ensures the deletion of victim waiter from server side.
select {
case wrp := <-wch:
if len(wrp.Events) != 1 {
t.Fatalf("expect a event, but got %v events", len(wrp.Events))
}
if e := wrp.Events[0]; e.Type != mvccpb.DELETE {
t.Fatalf("expect a delete event on prefix test-mutex, but got event type %v", e.Type)
}
require.Lenf(t, wrp.Events, 1, "expect a event, but got %v events", len(wrp.Events))
e := wrp.Events[0]
require.Equalf(t, mvccpb.DELETE, e.Type, "expect a delete event on prefix test-mutex, but got event type %v", e.Type)
case <-time.After(time.Second):
t.Fatal("failed to receive a watch response")
}
@@ -357,18 +347,14 @@ func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client
t.Fatalf("rlock %d readers did not wait", i)
default:
}
if err := wl.Unlock(); err != nil {
t.Fatalf("could not release lock (%v)", err)
}
require.NoErrorf(t, wl.Unlock(), "could not release lock")
case rl := <-rlockedC:
select {
case <-wlockedC:
t.Fatalf("rlock %d writers did not wait", i)
default:
}
if err := rl.RUnlock(); err != nil {
t.Fatalf("could not release rlock (%v)", err)
}
require.NoErrorf(t, rl.RUnlock(), "could not release rlock")
}
}
}

@@ -20,6 +20,8 @@ import (
"sync/atomic"
"testing"

"github.com/stretchr/testify/require"

recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
)
@@ -57,12 +59,8 @@ func TestQueueOneReaderOneWriter(t *testing.T) {
q := recipe.NewQueue(etcdc, "testq")
for i := 0; i < 5; i++ {
s, err := q.Dequeue()
if err != nil {
t.Fatalf("error dequeueing (%v)", err)
}
if s != fmt.Sprintf("%d", i) {
t.Fatalf("expected dequeue value %v, got %v", s, i)
}
require.NoErrorf(t, err, "error dequeueing (%v)", err)
require.Equalf(t, s, fmt.Sprintf("%d", i), "expected dequeue value %v, got %v", s, i)
}
}

@@ -103,25 +101,18 @@ func TestPrQueueOneReaderOneWriter(t *testing.T) {
for i := 0; i < 5; i++ {
// [0, 2] priority for priority collision to test seq keys
pr := uint16(rand.Intn(3))
if err := q.Enqueue(fmt.Sprintf("%d", pr), pr); err != nil {
t.Fatalf("error enqueuing (%v)", err)
}
require.NoErrorf(t, q.Enqueue(fmt.Sprintf("%d", pr), pr), "error enqueuing")
}

// read back items; confirm priority order is respected
lastPr := -1
for i := 0; i < 5; i++ {
s, err := q.Dequeue()
if err != nil {
t.Fatalf("error dequeueing (%v)", err)
}
require.NoErrorf(t, err, "error dequeueing (%v)", err)
curPr := 0
if _, err := fmt.Sscanf(s, "%d", &curPr); err != nil {
t.Fatalf(`error parsing item "%s" (%v)`, s, err)
}
if lastPr > curPr {
t.Fatalf("expected priority %v > %v", curPr, lastPr)
}
_, err = fmt.Sscanf(s, "%d", &curPr)
require.NoErrorf(t, err, `error parsing item "%s" (%v)`, s, err)
require.LessOrEqualf(t, lastPr, curPr, "expected priority %v > %v", curPr, lastPr)
}
}

@ -41,9 +41,7 @@ func TestLeaseNotFoundError(t *testing.T) {
|
||||
kv := clus.RandClient()
|
||||
|
||||
_, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500)))
|
||||
if !errors.Is(err, rpctypes.ErrLeaseNotFound) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
|
||||
}
|
||||
require.ErrorIsf(t, err, rpctypes.ErrLeaseNotFound, "expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
|
||||
}
|
||||
|
||||
func TestLeaseGrant(t *testing.T) {
|
||||
@ -57,9 +55,7 @@ func TestLeaseGrant(t *testing.T) {
|
||||
kv := clus.RandClient()
|
||||
|
||||
_, merr := lapi.Grant(context.Background(), clientv3.MaxLeaseTTL+1)
|
||||
if !errors.Is(merr, rpctypes.ErrLeaseTTLTooLarge) {
|
||||
t.Fatalf("err = %v, want %v", merr, rpctypes.ErrLeaseTTLTooLarge)
|
||||
}
|
||||
require.ErrorIsf(t, merr, rpctypes.ErrLeaseTTLTooLarge, "err = %v, want %v", merr, rpctypes.ErrLeaseTTLTooLarge)
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
@ -67,9 +63,7 @@ func TestLeaseGrant(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create key with lease %v", err)
|
||||
}
|
||||
require.NoErrorf(t, err, "failed to create key with lease %v", err)
|
||||
}
|
||||
|
||||
func TestLeaseRevoke(t *testing.T) {
|
||||
@ -93,9 +87,7 @@ func TestLeaseRevoke(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if !errors.Is(err, rpctypes.ErrLeaseNotFound) {
|
||||
t.Fatalf("err = %v, want %v", err, rpctypes.ErrLeaseNotFound)
|
||||
}
|
||||
require.ErrorIsf(t, err, rpctypes.ErrLeaseNotFound, "err = %v, want %v", err, rpctypes.ErrLeaseNotFound)
|
||||
}
|
||||
|
||||
func TestLeaseKeepAliveOnce(t *testing.T) {
|
||||
@ -153,9 +145,7 @@ func TestLeaseKeepAlive(t *testing.T) {
|
||||
t.Errorf("chan is closed, want not closed")
|
||||
}
|
||||
|
||||
if kresp == nil {
|
||||
t.Fatalf("unexpected null response")
|
||||
}
|
||||
require.NotNilf(t, kresp, "unexpected null response")
|
||||
|
||||
if kresp.ID != resp.ID {
|
||||
t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
@ -341,9 +331,7 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
|
||||
|
||||
// expect lease keepalive every 10-second
|
||||
lresp, err := lapi.Grant(context.Background(), 30)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create lease %v", err)
|
||||
}
|
||||
require.NoErrorf(t, err, "failed to create lease %v", err)
|
||||
id := lresp.ID
|
||||
|
||||
old := clientv3.LeaseResponseChSize
|
||||
@ -354,18 +342,14 @@ func TestLeaseKeepAliveFullResponseQueue(t *testing.T) {
|
||||
|
||||
// never fetch from response queue, and let it become full
|
||||
_, err = lapi.KeepAlive(context.Background(), id)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to keepalive lease %v", err)
|
||||
}
|
||||
require.NoErrorf(t, err, "failed to keepalive lease %v", err)
|
||||
|
||||
// TTL should not be refreshed after 3 seconds
|
||||
// expect keepalive to be triggered after TTL/3
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
tr, terr := lapi.TimeToLive(context.Background(), id)
|
||||
if terr != nil {
|
||||
t.Fatalf("failed to get lease information %v", terr)
|
||||
}
|
||||
require.NoErrorf(t, terr, "failed to get lease information %v", terr)
|
||||
if tr.TTL >= 29 {
|
||||
t.Errorf("unexpected kept-alive lease TTL %d", tr.TTL)
|
||||
}
|
||||
@ -423,9 +407,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
|
||||
case <-time.After(integration2.RequestWaitTimeout):
|
||||
t.Fatal("le.Revoke took too long")
|
||||
case errMsg := <-errMsgCh:
|
||||
if errMsg != "" {
|
||||
t.Fatalf("%v", errMsg)
|
||||
}
|
||||
require.Empty(t, errMsg)
|
||||
}
|
||||
}
|
||||
|
||||
@ -445,9 +427,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
|
||||
rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
require.NoError(t, kerr)
|
||||
kresp := <-rc
|
||||
if kresp.ID != resp.ID {
|
||||
t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
}
|
||||
require.Equalf(t, kresp.ID, resp.ID, "ID = %x, want %x", kresp.ID, resp.ID)
|
||||
|
||||
// keep client disconnected
|
||||
clus.Members[0].Stop(t)
|
||||
@ -489,9 +469,7 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
|
||||
require.NoError(t, kerr)
|
||||
select {
|
||||
case ka, ok := <-rc:
|
||||
if ok {
|
||||
t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
|
||||
}
|
||||
require.Falsef(t, ok, "unexpected keepalive %v, expected closed channel", ka)
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("keepalive channel did not close")
|
||||
}
|
||||
@ -514,17 +492,14 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
require.NoError(t, kerr)
|
||||
if kresp := <-rc; kresp.ID != resp.ID {
|
||||
t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
}
|
||||
kresp := <-rc
|
||||
require.Equalf(t, kresp.ID, resp.ID, "ID = %x, want %x", kresp.ID, resp.ID)
|
||||
|
||||
// keep client disconnected
|
||||
clus.Members[0].Stop(t)
|
||||
select {
|
||||
case ka, ok := <-rc:
|
||||
if ok {
|
||||
t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
|
||||
}
|
||||
require.Falsef(t, ok, "unexpected keepalive %v, expected closed channel", ka)
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("keepalive channel did not close")
|
||||
}
|
||||
@ -559,12 +534,8 @@ func TestLeaseTimeToLive(t *testing.T) {
|
||||
|
||||
lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
|
||||
require.NoError(t, lerr)
|
||||
if lresp.ID != resp.ID {
|
||||
t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
|
||||
}
|
||||
if lresp.GrantedTTL != int64(10) {
|
||||
t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
|
||||
}
|
||||
require.Equalf(t, lresp.ID, resp.ID, "leaseID expected %d, got %d", resp.ID, lresp.ID)
|
||||
require.Equalf(t, int64(10), lresp.GrantedTTL, "GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
|
||||
if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
|
||||
t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
|
||||
}
|
||||
@ -573,15 +544,11 @@ func TestLeaseTimeToLive(t *testing.T) {
|
||||
ks[i] = string(lresp.Keys[i])
|
||||
}
|
||||
sort.Strings(ks)
|
||||
if !reflect.DeepEqual(ks, keys) {
|
||||
t.Fatalf("keys expected %v, got %v", keys, ks)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(ks, keys), "keys expected %v, got %v", keys, ks)
|
||||
|
||||
lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
|
||||
require.NoError(t, lerr)
|
||||
if len(lresp.Keys) != 0 {
|
||||
t.Fatalf("unexpected keys %+v", lresp.Keys)
|
||||
}
|
||||
require.Emptyf(t, lresp.Keys, "unexpected keys %+v", lresp.Keys)
|
||||
}
|
||||
|
||||
func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
|
||||
@ -602,21 +569,11 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
|
||||
|
||||
lresp, err := cli.TimeToLive(context.Background(), resp.ID)
|
||||
// TimeToLive() should return a response with TTL=-1.
|
||||
if err != nil {
|
||||
t.Fatalf("expected err to be nil")
|
||||
}
|
||||
if lresp == nil {
|
||||
t.Fatalf("expected lresp not to be nil")
|
||||
}
|
||||
if lresp.ResponseHeader == nil {
|
||||
t.Fatalf("expected ResponseHeader not to be nil")
|
||||
}
|
||||
if lresp.ID != resp.ID {
|
||||
t.Fatalf("expected Lease ID %v, but got %v", resp.ID, lresp.ID)
|
||||
}
|
||||
if lresp.TTL != -1 {
|
||||
t.Fatalf("expected TTL %v, but got %v", lresp.TTL, lresp.TTL)
|
||||
}
|
||||
require.NoErrorf(t, err, "expected err to be nil")
|
||||
require.NotNilf(t, lresp, "expected lresp not to be nil")
|
||||
require.NotNilf(t, lresp.ResponseHeader, "expected ResponseHeader not to be nil")
|
||||
require.Equalf(t, lresp.ID, resp.ID, "expected Lease ID %v, but got %v", resp.ID, lresp.ID)
|
||||
require.Equalf(t, lresp.TTL, int64(-1), "expected TTL %v, but got %v", lresp.TTL, lresp.TTL)
|
||||
}
|
||||
|
||||
func TestLeaseLeases(t *testing.T) {
|
||||
@ -638,13 +595,9 @@ func TestLeaseLeases(t *testing.T) {
|
||||
|
||||
resp, err := cli.Leases(context.Background())
|
||||
require.NoError(t, err)
|
||||
if len(resp.Leases) != 5 {
|
||||
t.Fatalf("len(resp.Leases) expected 5, got %d", len(resp.Leases))
|
||||
}
|
||||
require.Lenf(t, resp.Leases, 5, "len(resp.Leases) expected 5, got %d", len(resp.Leases))
|
||||
for i := range resp.Leases {
|
||||
if ids[i] != resp.Leases[i].ID {
|
||||
t.Fatalf("#%d: lease ID expected %d, got %d", i, ids[i], resp.Leases[i].ID)
|
||||
}
|
||||
require.Equalf(t, ids[i], resp.Leases[i].ID, "#%d: lease ID expected %d, got %d", i, ids[i], resp.Leases[i].ID)
|
||||
}
|
||||
}
|
||||
|
||||
@ -686,9 +639,7 @@ func TestLeaseRenewLostQuorum(t *testing.T) {
|
||||
|
||||
select {
|
||||
case _, ok := <-ka:
|
||||
if !ok {
|
||||
t.Fatalf("keepalive closed")
|
||||
}
|
||||
require.Truef(t, ok, "keepalive closed")
|
||||
case <-time.After(time.Duration(r.TTL) * time.Second):
|
||||
t.Fatalf("timed out waiting for keepalive")
|
||||
}
|
||||
@ -710,9 +661,7 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
|
||||
|
||||
_, err = cli.KeepAlive(ctx, resp.ID)
|
||||
var keepAliveHaltedErr clientv3.ErrKeepAliveHalted
|
||||
if !errors.As(err, &keepAliveHaltedErr) {
|
||||
t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
|
||||
}
|
||||
require.ErrorAsf(t, err, &keepAliveHaltedErr, "expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
|
||||
}
|
||||
|
||||
// TestV3LeaseFailureOverlap issues Grant and KeepAlive requests to a cluster
|
||||
@ -813,17 +762,13 @@ func TestLeaseWithRequireLeader(t *testing.T) {
|
||||
|
||||
select {
|
||||
case resp, ok := <-kaReqLeader:
|
||||
if ok {
|
||||
t.Fatalf("expected closed require leader, got response %+v", resp)
|
||||
}
|
||||
require.Falsef(t, ok, "expected closed require leader, got response %+v", resp)
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("keepalive with require leader took too long to close")
|
||||
}
|
||||
select {
|
||||
case _, ok := <-kaWait:
|
||||
if !ok {
|
||||
t.Fatalf("got closed channel with no require leader, expected non-closed")
|
||||
}
|
||||
require.Truef(t, ok, "got closed channel with no require leader, expected non-closed")
|
||||
case <-time.After(10 * time.Millisecond):
|
||||
// wait some to detect any closes happening soon after kaReqLeader closing
|
||||
}
|
||||
|
@ -102,9 +102,7 @@ func TestLeasingInterval(t *testing.T) {
|
||||
|
||||
resp, err := lkv.Get(context.TODO(), "abc/", clientv3.WithPrefix())
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 3 {
|
||||
t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs)
|
||||
}
|
||||
require.Lenf(t, resp.Kvs, 3, "expected keys %+v, got response keys %+v", keys, resp.Kvs)
|
||||
|
||||
// load into cache
|
||||
_, err = lkv.Get(context.TODO(), "abc/a")
|
||||
@ -113,9 +111,7 @@ func TestLeasingInterval(t *testing.T) {
|
||||
// get when prefix is also a cached key
|
||||
resp, err = lkv.Get(context.TODO(), "abc/a", clientv3.WithPrefix())
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 2 {
|
||||
t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs)
|
||||
}
|
||||
require.Lenf(t, resp.Kvs, 2, "expected keys %+v, got response keys %+v", keys, resp.Kvs)
|
||||
}
|
||||
|
||||
// TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key.
|
||||
@ -137,9 +133,7 @@ func TestLeasingPutInvalidateNew(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
cResp, cerr := clus.Client(0).Get(context.TODO(), "k")
|
||||
require.NoError(t, cerr)
|
||||
if !reflect.DeepEqual(lkvResp, cResp) {
|
||||
t.Fatalf(`expected %+v, got response %+v`, cResp, lkvResp)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(lkvResp, cResp), `expected %+v, got response %+v`, cResp, lkvResp)
|
||||
}
|
||||
|
||||
// TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key.
|
||||
@ -164,9 +158,7 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
cResp, cerr := clus.Client(0).Get(context.TODO(), "k")
|
||||
require.NoError(t, cerr)
|
||||
if !reflect.DeepEqual(lkvResp, cResp) {
|
||||
t.Fatalf(`expected %+v, got response %+v`, cResp, lkvResp)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(lkvResp, cResp), `expected %+v, got response %+v`, cResp, lkvResp)
|
||||
}
|
||||
|
||||
// TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased.
|
||||
@ -218,9 +210,7 @@ func TestLeasingGetSerializable(t *testing.T) {
|
||||
// don't necessarily try to acquire leasing key ownership for new key
|
||||
resp, err := lkv.Get(context.TODO(), "uncached", clientv3.WithSerializable())
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 0 {
|
||||
t.Fatalf(`expected no keys, got response %+v`, resp)
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, `expected no keys, got response %+v`, resp)
|
||||
|
||||
clus.Members[0].Stop(t)
|
||||
|
||||
@ -414,9 +404,7 @@ func TestLeasingDeleteOwner(t *testing.T) {
|
||||
resp, err := lkv.Get(context.TODO(), "k")
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(resp.Kvs) != 0 {
|
||||
t.Fatalf(`expected "k" to be deleted, got response %+v`, resp)
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, `expected "k" to be deleted, got response %+v`, resp)
|
||||
// try to double delete
|
||||
_, err = lkv.Delete(context.TODO(), "k")
|
||||
require.NoError(t, err)
|
||||
@ -447,9 +435,7 @@ func TestLeasingDeleteNonOwner(t *testing.T) {
|
||||
// key should be removed from lkv1
|
||||
resp, err := lkv1.Get(context.TODO(), "k")
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 0 {
|
||||
t.Fatalf(`expected "k" to be deleted, got response %+v`, resp)
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, `expected "k" to be deleted, got response %+v`, resp)
|
||||
}
|
||||
|
||||
func TestLeasingOverwriteResponse(t *testing.T) {
|
||||
@ -532,9 +518,8 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
|
||||
|
||||
tresp, terr := lkv.Txn(context.TODO()).Then(clientv3.OpGet("k-", clientv3.WithPrefix())).Commit()
|
||||
require.NoError(t, terr)
|
||||
if resp := tresp.Responses[0].GetResponseRange(); len(resp.Kvs) != keyCount {
|
||||
t.Fatalf("expected %d keys, got response %+v", keyCount, resp.Kvs)
|
||||
}
|
||||
resp := tresp.Responses[0].GetResponseRange()
|
||||
require.Equalf(t, len(resp.Kvs), keyCount, "expected %d keys, got response %+v", keyCount, resp.Kvs)
|
||||
}
|
||||
|
||||
func TestLeasingTxnOwnerGet(t *testing.T) {
|
||||
@ -596,22 +581,14 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
|
||||
Else(elseOps...).Commit()
|
||||
|
||||
require.NoError(t, terr)
|
||||
if tresp.Succeeded != useThen {
|
||||
t.Fatalf("expected succeeded=%v, got tresp=%+v", useThen, tresp)
|
||||
}
|
||||
if len(tresp.Responses) != len(ops) {
|
||||
t.Fatalf("expected %d responses, got %d", len(ops), len(tresp.Responses))
|
||||
}
|
||||
require.Equalf(t, tresp.Succeeded, useThen, "expected succeeded=%v, got tresp=%+v", useThen, tresp)
|
||||
require.Lenf(t, ops, len(tresp.Responses), "expected %d responses, got %d", len(ops), len(tresp.Responses))
|
||||
wrev := presps[len(presps)-1].Header.Revision
|
||||
if tresp.Header.Revision < wrev {
|
||||
t.Fatalf("expected header revision >= %d, got %d", wrev, tresp.Header.Revision)
|
||||
}
|
||||
require.GreaterOrEqualf(t, tresp.Header.Revision, wrev, "expected header revision >= %d, got %d", wrev, tresp.Header.Revision)
|
||||
for i := range ops {
|
||||
k := fmt.Sprintf("k-%d", i)
|
||||
rr := tresp.Responses[i].GetResponseRange()
|
||||
if rr == nil {
|
||||
t.Fatalf("expected get response, got %+v", tresp.Responses[i])
|
||||
}
|
||||
require.NotNilf(t, rr, "expected get response, got %+v", tresp.Responses[i])
|
||||
if string(rr.Kvs[0].Key) != k || string(rr.Kvs[0].Value) != k+k {
|
||||
t.Errorf(`expected key for %q, got %+v`, k, rr.Kvs)
|
||||
}
|
||||
@ -637,18 +614,14 @@ func TestLeasingTxnOwnerDeleteRange(t *testing.T) {
|
||||
// cache in lkv
|
||||
resp, err := lkv.Get(context.TODO(), "k-", clientv3.WithPrefix())
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != keyCount {
|
||||
t.Fatalf("expected %d keys, got %d", keyCount, len(resp.Kvs))
|
||||
}
|
||||
require.Equalf(t, len(resp.Kvs), keyCount, "expected %d keys, got %d", keyCount, len(resp.Kvs))
|
||||
|
||||
_, terr := lkv.Txn(context.TODO()).Then(clientv3.OpDelete("k-", clientv3.WithPrefix())).Commit()
|
||||
require.NoError(t, terr)
|
||||
|
||||
resp, err = lkv.Get(context.TODO(), "k-", clientv3.WithPrefix())
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 0 {
|
||||
t.Fatalf("expected no keys, got %d", len(resp.Kvs))
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, "expected no keys, got %d", len(resp.Kvs))
|
||||
}
|
||||
|
||||
func TestLeasingTxnOwnerDelete(t *testing.T) {
|
||||
@ -672,9 +645,7 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
|
||||
|
||||
resp, err := lkv.Get(context.TODO(), "k")
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 0 {
|
||||
t.Fatalf("expected no keys, got %d", len(resp.Kvs))
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, "expected no keys, got %d", len(resp.Kvs))
|
||||
}
|
||||
|
||||
func TestLeasingTxnOwnerIf(t *testing.T) {
|
||||
@ -794,9 +765,8 @@ func TestLeasingTxnCancel(t *testing.T) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
cancel()
|
||||
}()
|
||||
if _, err := lkv2.Txn(ctx).Then(clientv3.OpPut("k", "v")).Commit(); !errors.Is(err, context.Canceled) {
|
||||
t.Fatalf("expected %v, got %v", context.Canceled, err)
|
||||
}
|
||||
_, err = lkv2.Txn(ctx).Then(clientv3.OpPut("k", "v")).Commit()
|
||||
require.ErrorIsf(t, err, context.Canceled, "expected %v, got %v", context.Canceled, err)
|
||||
}
|
||||
|
||||
func TestLeasingTxnNonOwnerPut(t *testing.T) {
|
||||
@ -860,9 +830,7 @@ func TestLeasingTxnNonOwnerPut(t *testing.T) {
|
||||
c++
|
||||
}
|
||||
}
|
||||
if c != 3 {
|
||||
t.Fatalf("expected 3 put events, got %+v", evs)
|
||||
}
|
||||
require.Equalf(t, 3, c, "expected 3 put events, got %+v", evs)
|
||||
}
|
||||
|
||||
// TestLeasingTxnRandIfThenOrElse randomly leases keys two separate clients, then
|
||||
@ -946,9 +914,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
|
||||
tresp, terr := lkv1.Txn(context.TODO()).If(cmps...).Then(thenOps...).Else(elseOps...).Commit()
|
||||
require.NoError(t, terr)
|
||||
// cmps always succeed
|
||||
if tresp.Succeeded != useThen {
|
||||
t.Fatalf("expected succeeded=%v, got tresp=%+v", useThen, tresp)
|
||||
}
|
||||
require.Equalf(t, tresp.Succeeded, useThen, "expected succeeded=%v, got tresp=%+v", useThen, tresp)
|
||||
// get should match what was put
|
||||
checkPuts := func(s string, kv clientv3.KV) {
|
||||
for _, op := range ops {
|
||||
@ -982,9 +948,8 @@ func TestLeasingOwnerPutError(t *testing.T) {
|
||||
clus.Members[0].Stop(t)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
if resp, err := lkv.Put(ctx, "k", "v"); err == nil {
|
||||
t.Fatalf("expected error, got response %+v", resp)
|
||||
}
|
||||
resp, err := lkv.Put(ctx, "k", "v")
|
||||
require.Errorf(t, err, "expected error, got response %+v", resp)
|
||||
}
|
||||
|
||||
func TestLeasingOwnerDeleteError(t *testing.T) {
|
||||
@ -1002,9 +967,8 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
|
||||
clus.Members[0].Stop(t)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
if resp, err := lkv.Delete(ctx, "k"); err == nil {
|
||||
t.Fatalf("expected error, got response %+v", resp)
|
||||
}
|
||||
resp, err := lkv.Delete(ctx, "k")
|
||||
require.Errorf(t, err, "expected error, got response %+v", resp)
|
||||
}
|
||||
|
||||
func TestLeasingNonOwnerPutError(t *testing.T) {
|
||||
@ -1019,9 +983,8 @@ func TestLeasingNonOwnerPutError(t *testing.T) {
|
||||
clus.Members[0].Stop(t)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
|
||||
defer cancel()
|
||||
if resp, err := lkv.Put(ctx, "k", "v"); err == nil {
|
||||
t.Fatalf("expected error, got response %+v", resp)
|
||||
}
|
||||
resp, err := lkv.Put(ctx, "k", "v")
|
||||
require.Errorf(t, err, "expected error, got response %+v", resp)
|
||||
}
|
||||
|
||||
func TestLeasingOwnerDeletePrefix(t *testing.T) {
|
||||
@ -1057,9 +1020,7 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
|
||||
for i := 0; i < 8; i++ {
|
||||
resp, err := lkv.Get(context.TODO(), fmt.Sprintf("key/%d", i))
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) != 0 {
|
||||
t.Fatalf("expected no keys on key/%d, got %+v", i, resp)
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, "expected no keys on key/%d, got %+v", i, resp)
|
||||
}
|
||||
|
||||
// confirm keys were deleted atomically
|
||||
@ -1070,9 +1031,8 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
|
||||
clientv3.WithRev(delResp.Header.Revision),
|
||||
clientv3.WithPrefix())
|
||||
|
||||
if wresp := <-w; len(wresp.Events) != 8 {
|
||||
t.Fatalf("expected %d delete events,got %d", 8, len(wresp.Events))
|
||||
}
|
||||
wresp := <-w
|
||||
require.Lenf(t, wresp.Events, 8, "expected %d delete events,got %d", 8, len(wresp.Events))
|
||||
}
|
||||
|
||||
func TestLeasingDeleteRangeBounds(t *testing.T) {
|
||||
@ -1102,9 +1062,7 @@ func TestLeasingDeleteRangeBounds(t *testing.T) {
|
||||
for _, k := range []string{"j", "m"} {
|
||||
resp, geterr := clus.Client(0).Get(context.TODO(), "0/"+k, clientv3.WithPrefix())
|
||||
require.NoError(t, geterr)
|
||||
if len(resp.Kvs) != 1 {
|
||||
t.Fatalf("expected leasing key, got %+v", resp)
|
||||
}
|
||||
require.Lenf(t, resp.Kvs, 1, "expected leasing key, got %+v", resp)
|
||||
}
|
||||
|
||||
// j and m should still have leases registered since not under k*
|
||||
@ -1220,15 +1178,11 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
|
||||
resp, err := lkvs[0].Get(context.TODO(), "k")
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(resp.Kvs) > 0 {
|
||||
t.Fatalf("expected no kvs, got %+v", resp.Kvs)
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, "expected no kvs, got %+v", resp.Kvs)
|
||||
|
||||
resp, err = clus.Client(0).Get(context.TODO(), "k")
|
||||
require.NoError(t, err)
|
||||
if len(resp.Kvs) > 0 {
|
||||
t.Fatalf("expected no kvs, got %+v", resp.Kvs)
|
||||
}
|
||||
require.Emptyf(t, resp.Kvs, "expected no kvs, got %+v", resp.Kvs)
|
||||
}
|
||||
|
||||
// TestLeasingReconnectOwnerRevoke checks that revocation works if
|
||||
@ -1330,9 +1284,7 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
resp, err := lkv1.Get(cctx, "k")
|
||||
require.NoError(t, err)
|
||||
if string(resp.Kvs[0].Value) != "v" {
|
||||
t.Fatalf(`expected "v" value, got %+v`, resp)
|
||||
}
|
||||
require.Equalf(t, "v", string(resp.Kvs[0].Value), `expected "v" value, got %+v`, resp)
|
||||
}
|
||||
|
||||
// TestLeasingReconnectOwnerConsistency checks a write error on an owner will
|
||||
@ -1399,9 +1351,7 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) {
|
||||
require.NoError(t, lerr)
|
||||
cresp, cerr := clus.Client(0).Get(context.TODO(), "k")
|
||||
require.NoError(t, cerr)
|
||||
if !reflect.DeepEqual(lresp.Kvs, cresp.Kvs) {
|
||||
t.Fatalf("expected %+v, got %+v", cresp, lresp)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(lresp.Kvs, cresp.Kvs), "expected %+v, got %+v", cresp, lresp)
|
||||
}
|
||||
|
||||
func TestLeasingTxnAtomicCache(t *testing.T) {
|
||||
@ -1561,9 +1511,7 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
|
||||
require.NoError(t, lerr)
|
||||
cresp, cerr := clus.Client(0).Get(context.TODO(), k)
|
||||
require.NoError(t, cerr)
|
||||
if !reflect.DeepEqual(lresp.Kvs, cresp.Kvs) {
|
||||
t.Fatalf("expected %+v, got %+v", cresp, lresp)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(lresp.Kvs, cresp.Kvs), "expected %+v, got %+v", cresp, lresp)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1591,9 +1539,7 @@ func TestLeasingTxnRangeCmp(t *testing.T) {
|
||||
cmp := clientv3.Compare(clientv3.Version("k").WithPrefix(), "=", 1)
|
||||
tresp, terr := lkv.Txn(context.TODO()).If(cmp).Commit()
|
||||
require.NoError(t, terr)
|
||||
if tresp.Succeeded {
|
||||
t.Fatalf("expected Succeeded=false, got %+v", tresp)
|
||||
}
|
||||
require.Falsef(t, tresp.Succeeded, "expected Succeeded=false, got %+v", tresp)
|
||||
}
|
||||
|
||||
func TestLeasingDo(t *testing.T) {
|
||||
@ -1631,9 +1577,7 @@ func TestLeasingDo(t *testing.T) {
|
||||
|
||||
gresp, err := clus.Client(0).Get(context.TODO(), "a", clientv3.WithPrefix())
|
||||
require.NoError(t, err)
|
||||
if len(gresp.Kvs) != 0 {
|
||||
t.Fatalf("expected no keys, got %+v", gresp.Kvs)
|
||||
}
|
||||
require.Emptyf(t, gresp.Kvs, "expected no keys, got %+v", gresp.Kvs)
|
||||
}
|
||||
|
||||
func TestLeasingTxnOwnerPutBranch(t *testing.T) {
|
||||
@ -1667,9 +1611,7 @@ func TestLeasingTxnOwnerPutBranch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
clusResp, err := clus.Client(1).Get(context.TODO(), k)
|
||||
require.NoError(t, err)
|
||||
if !reflect.DeepEqual(clusResp.Kvs, lkvResp.Kvs) {
|
||||
t.Fatalf("expected %+v, got %+v", clusResp.Kvs, lkvResp.Kvs)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(clusResp.Kvs, lkvResp.Kvs), "expected %+v, got %+v", clusResp.Kvs, lkvResp.Kvs)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1751,9 +1693,7 @@ func TestLeasingSessionExpire(t *testing.T) {
|
||||
|
||||
resp, err := lkv.Get(context.TODO(), "abc")
|
||||
require.NoError(t, err)
|
||||
if v := string(resp.Kvs[0].Value); v != "def" {
|
||||
t.Fatalf("expected %q, got %q", "v", v)
|
||||
}
|
||||
require.Equal(t, "def", string(resp.Kvs[0].Value))
|
||||
}
|
||||
|
||||
func TestLeasingSessionExpireCancel(t *testing.T) {
|
||||
|
@ -61,14 +61,10 @@ func TestEndpointManager(t *testing.T) {
|
||||
Endpoint: e1,
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(us[0], wu) {
|
||||
t.Fatalf("up = %#v, want %#v", us[0], wu)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(us[0], wu), "up = %#v, want %#v", us[0], wu)
|
||||
|
||||
err = em.DeleteEndpoint(context.TODO(), "foo/a1")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to udpate %v", err)
|
||||
}
|
||||
require.NoErrorf(t, err, "failed to udpate %v", err)
|
||||
|
||||
us = <-w
|
||||
if us == nil {
|
||||
@ -80,9 +76,7 @@ func TestEndpointManager(t *testing.T) {
|
||||
Key: "foo/a1",
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(us[0], wu) {
|
||||
t.Fatalf("up = %#v, want %#v", us[1], wu)
|
||||
}
|
||||
require.Truef(t, reflect.DeepEqual(us[0], wu), "up = %#v, want %#v", us[0], wu)
|
||||
}
|
||||
|
||||
// TestEndpointManagerAtomicity ensures the resolver will initialize
|
||||
@ -112,9 +106,7 @@ func TestEndpointManagerAtomicity(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
updates := <-w
|
||||
if len(updates) != 2 {
|
||||
t.Fatalf("expected two updates, got %+v", updates)
|
||||
}
|
||||
require.Lenf(t, updates, 2, "expected two updates, got %+v", updates)
|
||||
|
||||
_, err = c.Txn(context.TODO()).Then(etcd.OpDelete("foo/host"), etcd.OpDelete("foo/host2")).Commit()
|
||||
require.NoError(t, err)
|
||||
@ -155,15 +147,9 @@ func TestEndpointManagerCRUD(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal("failed to list foo")
|
||||
}
|
||||
if len(eps) != 2 {
|
||||
t.Fatalf("unexpected the number of endpoints: %d", len(eps))
|
||||
}
|
||||
if !reflect.DeepEqual(eps[k1], e1) {
|
||||
t.Fatalf("unexpected endpoints: %s", k1)
|
||||
}
|
||||
if !reflect.DeepEqual(eps[k2], e2) {
|
||||
t.Fatalf("unexpected endpoints: %s", k2)
|
||||
}
|
||||
require.Lenf(t, eps, 2, "unexpected the number of endpoints: %d", len(eps))
|
||||
require.Truef(t, reflect.DeepEqual(eps[k1], e1), "unexpected endpoints: %s", k1)
|
||||
require.Truef(t, reflect.DeepEqual(eps[k2], e2), "unexpected endpoints: %s", k2)
|
||||
|
||||
// Delete
|
||||
err = em.DeleteEndpoint(context.TODO(), k1)
|
||||
@ -175,12 +161,8 @@ func TestEndpointManagerCRUD(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal("failed to list foo")
|
||||
}
|
||||
if len(eps) != 1 {
|
||||
t.Fatalf("unexpected the number of endpoints: %d", len(eps))
|
||||
}
|
||||
if !reflect.DeepEqual(eps[k2], e2) {
|
||||
t.Fatalf("unexpected endpoints: %s", k2)
|
||||
}
|
||||
require.Lenf(t, eps, 1, "unexpected the number of endpoints: %d", len(eps))
|
||||
require.Truef(t, reflect.DeepEqual(eps[k2], e2), "unexpected endpoints: %s", k2)
|
||||
|
||||
// Update
|
||||
k3 := "foo/a3"
|
||||
@ -198,10 +180,6 @@ func TestEndpointManagerCRUD(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal("failed to list foo")
|
||||
}
|
||||
if len(eps) != 1 {
|
||||
t.Fatalf("unexpected the number of endpoints: %d", len(eps))
|
||||
}
|
||||
if !reflect.DeepEqual(eps[k3], e3) {
|
||||
t.Fatalf("unexpected endpoints: %s", k3)
|
||||
}
|
||||
require.Lenf(t, eps, 1, "unexpected the number of endpoints: %d", len(eps))
|
||||
require.Truef(t, reflect.DeepEqual(eps[k3], e3), "unexpected endpoints: %s", k3)
|
||||
}
|
||||
|
@ -104,26 +104,20 @@ func testEtcdGRPCResolver(t *testing.T, lbPolicy string) {
|
||||
|
||||
t.Logf("Response: %v", string(resp.GetPayload().GetBody()))
|
||||
|
||||
if resp.GetPayload() == nil {
|
||||
t.Fatalf("unexpected response from foo: %s", resp.GetPayload().GetBody())
|
||||
}
|
||||
require.NotNilf(t, resp.GetPayload(), "unexpected response from foo: %s", resp.GetPayload().GetBody())
|
||||
lastResponse = resp.GetPayload().GetBody()
|
||||
}
|
||||
|
||||
// If the load balancing policy is pick first then return payload should equal number of requests
|
||||
t.Logf("Last response: %v", string(lastResponse))
|
||||
if lbPolicy == "pick_first" {
|
||||
if string(lastResponse) != "3500" {
|
||||
t.Fatalf("unexpected total responses from foo: %s", lastResponse)
|
||||
}
|
||||
require.Equalf(t, "3500", string(lastResponse), "unexpected total responses from foo: %s", lastResponse)
|
||||
}
|
||||
|
||||
// If the load balancing policy is round robin we should see roughly half total requests served by each server
|
||||
if lbPolicy == "round_robin" {
|
||||
responses, err := strconv.Atoi(string(lastResponse))
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't convert to int: %s", lastResponse)
|
||||
}
|
||||
require.NoErrorf(t, err, "couldn't convert to int: %s", lastResponse)
|
||||
|
||||
// Allow 25% tolerance as round robin is not perfect and we don't want the test to flake
|
||||
expected := float64(totalRequests) * 0.5
|
||||
|
@ -138,7 +138,5 @@ func TestUnresolvableOrderViolation(t *testing.T) {
|
||||
time.Sleep(1 * time.Second) // give enough time for operation
|
||||
|
||||
_, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
|
||||
if !errors.Is(err, ordering.ErrNoGreaterRev) {
|
||||
t.Fatalf("expected %v, got %v", ordering.ErrNoGreaterRev, err)
|
||||
}
|
||||
require.ErrorIsf(t, err, ordering.ErrNoGreaterRev, "expected %v, got %v", ordering.ErrNoGreaterRev, err)
|
||||
}
|
||||
|
@ -44,14 +44,10 @@ func TestSaveSnapshotFilePermissions(t *testing.T) {
|
||||
defer os.RemoveAll(dbPath)
|
||||
|
||||
dbInfo, err := os.Stat(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get test snapshot file status: %v", err)
|
||||
}
|
||||
require.NoErrorf(t, err, "failed to get test snapshot file status: %v", err)
|
||||
actualFileMode := dbInfo.Mode()
|
||||
|
||||
if expectedFileMode != actualFileMode {
|
||||
t.Fatalf("expected test snapshot file mode %s, got %s:", expectedFileMode, actualFileMode)
|
||||
}
|
||||
require.Equalf(t, expectedFileMode, actualFileMode, "expected test snapshot file mode %s, got %s:", expectedFileMode, actualFileMode)
|
||||
}
|
||||
|
||||
// TestSaveSnapshotVersion ensures that the snapshot returns proper storage version.
|
||||
@ -67,9 +63,7 @@ func TestSaveSnapshotVersion(t *testing.T) {
|
||||
ver, dbPath := createSnapshotFile(t, cfg, kvs)
|
||||
defer os.RemoveAll(dbPath)
|
||||
|
||||
if ver != "3.6.0" {
|
||||
t.Fatalf("expected snapshot version %s, got %s:", "3.6.0", ver)
|
||||
}
|
||||
require.Equalf(t, "3.6.0", ver, "expected snapshot version %s, got %s:", "3.6.0", ver)
|
||||
}
|
||||
|
||||
type kv struct {
|
||||
|
@ -40,19 +40,13 @@ func TestUserError(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = authapi.UserAdd(context.TODO(), "foo", "bar")
|
||||
if !errors.Is(err, rpctypes.ErrUserAlreadyExist) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrUserAlreadyExist, err)
|
||||
}
|
||||
require.ErrorIsf(t, err, rpctypes.ErrUserAlreadyExist, "expected %v, got %v", rpctypes.ErrUserAlreadyExist, err)
|
||||
|
||||
_, err = authapi.UserDelete(context.TODO(), "not-exist-user")
|
||||
if !errors.Is(err, rpctypes.ErrUserNotFound) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
|
||||
}
|
||||
require.ErrorIsf(t, err, rpctypes.ErrUserNotFound, "expected %v, got %v", rpctypes.ErrUserNotFound, err)
|
||||
|
||||
_, err = authapi.UserGrantRole(context.TODO(), "foo", "test-role-does-not-exist")
|
||||
if !errors.Is(err, rpctypes.ErrRoleNotFound) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrRoleNotFound, err)
|
||||
}
|
||||
require.ErrorIsf(t, err, rpctypes.ErrRoleNotFound, "expected %v, got %v", rpctypes.ErrRoleNotFound, err)
|
||||
}
|
||||
|
||||
func TestAddUserAfterDelete(t *testing.T) {
|
||||
@ -115,9 +109,8 @@ func TestUserErrorAuth(t *testing.T) {
|
||||
authSetupRoot(t, authapi.Auth)
|
||||
|
||||
// unauthenticated client
|
||||
if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); !errors.Is(err, rpctypes.ErrUserEmpty) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrUserEmpty, err)
|
||||
}
|
||||
_, err := authapi.UserAdd(context.TODO(), "foo", "bar")
|
||||
require.ErrorIsf(t, err, rpctypes.ErrUserEmpty, "expected %v, got %v", rpctypes.ErrUserEmpty, err)
|
||||
|
||||
// wrong id or password
|
||||
cfg := clientv3.Config{
|
||||
@ -126,13 +119,11 @@ func TestUserErrorAuth(t *testing.T) {
|
||||
DialOptions: []grpc.DialOption{grpc.WithBlock()},
|
||||
}
|
||||
cfg.Username, cfg.Password = "wrong-id", "123"
|
||||
if _, err := integration2.NewClient(t, cfg); !errors.Is(err, rpctypes.ErrAuthFailed) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
|
||||
}
|
||||
_, err = integration2.NewClient(t, cfg)
|
||||
require.ErrorIsf(t, err, rpctypes.ErrAuthFailed, "expected %v, got %v", rpctypes.ErrAuthFailed, err)
|
||||
cfg.Username, cfg.Password = "root", "wrong-pass"
|
||||
if _, err := integration2.NewClient(t, cfg); !errors.Is(err, rpctypes.ErrAuthFailed) {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
|
||||
}
|
||||
_, err = integration2.NewClient(t, cfg)
|
||||
require.ErrorIsf(t, err, rpctypes.ErrAuthFailed, "expected %v, got %v", rpctypes.ErrAuthFailed, err)
|
||||
|
||||
cfg.Username, cfg.Password = "root", "123"
|
||||
authed, err := integration2.NewClient(t, cfg)
|
||||
|
@ -23,6 +23,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.etcd.io/etcd/client/pkg/v3/testutil"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
|
||||
@ -87,9 +89,8 @@ func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) {
|
||||
}(i)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
if err := <-errc; err != nil {
|
||||
t.Fatalf("failed to put: %v", err)
|
||||
}
|
||||
err := <-errc
|
||||
require.NoErrorf(t, err, "failed to put")
|
||||
}
|
||||
|
||||
opts := []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithRev(1)}
|
||||
@ -103,23 +104,15 @@ func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) {
|
||||
case ws := <-wch:
|
||||
// without fragment, should exceed gRPC client receive limit
|
||||
if !fragment && exceedRecvLimit {
|
||||
if len(ws.Events) != 0 {
|
||||
t.Fatalf("expected 0 events with watch fragmentation, got %d", len(ws.Events))
|
||||
}
|
||||
require.Emptyf(t, ws.Events, "expected 0 events with watch fragmentation")
|
||||
exp := "code = ResourceExhausted desc = grpc: received message larger than max ("
|
||||
if !strings.Contains(ws.Err().Error(), exp) {
|
||||
t.Fatalf("expected 'ResourceExhausted' error, got %v", ws.Err())
|
||||
}
|
||||
require.Containsf(t, ws.Err().Error(), exp, "expected 'ResourceExhausted' error")
|
||||
return
|
||||
}
|
||||
|
||||
// still expect merged watch events
|
||||
if len(ws.Events) != 10 {
|
||||
t.Fatalf("expected 10 events with watch fragmentation, got %d", len(ws.Events))
|
||||
}
|
||||
if ws.Err() != nil {
|
||||
t.Fatalf("unexpected error %v", ws.Err())
|
||||
}
|
||||
require.Lenf(t, ws.Events, 10, "expected 10 events with watch fragmentation")
|
||||
require.NoErrorf(t, ws.Err(), "unexpected error")
|
||||
|
||||
case <-time.After(testutil.RequestTimeout):
|
||||
t.Fatalf("took too long to receive events")
|
||||
|
@ -172,9 +172,8 @@ func TestWatchRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func testWatchRange(t *testing.T, wctx *watchctx) {
|
||||
if wctx.ch = wctx.w.Watch(context.TODO(), "a", clientv3.WithRange("c")); wctx.ch == nil {
|
||||
t.Fatalf("expected non-nil channel")
|
||||
}
|
||||
wctx.ch = wctx.w.Watch(context.TODO(), "a", clientv3.WithRange("c"))
|
||||
require.NotNilf(t, wctx.ch, "expected non-nil channel")
|
||||
putAndWatch(t, wctx, "a", "a")
|
||||
putAndWatch(t, wctx, "b", "b")
|
||||
putAndWatch(t, wctx, "bar", "bar")
|
||||
@ -204,9 +203,8 @@ func testWatchReconnRequest(t *testing.T, wctx *watchctx) {
|
||||
}
|
||||
}()
|
||||
// should reconnect when requesting watch
|
||||
if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
|
||||
t.Fatalf("expected non-nil channel")
|
||||
}
|
||||
wctx.ch = wctx.w.Watch(context.TODO(), "a")
|
||||
require.NotNilf(t, wctx.ch, "expected non-nil channel")
|
||||
|
||||
// wait for disconnections to stop
|
||||
stopc <- struct{}{}
|
||||
@ -230,9 +228,8 @@ func TestWatchReconnInit(t *testing.T) {
|
||||
}
|
||||
|
||||
func testWatchReconnInit(t *testing.T, wctx *watchctx) {
|
||||
if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
|
||||
t.Fatalf("expected non-nil channel")
|
||||
}
|
||||
wctx.ch = wctx.w.Watch(context.TODO(), "a")
|
||||
require.NotNilf(t, wctx.ch, "expected non-nil channel")
|
||||
wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
|
||||
// watcher should recover
|
||||
putAndWatch(t, wctx, "a", "a")
|
||||
@ -245,9 +242,8 @@ func TestWatchReconnRunning(t *testing.T) {
|
||||
}
|
||||
|
||||
func testWatchReconnRunning(t *testing.T, wctx *watchctx) {
|
||||
if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
|
||||
t.Fatalf("expected non-nil channel")
|
||||
}
|
||||
wctx.ch = wctx.w.Watch(context.TODO(), "a")
|
||||
require.NotNilf(t, wctx.ch, "expected non-nil channel")
|
||||
putAndWatch(t, wctx, "a", "a")
|
||||
// take down watcher connection
|
||||
wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
|
||||
@ -267,9 +263,7 @@ func testWatchCancelImmediate(t *testing.T, wctx *watchctx) {
|
||||
wch := wctx.w.Watch(ctx, "a")
|
||||
select {
|
||||
case wresp, ok := <-wch:
|
||||
if ok {
|
||||
t.Fatalf("read wch got %v; expected closed channel", wresp)
|
||||
}
|
||||
require.Falsef(t, ok, "read wch got %v; expected closed channel", wresp)
|
||||
default:
|
||||
t.Fatalf("closed watcher channel should not block")
|
||||
}
|
||||
@ -282,17 +276,14 @@ func TestWatchCancelInit(t *testing.T) {
|
||||
|
||||
func testWatchCancelInit(t *testing.T, wctx *watchctx) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
|
||||
t.Fatalf("expected non-nil watcher channel")
|
||||
}
|
||||
wctx.ch = wctx.w.Watch(ctx, "a")
|
||||
require.NotNilf(t, wctx.ch, "expected non-nil watcher channel")
|
||||
cancel()
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("took too long to cancel")
|
||||
case _, ok := <-wctx.ch:
|
||||
if ok {
|
||||
t.Fatalf("expected watcher channel to close")
|
||||
}
|
||||
require.Falsef(t, ok, "expected watcher channel to close")
|
||||
}
|
||||
}
|
||||
|
||||
@ -303,9 +294,8 @@ func TestWatchCancelRunning(t *testing.T) {
|
||||
|
||||
func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
|
||||
t.Fatalf("expected non-nil watcher channel")
|
||||
}
|
||||
wctx.ch = wctx.w.Watch(ctx, "a")
|
||||
require.NotNilf(t, wctx.ch, "expected non-nil watcher channel")
|
||||
_, err := wctx.kv.Put(ctx, "a", "a")
|
||||
require.NoError(t, err)
|
||||
cancel()
|
||||
@ -322,9 +312,7 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("took too long to close")
|
||||
case v, ok2 := <-wctx.ch:
|
||||
if ok2 {
|
||||
t.Fatalf("expected watcher channel to close, got %v", v)
|
||||
}
|
||||
require.Falsef(t, ok2, "expected watcher channel to close, got %v", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -336,15 +324,10 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("watch timed out")
|
||||
case v, ok := <-wctx.ch:
|
||||
if !ok {
|
||||
t.Fatalf("unexpected watch close")
|
||||
}
|
||||
if err := v.Err(); err != nil {
|
||||
t.Fatalf("unexpected watch response error: %v", err)
|
||||
}
|
||||
if string(v.Events[0].Kv.Value) != val {
|
||||
t.Fatalf("bad value got %v, wanted %v", v.Events[0].Kv.Value, val)
|
||||
}
|
||||
require.Truef(t, ok, "unexpected watch close")
err := v.Err()
require.NoErrorf(t, err, "unexpected watch response error")
require.Equalf(t, string(v.Events[0].Kv.Value), val, "bad value got %v, wanted %v", v.Events[0].Kv.Value, val)
}
}

@ -393,12 +376,8 @@ func TestWatchResumeAfterDisconnect(t *testing.T) {
if len(resp.Events) != 2 {
t.Fatal("expected two events on watch")
}
if string(resp.Events[0].Kv.Value) != "3" {
t.Fatalf("expected value=3, got event %+v", resp.Events[0])
}
if string(resp.Events[1].Kv.Value) != "4" {
t.Fatalf("expected value=4, got event %+v", resp.Events[1])
}
require.Equalf(t, "3", string(resp.Events[0].Kv.Value), "expected value=3, got event %+v", resp.Events[0])
require.Equalf(t, "4", string(resp.Events[1].Kv.Value), "expected value=4, got event %+v", resp.Events[1])
case <-time.After(5 * time.Second):
t.Fatal("watch timed out")
}
@ -449,16 +428,12 @@ func TestWatchResumeCompacted(t *testing.T) {
var ok bool
select {
case wresp, ok = <-wch:
if !ok {
t.Fatalf("expected wresp, but got closed channel")
}
require.Truef(t, ok, "expected wresp, but got closed channel")
case <-time.After(5 * time.Second):
t.Fatalf("compacted watch timed out")
}
for _, ev := range wresp.Events {
if ev.Kv.ModRevision != wRev {
t.Fatalf("expected modRev %v, got %+v", wRev, ev)
}
require.Equalf(t, ev.Kv.ModRevision, wRev, "expected modRev %v, got %+v", wRev, ev)
wRev++
}
if wresp.Err() == nil {
@ -643,9 +618,7 @@ func TestWatchRequestProgress(t *testing.T) {
for _, rch := range watchChans {
select {
case resp := <-rch: // wait for notification
if len(resp.Events) != 1 {
t.Fatalf("resp.Events expected 1, got %d", len(resp.Events))
}
require.Lenf(t, resp.Events, 1, "resp.Events expected 1, got %d", len(resp.Events))
case <-time.After(watchTimeout):
t.Fatalf("watch response expected in %v, but timed out", watchTimeout)
}
@ -661,12 +634,8 @@ func TestWatchRequestProgress(t *testing.T) {
for _, rch := range watchChans {
select {
case resp := <-rch:
if !resp.IsProgressNotify() {
t.Fatalf("expected resp.IsProgressNotify() == true")
}
if resp.Header.Revision != 3 {
t.Fatalf("resp.Header.Revision expected 3, got %d", resp.Header.Revision)
}
require.Truef(t, resp.IsProgressNotify(), "expected resp.IsProgressNotify() == true")
require.Equalf(t, int64(3), resp.Header.Revision, "resp.Header.Revision expected 3, got %d", resp.Header.Revision)
case <-time.After(watchTimeout):
t.Fatalf("progress response expected in %v, but timed out", watchTimeout)
}
@ -685,22 +654,16 @@ func TestWatchEventType(t *testing.T) {
ctx := context.Background()
watchChan := client.Watch(ctx, "/", clientv3.WithPrefix())

if _, err := client.Put(ctx, "/toDelete", "foo"); err != nil {
t.Fatalf("Put failed: %v", err)
}
if _, err := client.Put(ctx, "/toDelete", "bar"); err != nil {
t.Fatalf("Put failed: %v", err)
}
if _, err := client.Delete(ctx, "/toDelete"); err != nil {
t.Fatalf("Delete failed: %v", err)
}
_, err := client.Put(ctx, "/toDelete", "foo")
require.NoErrorf(t, err, "Put failed: %v", err)
_, err = client.Put(ctx, "/toDelete", "bar")
require.NoErrorf(t, err, "Put failed: %v", err)
_, err = client.Delete(ctx, "/toDelete")
require.NoErrorf(t, err, "Delete failed: %v", err)
lcr, err := client.Lease.Grant(ctx, 1)
if err != nil {
t.Fatalf("lease create failed: %v", err)
}
if _, err := client.Put(ctx, "/toExpire", "foo", clientv3.WithLease(lcr.ID)); err != nil {
t.Fatalf("Put failed: %v", err)
}
require.NoErrorf(t, err, "lease create failed: %v", err)
_, err = client.Put(ctx, "/toExpire", "foo", clientv3.WithLease(lcr.ID))
require.NoErrorf(t, err, "Put failed: %v", err)

tests := []struct {
et mvccpb.Event_EventType
@ -835,28 +798,21 @@ func TestWatchWithRequireLeader(t *testing.T) {

select {
case resp, ok := <-chLeader:
if !ok {
t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
}
if !errors.Is(resp.Err(), rpctypes.ErrNoLeader) {
t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
}
require.Truef(t, ok, "expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
require.ErrorIsf(t, resp.Err(), rpctypes.ErrNoLeader, "expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("watch without leader took too long to close")
}

select {
case resp, ok := <-chLeader:
if ok {
t.Fatalf("expected closed channel, got response %v", resp)
}
require.Falsef(t, ok, "expected closed channel, got response %v", resp)
case <-time.After(integration2.RequestWaitTimeout):
t.Fatal("waited too long for channel to close")
}

if _, ok := <-chNoLeader; !ok {
t.Fatalf("expected response, got closed channel")
}
_, ok := <-chNoLeader
require.Truef(t, ok, "expected response, got closed channel")

cnt, err := clus.Members[0].Metric(
"etcd_server_client_requests_total",
@ -866,9 +822,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
require.NoError(t, err)
cv, err := strconv.ParseInt(cnt, 10, 32)
require.NoError(t, err)
if cv < 2 { // >2 when retried
t.Fatalf("expected at least 2, got %q", cnt)
}
require.GreaterOrEqualf(t, cv, int64(2), "expected at least 2, got %q", cnt)
}

// TestWatchWithFilter checks that watch filtering works.
@ -923,9 +877,7 @@ func TestWatchWithCreatedNotification(t *testing.T) {

resp := <-createC

if !resp.Created {
t.Fatalf("expected created event, got %v", resp)
}
require.Truef(t, resp.Created, "expected created event, got %v", resp)
}

// TestWatchWithCreatedNotificationDropConn ensures that
@ -943,9 +895,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {

resp := <-wch

if !resp.Created {
t.Fatalf("expected created event, got %v", resp)
}
require.Truef(t, resp.Created, "expected created event, got %v", resp)

cluster.Members[0].Bridge().DropConnections()

@ -1012,9 +962,7 @@ func TestWatchCancelOnServer(t *testing.T) {
t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr)
}

if maxWatchV-minWatchV < numWatches {
t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV)
}
require.GreaterOrEqualf(t, maxWatchV-minWatchV, numWatches, "expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV)
}

// TestWatchOverlapContextCancel stresses the watcher stream teardown path by
@ -1180,14 +1128,8 @@ func testWatchClose(t *testing.T, wctx *watchctx) {
ctx, cancel := context.WithCancel(context.Background())
wch := wctx.w.Watch(ctx, "a")
cancel()
if wch == nil {
t.Fatalf("expected watcher channel, got nil")
}
if wctx.w.Close() != nil {
t.Fatalf("watch did not close successfully")
}
require.NotNilf(t, wch, "expected watcher channel, got nil")
require.NoErrorf(t, wctx.w.Close(), "watch did not close successfully")
wresp, ok := <-wch
if ok {
t.Fatalf("read wch got %v; expected closed channel", wresp)
}
require.Falsef(t, ok, "read wch got %v; expected closed channel", wresp)
}

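The watch hunks above repeatedly unpack a channel receive before asserting on it, because require needs a plain bool rather than an if-initializer. As a minimal, self-contained sketch of that pattern (the channel and values here are invented for illustration and are not part of this change):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestReceiveThenAssert mirrors the conversion used for the watch channels
// above: the receive moves out of the `if _, ok := <-ch; !ok { ... }` form so
// the ok flag can be handed to require.Truef, which aborts the test the way
// t.Fatalf did.
func TestReceiveThenAssert(t *testing.T) {
	ch := make(chan string, 1)
	ch <- "created"

	resp, ok := <-ch
	require.Truef(t, ok, "expected a response, got closed channel")
	require.Equal(t, "created", resp)

	close(ch)
	_, ok = <-ch
	require.Falsef(t, ok, "expected closed channel, got another response")
}
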
@ -132,9 +132,7 @@ func TestForceNewCluster(t *testing.T) {

ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
resp, err := c.Members[0].Client.Put(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
require.NoErrorf(t, err, "unexpected create error")
cancel()
// ensure create has been applied in this machine
ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout)
@ -143,12 +141,8 @@ func TestForceNewCluster(t *testing.T) {
if len(resp.Events) != 0 {
break
}
if resp.Err() != nil {
t.Fatalf("unexpected watch error: %q", resp.Err())
}
if resp.Canceled {
t.Fatalf("watch cancelled")
}
require.NoErrorf(t, resp.Err(), "unexpected watch error")
require.Falsef(t, resp.Canceled, "watch cancelled")
}
cancel()

@ -157,9 +151,7 @@ func TestForceNewCluster(t *testing.T) {
c.Members[2].Terminate(t)
c.Members[0].ForceNewCluster = true
err = c.Members[0].Restart(t)
if err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
require.NoErrorf(t, err, "unexpected ForceRestart error")
c.WaitMembersForLeader(t, c.Members[:1])

// use new http client to init new connection
@ -170,12 +162,8 @@ func TestForceNewCluster(t *testing.T) {
if len(resp.Events) != 0 {
break
}
if resp.Err() != nil {
t.Fatalf("unexpected watch error: %q", resp.Err())
}
if resp.Canceled {
t.Fatalf("watch cancelled")
}
require.NoErrorf(t, resp.Err(), "unexpected watch error")
require.Falsef(t, resp.Canceled, "watch cancelled")
}
cancel()
clusterMustProgress(t, c.Members[:1])
@ -325,9 +313,8 @@ func TestIssue3699(t *testing.T) {
t.Logf("Expecting successful put...")
// try to participate in Cluster
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
if _, err := c.Members[0].Client.Put(ctx, "/foo", "bar"); err != nil {
t.Fatalf("unexpected error on Put (%v)", err)
}
_, err := c.Members[0].Client.Put(ctx, "/foo", "bar")
require.NoErrorf(t, err, "unexpected error on Put")
cancel()
}

@ -344,9 +331,7 @@ func TestRejectUnhealthyAdd(t *testing.T) {
// all attempts to add member should fail
for i := 1; i < len(c.Members); i++ {
err := c.AddMemberByURL(t, c.Members[i].Client, "unix://foo:12345")
if err == nil {
t.Fatalf("should have failed adding peer")
}
require.Errorf(t, err, "should have failed adding peer")
// TODO: client should return descriptive error codes for internal errors
if !strings.Contains(err.Error(), "unhealthy cluster") {
t.Errorf("unexpected error (%v)", err)
@ -365,9 +350,7 @@ func TestRejectUnhealthyAdd(t *testing.T) {
break
}
}
if err != nil {
t.Fatalf("should have added peer to healthy Cluster (%v)", err)
}
require.NoErrorf(t, err, "should have added peer to healthy Cluster (%v)", err)
}

// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
@ -384,9 +367,7 @@ func TestRejectUnhealthyRemove(t *testing.T) {

// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
err := c.RemoveMember(t, c.Members[leader].Client, uint64(c.Members[2].Server.MemberID()))
if err == nil {
t.Fatalf("should reject quorum breaking remove: %s", err)
}
require.Errorf(t, err, "should reject quorum breaking remove: %s", err)
// TODO: client should return more descriptive error codes for internal errors
if !strings.Contains(err.Error(), "unhealthy cluster") {
t.Errorf("unexpected error (%v)", err)
@ -396,9 +377,8 @@ func TestRejectUnhealthyRemove(t *testing.T) {
time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration)))

// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
if err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.MemberID())); err != nil {
t.Fatalf("should accept removing down member: %s", err)
}
err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.MemberID()))
require.NoErrorf(t, err, "should accept removing down member")

// bring cluster to (4,1)
c.Members[0].Restart(t)
@ -407,9 +387,8 @@ func TestRejectUnhealthyRemove(t *testing.T) {
time.Sleep((3 * etcdserver.HealthInterval) / 2)

// accept remove member since (4,1)-(1,0) => (3,1) has quorum
if err = c.RemoveMember(t, c.Members[1].Client, uint64(c.Members[0].Server.MemberID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
err = c.RemoveMember(t, c.Members[1].Client, uint64(c.Members[0].Server.MemberID()))
require.NoErrorf(t, err, "expected to remove member, got error")
}

// TestRestartRemoved ensures that restarting removed member must exit
@ -431,17 +410,15 @@ func TestRestartRemoved(t *testing.T) {
firstMember.KeepDataDirTerminate = true

// 3. remove first member, shut down without deleting data
if err := c.RemoveMember(t, c.Members[1].Client, uint64(firstMember.Server.MemberID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
err := c.RemoveMember(t, c.Members[1].Client, uint64(firstMember.Server.MemberID()))
require.NoErrorf(t, err, "expected to remove member, got error")
c.WaitLeader(t)

// 4. restart first member with 'initial-cluster-state=new'
// wrong config, expects exit within ReqTimeout
firstMember.ServerConfig.NewCluster = false
if err := firstMember.Restart(t); err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
err = firstMember.Restart(t)
require.NoErrorf(t, err, "unexpected ForceRestart error")
defer func() {
firstMember.Close()
os.RemoveAll(firstMember.ServerConfig.DataDir)
@ -472,9 +449,7 @@ func clusterMustProgress(t *testing.T, members []*integration.Member) {
}
t.Logf("failed to create key on #0 (%v)", err)
}
if err != nil {
t.Fatalf("create on #0 error: %v", err)
}
require.NoErrorf(t, err, "create on #0 error")

for i, m := range members {
mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
@ -483,12 +458,8 @@ func clusterMustProgress(t *testing.T, members []*integration.Member) {
if len(resp.Events) != 0 {
break
}
if resp.Err() != nil {
t.Fatalf("#%d: watch error: %q", i, resp.Err())
}
if resp.Canceled {
t.Fatalf("#%d: watch: cancelled", i)
}
require.NoErrorf(t, resp.Err(), "#%d: watch error", i)
require.Falsef(t, resp.Canceled, "#%d: watch: cancelled", i)
}
mcancel()
}

@ -94,9 +94,7 @@ func TestSnapshotAndRestartMember(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
key := fmt.Sprintf("foo%d", i)
_, err = m.Client.Put(ctx, "/"+key, "bar")
if err != nil {
t.Fatalf("#%d: create on %s error: %v", i, m.URL(), err)
}
require.NoErrorf(t, err, "#%d: create on %s error", i, m.URL())
cancel()
}
m.Stop(t)
@ -107,9 +105,7 @@ func TestSnapshotAndRestartMember(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
key := fmt.Sprintf("foo%d", i)
resp, err := m.Client.Get(ctx, "/"+key)
if err != nil {
t.Fatalf("#%d: get on %s error: %v", i, m.URL(), err)
}
require.NoErrorf(t, err, "#%d: get on %s error", i, m.URL())
cancel()

if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" {

|
||||
v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes")
|
||||
require.NoError(t, err)
|
||||
|
||||
if v == "0" {
|
||||
t.Fatalf("expected non-zero, got %q", v)
|
||||
}
|
||||
require.NotEqualf(t, "0", v, "expected non-zero, got %q", v)
|
||||
}
|
||||
|
||||
func TestMetricDbSizeDefrag(t *testing.T) {
|
||||
@ -75,16 +73,12 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
|
||||
require.NoError(t, err)
|
||||
bv, err := strconv.Atoi(beforeDefrag)
|
||||
require.NoError(t, err)
|
||||
if bv < expected {
|
||||
t.Fatalf("expected db size greater than %d, got %d", expected, bv)
|
||||
}
|
||||
require.GreaterOrEqualf(t, bv, expected, "expected db size greater than %d, got %d", expected, bv)
|
||||
beforeDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
|
||||
require.NoError(t, err)
|
||||
biu, err := strconv.Atoi(beforeDefragInUse)
|
||||
require.NoError(t, err)
|
||||
if biu < expected {
|
||||
t.Fatalf("expected db size in use is greater than %d, got %d", expected, biu)
|
||||
}
|
||||
require.GreaterOrEqualf(t, biu, expected, "expected db size in use is greater than %d, got %d", expected, biu)
|
||||
|
||||
// clear out historical keys, in use bytes should free pages
|
||||
creq := &pb.CompactionRequest{Revision: int64(numPuts), Physical: true}
|
||||
@ -116,9 +110,7 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
|
||||
break
|
||||
}
|
||||
retry++
|
||||
if retry >= maxRetry {
|
||||
t.Fatalf("%v", err.Error())
|
||||
}
|
||||
require.Lessf(t, retry, maxRetry, "%v", err.Error())
|
||||
}
|
||||
|
||||
// defrag should give freed space back to fs
|
||||
@ -128,17 +120,13 @@ func testMetricDbSizeDefrag(t *testing.T, name string) {
|
||||
require.NoError(t, err)
|
||||
av, err := strconv.Atoi(afterDefrag)
|
||||
require.NoError(t, err)
|
||||
if bv <= av {
|
||||
t.Fatalf("expected less than %d, got %d after defrag", bv, av)
|
||||
}
|
||||
require.Greaterf(t, bv, av, "expected less than %d, got %d after defrag", bv, av)
|
||||
|
||||
afterDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes")
|
||||
require.NoError(t, err)
|
||||
adiu, err := strconv.Atoi(afterDefragInUse)
|
||||
require.NoError(t, err)
|
||||
if adiu > av {
|
||||
t.Fatalf("db size in use (%d) is expected less than db size (%d) after defrag", adiu, av)
|
||||
}
|
||||
require.LessOrEqualf(t, adiu, av, "db size in use (%d) is expected less than db size (%d) after defrag", adiu, av)
|
||||
}
|
||||
|
||||
func TestMetricQuotaBackendBytes(t *testing.T) {
|
||||
@ -150,9 +138,7 @@ func TestMetricQuotaBackendBytes(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
qv, err := strconv.ParseFloat(qs, 64)
|
||||
require.NoError(t, err)
|
||||
if int64(qv) != storage.DefaultQuotaBytes {
|
||||
t.Fatalf("expected %d, got %f", storage.DefaultQuotaBytes, qv)
|
||||
}
|
||||
require.Equalf(t, storage.DefaultQuotaBytes, int64(qv), "expected %d, got %f", storage.DefaultQuotaBytes, qv)
|
||||
}
|
||||
|
||||
func TestMetricsHealth(t *testing.T) {
|
||||
@ -174,9 +160,7 @@ func TestMetricsHealth(t *testing.T) {
|
||||
|
||||
hv, err := clus.Members[0].Metric("etcd_server_health_failures")
|
||||
require.NoError(t, err)
|
||||
if hv != "0" {
|
||||
t.Fatalf("expected '0' from etcd_server_health_failures, got %q", hv)
|
||||
}
|
||||
require.Equalf(t, "0", hv, "expected '0' from etcd_server_health_failures, got %q", hv)
|
||||
}
|
||||
|
||||
func TestMetricsRangeDurationSeconds(t *testing.T) {
|
||||
|
@ -19,6 +19,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.etcd.io/etcd/tests/v3/framework/integration"
|
||||
)
|
||||
|
||||
@ -65,9 +67,7 @@ func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) {
|
||||
}
|
||||
t.Logf("[%d] got %v", i, err)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("failed after 3 tries (%v)", err)
|
||||
}
|
||||
require.NoErrorf(t, err, "failed after 3 tries (%v)", err)
|
||||
}
|
||||
|
||||
func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {
|
||||
|
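Several of the metric conversions above wrap literals as int64(...), and that is not cosmetic: require.Equal compares with reflect.DeepEqual, and the ordered helpers (Greater, GreaterOrEqual, Less) refuse to compare mismatched kinds, so a bare literal that defaults to int will not match an int64 field. A small sketch with made-up values:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestTypedLiterals uses invented values to show why the hunks above write
// int64(3) and int64(2) explicitly: the int default type of a bare literal
// does not compare equal (or orderable) against an int64 value.
func TestTypedLiterals(t *testing.T) {
	var revision int64 = 3
	var requests int64 = 5

	require.Equalf(t, int64(3), revision, "revision expected 3, got %d", revision)
	// require.Equal(t, 3, revision) would fail: int vs int64.

	require.GreaterOrEqualf(t, requests, int64(2), "expected at least 2, got %d", requests)
}
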
@ -52,9 +52,7 @@ func TestClusterProxyMemberList(t *testing.T) {
DialTimeout: 5 * time.Second,
}
client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("err %v, want nil", err)
}
require.NoErrorf(t, err, "err %v, want nil", err)
defer client.Close()

// wait some time for register-loop to write keys
@ -62,16 +60,10 @@ func TestClusterProxyMemberList(t *testing.T) {

var mresp *clientv3.MemberListResponse
mresp, err = client.Cluster.MemberList(context.Background())
if err != nil {
t.Fatalf("err %v, want nil", err)
}
require.NoErrorf(t, err, "err %v, want nil", err)

if len(mresp.Members) != 1 {
t.Fatalf("len(mresp.Members) expected 1, got %d (%+v)", len(mresp.Members), mresp.Members)
}
if len(mresp.Members[0].ClientURLs) != 1 {
t.Fatalf("len(mresp.Members[0].ClientURLs) expected 1, got %d (%+v)", len(mresp.Members[0].ClientURLs), mresp.Members[0].ClientURLs[0])
}
require.Lenf(t, mresp.Members, 1, "len(mresp.Members) expected 1, got %d (%+v)", len(mresp.Members), mresp.Members)
require.Lenf(t, mresp.Members[0].ClientURLs, 1, "len(mresp.Members[0].ClientURLs) expected 1, got %d (%+v)", len(mresp.Members[0].ClientURLs), mresp.Members[0].ClientURLs[0])
assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{cts.caddr}})

// test proxy member add
@ -82,12 +74,8 @@ func TestClusterProxyMemberList(t *testing.T) {

// check add member succ
mresp, err = client.Cluster.MemberList(context.Background())
if err != nil {
t.Fatalf("err %v, want nil", err)
}
if len(mresp.Members) != 2 {
t.Fatalf("len(mresp.Members) expected 2, got %d (%+v)", len(mresp.Members), mresp.Members)
}
require.NoErrorf(t, err, "err %v, want nil", err)
require.Lenf(t, mresp.Members, 2, "len(mresp.Members) expected 2, got %d (%+v)", len(mresp.Members), mresp.Members)
assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{newMemberAddr}})

// test proxy member delete
@ -97,12 +85,8 @@ func TestClusterProxyMemberList(t *testing.T) {

// check delete member succ
mresp, err = client.Cluster.MemberList(context.Background())
if err != nil {
t.Fatalf("err %v, want nil", err)
}
if len(mresp.Members) != 1 {
t.Fatalf("len(mresp.Members) expected 1, got %d (%+v)", len(mresp.Members), mresp.Members)
}
require.NoErrorf(t, err, "err %v, want nil", err)
require.Lenf(t, mresp.Members, 1, "len(mresp.Members) expected 1, got %d (%+v)", len(mresp.Members), mresp.Members)
assert.Contains(t, mresp.Members, &pb.Member{Name: hostname, ClientURLs: []string{cts.caddr}})
}

@ -162,10 +146,7 @@ func newClusterProxyServer(lg *zap.Logger, endpoints []string, prefix string, t

func deregisterMember(c *clientv3.Client, prefix, addr string, t *testing.T) {
em, err := endpoints.NewManager(c, prefix)
if err != nil {
t.Fatalf("new endpoint manager failed, err %v", err)
}
if err = em.DeleteEndpoint(c.Ctx(), prefix+"/"+addr); err != nil {
t.Fatalf("delete endpoint failed, err %v", err)
}
require.NoErrorf(t, err, "new endpoint manager failed, err")
err = em.DeleteEndpoint(c.Ctx(), prefix+"/"+addr)
require.NoErrorf(t, err, "delete endpoint failed, err")
}

@ -44,13 +44,9 @@ func TestKVProxyRange(t *testing.T) {
DialTimeout: 5 * time.Second,
}
client, err := integration2.NewClient(t, cfg)
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
require.NoErrorf(t, err, "err = %v, want nil", err)
_, err = client.Get(context.Background(), "foo")
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
require.NoErrorf(t, err, "err = %v, want nil", err)
client.Close()
}

@ -18,6 +18,7 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"

clientv3 "go.etcd.io/etcd/client/v3"
@ -40,12 +41,8 @@ func TestRegister(t *testing.T) {
donec := grpcproxy.Register(zaptest.NewLogger(t), cli, testPrefix, paddr, 5)

ups := <-wa
if len(ups) != 1 {
t.Fatalf("len(ups) expected 1, got %d (%v)", len(ups), ups)
}
if ups[0].Endpoint.Addr != paddr {
t.Fatalf("ups[0].Addr expected %q, got %q", paddr, ups[0].Endpoint.Addr)
}
require.Lenf(t, ups, 1, "len(ups) expected 1, got %d (%v)", len(ups), ups)
require.Equalf(t, ups[0].Endpoint.Addr, paddr, "ups[0].Addr expected %q, got %q", paddr, ups[0].Endpoint.Addr)

cli.Close()
clus.TakeClient(0)
@ -58,12 +55,8 @@ func TestRegister(t *testing.T) {

func mustCreateWatcher(t *testing.T, c *clientv3.Client, prefix string) endpoints.WatchChannel {
em, err := endpoints.NewManager(c, prefix)
if err != nil {
t.Fatalf("failed to create endpoints.Manager: %v", err)
}
require.NoErrorf(t, err, "failed to create endpoints.Manager")
wc, err := em.NewWatchChannel(c.Ctx())
if err != nil {
t.Fatalf("failed to resolve %q (%v)", prefix, err)
}
require.NoErrorf(t, err, "failed to resolve %q", prefix)
return wc
}

@ -132,9 +132,7 @@ func getWorker(ctx context.Context, t *testing.T, clus *integration.Cluster) {
if resp == nil {
continue
}
if prevRev > resp.Header.Revision {
t.Fatalf("rev is less than previously observed revision, rev: %d, prevRev: %d", resp.Header.Revision, prevRev)
}
require.LessOrEqualf(t, prevRev, resp.Header.Revision, "rev is less than previously observed revision, rev: %d, prevRev: %d", resp.Header.Revision, prevRev)
prevRev = resp.Header.Revision
}
}

@ -93,9 +93,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
mresp, err := cli2.MemberList(ctx)
cancel()
require.NoError(t, err)
if len(mresp.Members) != 4 {
t.Fatalf("expected 4 members, got %+v", mresp)
}
require.Lenf(t, mresp.Members, 4, "expected 4 members, got %+v", mresp)

// make sure restored cluster has kept all data on recovery
var gresp *clientv3.GetResponse
@ -104,11 +102,7 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
cancel()
require.NoError(t, err)
for i := range gresp.Kvs {
if string(gresp.Kvs[i].Key) != kvs[i].k {
t.Fatalf("#%d: key expected %s, got %s", i, kvs[i].k, gresp.Kvs[i].Key)
}
if string(gresp.Kvs[i].Value) != kvs[i].v {
t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[i].Value)
}
require.Equalf(t, string(gresp.Kvs[i].Key), kvs[i].k, "#%d: key expected %s, got %s", i, kvs[i].k, gresp.Kvs[i].Key)
require.Equalf(t, string(gresp.Kvs[i].Value), kvs[i].v, "#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[i].Value)
}
}

@ -88,9 +88,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
var gresp *clientv3.GetResponse
gresp, err = cli.Get(context.Background(), kvs[i].k)
require.NoError(t, err)
if string(gresp.Kvs[0].Value) != kvs[i].v {
t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[0].Value)
}
require.Equalf(t, string(gresp.Kvs[0].Value), kvs[i].v, "#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[0].Value)
}
}

@ -121,9 +119,7 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {
var gresp *clientv3.GetResponse
gresp, err = cli.Get(context.Background(), kvs[i].k)
require.NoError(t, err)
if string(gresp.Kvs[0].Value) != kvs[i].v {
t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[0].Value)
}
require.Equalf(t, string(gresp.Kvs[0].Value), kvs[i].v, "#%d: value expected %s, got %s", i, kvs[i].v, gresp.Kvs[0].Value)
}
}
}
@ -132,12 +128,11 @@ func TestSnapshotV3RestoreMulti(t *testing.T) {
func TestCorruptedBackupFileCheck(t *testing.T) {
dbPath := testutils.MustAbsPath("testdata/corrupted_backup.db")
integration2.BeforeTest(t)
if _, err := os.Stat(dbPath); err != nil {
t.Fatalf("test file [%s] does not exist: %v", dbPath, err)
}
_, err := os.Stat(dbPath)
require.NoErrorf(t, err, "test file [%s] does not exist: %v", dbPath, err)

sp := snapshot.NewV3(zaptest.NewLogger(t))
_, err := sp.Status(dbPath)
_, err = sp.Status(dbPath)
expectedErrKeywords := "snapshot file integrity check failed"
/* example error message:
snapshot file integrity check failed. 2 errors found.

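The proxy and snapshot-restore hunks above collapse hand-rolled length checks into require.Lenf, which accepts anything with a length (slices, maps, strings, channels) and prints the object itself on failure. A minimal sketch with a fabricated member list:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestLenf shows the length-check conversion used above; the members slice is
// fabricated for illustration.
func TestLenf(t *testing.T) {
	members := []string{"infra0"}

	// Before:
	//   if len(members) != 1 {
	//       t.Fatalf("len(members) expected 1, got %d (%+v)", len(members), members)
	//   }
	require.Lenf(t, members, 1, "len(members) expected 1, got %d (%+v)", len(members), members)
}
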
@ -101,7 +101,7 @@ func TestSet(t *testing.T) {
assert.Equal(t, "set", e.Action)
assert.Equal(t, "/foo", e.Node.Key)
assert.False(t, e.Node.Dir)
assert.Equal(t, "", *e.Node.Value)
assert.Empty(t, *e.Node.Value)
assert.Nil(t, e.Node.Nodes)
assert.Nil(t, e.Node.Expiration)
assert.Equal(t, int64(0), e.Node.TTL)
@ -123,7 +123,7 @@ func TestSet(t *testing.T) {
// check prevNode
require.NotNil(t, e.PrevNode)
assert.Equal(t, "/foo", e.PrevNode.Key)
assert.Equal(t, "", *e.PrevNode.Value)
assert.Empty(t, *e.PrevNode.Value)
assert.Equal(t, uint64(1), e.PrevNode.ModifiedIndex)
// Set /foo="baz" (for testing prevNode)
eidx = 3
@ -198,7 +198,7 @@ func TestStoreCreateValue(t *testing.T) {
assert.Equal(t, "create", e.Action)
assert.Equal(t, "/empty", e.Node.Key)
assert.False(t, e.Node.Dir)
assert.Equal(t, "", *e.Node.Value)
assert.Empty(t, *e.Node.Value)
assert.Nil(t, e.Node.Nodes)
assert.Nil(t, e.Node.Expiration)
assert.Equal(t, int64(0), e.Node.TTL)
@ -271,7 +271,7 @@ func TestStoreUpdateValue(t *testing.T) {
assert.Equal(t, "update", e.Action)
assert.Equal(t, "/foo", e.Node.Key)
assert.False(t, e.Node.Dir)
assert.Equal(t, "", *e.Node.Value)
assert.Empty(t, *e.Node.Value)
assert.Equal(t, int64(0), e.Node.TTL)
assert.Equal(t, uint64(3), e.Node.ModifiedIndex)
// check prevNode
@ -282,7 +282,7 @@ func TestStoreUpdateValue(t *testing.T) {

e, _ = s.Get("/foo", false, false)
assert.Equal(t, eidx, e.EtcdIndex)
assert.Equal(t, "", *e.Node.Value)
assert.Empty(t, *e.Node.Value)
}

// TestStoreUpdateFailsIfDirectory ensures that the store cannot update a directory.

@ -110,23 +110,20 @@ func TestV3StorageQuotaApply(t *testing.T) {
defer cancel()

// small quota machine should reject put
if _, err := kvc0.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {
t.Fatalf("past-quota instance should reject put")
}
_, err = kvc0.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf})
require.Errorf(t, err, "past-quota instance should reject put")

// large quota machine should reject put
if _, err := kvc1.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {
t.Fatalf("past-quota instance should reject put")
}
_, err = kvc1.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf})
require.Errorf(t, err, "past-quota instance should reject put")

// reset large quota node to ensure alarm persisted
clus.Members[1].Stop(t)
clus.Members[1].Restart(t)
clus.WaitMembersForLeader(t, clus.Members)

if _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil {
t.Fatalf("alarmed instance should reject put after reset")
}
_, err = kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf})
require.Errorf(t, err, "alarmed instance should reject put after reset")
}

// TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through.
@ -221,9 +218,7 @@ func TestV3CorruptAlarm(t *testing.T) {
resp1, err1 := clus.Client(1).Get(context.TODO(), "abc")
require.NoError(t, err1)

if resp0.Kvs[0].ModRevision == resp1.Kvs[0].ModRevision {
t.Fatalf("matching ModRevision values")
}
require.NotEqualf(t, resp0.Kvs[0].ModRevision, resp1.Kvs[0].ModRevision, "matching ModRevision values")

for i := 0; i < 5; i++ {
presp, perr := clus.Client(0).Put(context.TODO(), "abc", "aaa")
@ -307,9 +302,7 @@ func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) {
resp1, err1 := clus.Members[2].Client.KV.Get(context.TODO(), "foo")
require.NoError(t, err1)

if resp0.Header.Revision == resp1.Header.Revision {
t.Fatalf("matching Revision values")
}
require.NotEqualf(t, resp0.Header.Revision, resp1.Header.Revision, "matching Revision values")

// Wait for CorruptCheckTime
time.Sleep(time.Second)

@ -44,9 +44,7 @@ func TestV3AuthEmptyUserGet(t *testing.T) {
authSetupRoot(t, api.Auth)

_, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")})
if !eqErrGRPC(err, rpctypes.ErrUserEmpty) {
t.Fatalf("got %v, expected %v", err, rpctypes.ErrUserEmpty)
}
require.Truef(t, eqErrGRPC(err, rpctypes.ErrUserEmpty), "got %v, expected %v", err, rpctypes.ErrUserEmpty)
}

// TestV3AuthEmptyUserPut ensures that a put with an empty user will return an empty user error,
@ -70,9 +68,7 @@ func TestV3AuthEmptyUserPut(t *testing.T) {
// cluster terminating.
for i := 0; i < 10; i++ {
_, err := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
if !eqErrGRPC(err, rpctypes.ErrUserEmpty) {
t.Fatalf("got %v, expected %v", err, rpctypes.ErrUserEmpty)
}
require.Truef(t, eqErrGRPC(err, rpctypes.ErrUserEmpty), "got %v, expected %v", err, rpctypes.ErrUserEmpty)
}
}

@ -124,9 +120,7 @@ func TestV3AuthRevision(t *testing.T) {
aresp, aerr := api.Auth.UserAdd(ctx, &pb.AuthUserAddRequest{Name: "root", Password: "123", Options: &authpb.UserAddOptions{NoPassword: false}})
cancel()
require.NoError(t, aerr)
if aresp.Header.Revision != rev {
t.Fatalf("revision expected %d, got %d", rev, aresp.Header.Revision)
}
require.Equalf(t, aresp.Header.Revision, rev, "revision expected %d, got %d", rev, aresp.Header.Revision)
}

// TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases
@ -333,16 +327,12 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
key := "foo"
val := "bar"
_, err := nonAuthedKV.Put(context.TODO(), key, val)
if err != nil {
t.Fatalf("couldn't put key (%v)", err)
}
require.NoErrorf(t, err, "couldn't put key (%v)", err)

authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth)

respput, err := nonAuthedKV.Put(context.TODO(), key, val)
if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) {
t.Fatalf("could put key (%v), it should cause an error of permission denied", respput)
}
require.Truef(t, eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty), "could put key (%v), it should cause an error of permission denied", respput)
}

func TestV3AuthOldRevConcurrent(t *testing.T) {
@ -428,9 +418,7 @@ func TestV3AuthWatchErrorAndWatchId0(t *testing.T) {
require.Error(t, watchResponse.Err()) // permission denied

_, err := c.Put(ctx, "k1", "val")
if err != nil {
t.Fatalf("Unexpected error from Put: %v", err)
}
require.NoErrorf(t, err, "Unexpected error from Put: %v", err)

<-watchEndCh
}

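Two smaller variations appear in the v2store, quota and auth hunks above: checks that previously fataled when err == nil become require.Errorf, which asserts that an error is present, and comparisons against the empty string become assert.Empty. A rough sketch, with a hypothetical overQuotaPut helper and a made-up error message:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// overQuotaPut is a hypothetical stand-in for a put the server is expected to
// reject, as in the storage-quota tests above; the message is invented.
func overQuotaPut() error {
	return errors.New("put rejected: over quota")
}

func TestExpectedFailures(t *testing.T) {
	// Before: if err := overQuotaPut(); err == nil { t.Fatalf("past-quota instance should reject put") }
	err := overQuotaPut()
	require.Errorf(t, err, "past-quota instance should reject put")

	// Before: assert.Equal(t, "", value)
	value := ""
	assert.Empty(t, value)
}
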
@ -132,19 +132,14 @@ func TestElectionFailover(t *testing.T) {

// first leader (elected)
e := concurrency.NewElection(ss[0], "test-election")
if err := e.Campaign(context.TODO(), "foo"); err != nil {
t.Fatalf("failed volunteer (%v)", err)
}
err := e.Campaign(context.TODO(), "foo")
require.NoErrorf(t, err, "failed volunteer")

// check first leader
resp, ok := <-e.Observe(cctx)
if !ok {
t.Fatalf("could not wait for first election; channel closed")
}
require.Truef(t, ok, "could not wait for first election; channel closed")
s := string(resp.Kvs[0].Value)
if s != "foo" {
t.Fatalf("wrong election result. got %s, wanted foo", s)
}
require.Equalf(t, "foo", s, "wrong election result. got %s, wanted foo", s)

// next leader
electedErrC := make(chan error, 1)
@ -155,19 +150,15 @@ func TestElectionFailover(t *testing.T) {
}()

// invoke leader failover
err := ss[0].Close()
err = ss[0].Close()
require.NoError(t, err)

// check new leader
e = concurrency.NewElection(ss[2], "test-election")
resp, ok = <-e.Observe(cctx)
if !ok {
t.Fatalf("could not wait for second election; channel closed")
}
require.Truef(t, ok, "could not wait for second election; channel closed")
s = string(resp.Kvs[0].Value)
if s != "bar" {
t.Fatalf("wrong election result. got %s, wanted bar", s)
}
require.Equalf(t, "bar", s, "wrong election result. got %s, wanted bar", s)

// leader must ack election (otherwise, Campaign may see closed conn)
eer := <-electedErrC
@ -288,9 +279,7 @@ func TestElectionObserveCompacted(t *testing.T) {
if !ok {
t.Fatal("failed to observe on compacted revision")
}
if string(v.Kvs[0].Value) != "abc" {
t.Fatalf(`expected leader value "abc", got %q`, string(v.Kvs[0].Value))
}
require.Equalf(t, "abc", string(v.Kvs[0].Value), `expected leader value "abc", got %q`, string(v.Kvs[0].Value))
}

// TestElectionWithAuthEnabled verifies the election interface when auth is enabled.

@ -61,9 +61,7 @@ func TestFailover(t *testing.T) {
// Create an etcd client before or after first server down
t.Logf("Creating an etcd client [%s]", tc.name)
cli, err := tc.testFunc(t, cc, clus)
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
require.NoErrorf(t, err, "Failed to create client")
defer cli.Close()

// Sanity test
@ -142,12 +140,8 @@ func getWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCo
t.Logf("Failed to get key (%v)", getErr)
return getErr
}
if len(resp.Kvs) != 1 {
t.Fatalf("Expected 1 key, got %d", len(resp.Kvs))
}
if !bytes.Equal([]byte(val), resp.Kvs[0].Value) {
t.Fatalf("Unexpected value, expected: %s, got: %s", val, resp.Kvs[0].Value)
}
require.Lenf(t, resp.Kvs, 1, "Expected 1 key, got %d", len(resp.Kvs))
require.Truef(t, bytes.Equal([]byte(val), resp.Kvs[0].Value), "Unexpected value, expected: %s, got: %s", val, resp.Kvs[0].Value)
return nil
}()
if err != nil {

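One wrinkle worth noting for the Equalf conversions above: testify's signature is require.Equalf(t, expected, actual, msg, args...), with the expected value first. Equality itself is symmetric, so call sites that pass (actual, expected) still pass and fail at the right times, but the expected/actual labels in the failure output come out swapped. A small sketch with invented values:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestExpectedFirst spells out the parameter order; want and got are invented.
func TestExpectedFirst(t *testing.T) {
	want := "foo"
	got := "foo"

	// Expected value first, observed value second; on failure the report
	// labels the first argument "expected" and the second "actual".
	require.Equalf(t, want, got, "wrong election result. got %s, wanted %s", got, want)
}
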
@ -52,32 +52,22 @@ func TestV3PutOverwrite(t *testing.T) {
reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true}

respput, err := kvc.Put(context.TODO(), reqput)
if err != nil {
t.Fatalf("couldn't put key (%v)", err)
}
require.NoErrorf(t, err, "couldn't put key")

// overwrite
reqput.Value = []byte("baz")
respput2, err := kvc.Put(context.TODO(), reqput)
if err != nil {
t.Fatalf("couldn't put key (%v)", err)
}
if respput2.Header.Revision <= respput.Header.Revision {
t.Fatalf("expected newer revision on overwrite, got %v <= %v",
require.NoErrorf(t, err, "couldn't put key")
require.Greaterf(t, respput2.Header.Revision, respput.Header.Revision, "expected newer revision on overwrite, got %v <= %v",
respput2.Header.Revision, respput.Header.Revision)
}
if pkv := respput2.PrevKv; pkv == nil || string(pkv.Value) != "bar" {
t.Fatalf("expected PrevKv=bar, got response %+v", respput2)
}

reqrange := &pb.RangeRequest{Key: key}
resprange, err := kvc.Range(context.TODO(), reqrange)
if err != nil {
t.Fatalf("couldn't get key (%v)", err)
}
if len(resprange.Kvs) != 1 {
t.Fatalf("expected 1 key, got %v", len(resprange.Kvs))
}
require.NoErrorf(t, err, "couldn't get key")
require.Lenf(t, resprange.Kvs, 1, "expected 1 key, got %v", len(resprange.Kvs))

kv := resprange.Kvs[0]
if kv.ModRevision <= kv.CreateRevision {
@ -107,9 +97,7 @@ func TestV3PutRestart(t *testing.T) {
clus.Members[stopIdx].Stop(t)
clus.Members[stopIdx].Restart(t)
c, cerr := integration.NewClientV3(clus.Members[stopIdx])
if cerr != nil {
t.Fatalf("cannot create client: %v", cerr)
}
require.NoErrorf(t, cerr, "cannot create client")
clus.Members[stopIdx].ServerClient = c

ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
@ -130,28 +118,21 @@ func TestV3CompactCurrentRev(t *testing.T) {
kvc := integration.ToGRPC(clus.RandClient()).KV
preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
for i := 0; i < 3; i++ {
if _, err := kvc.Put(context.Background(), preq); err != nil {
t.Fatalf("couldn't put key (%v)", err)
}
_, err := kvc.Put(context.Background(), preq)
require.NoErrorf(t, err, "couldn't put key")
}
// get key to add to proxy cache, if any
_, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
require.NoError(t, err)
// compact on current revision
_, err = kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4})
if err != nil {
t.Fatalf("couldn't compact kv space (%v)", err)
}
require.NoErrorf(t, err, "couldn't compact kv space")
// key still exists when linearized?
_, err = kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")})
if err != nil {
t.Fatalf("couldn't get key after compaction (%v)", err)
}
require.NoErrorf(t, err, "couldn't get key after compaction")
// key still exists when serialized?
_, err = kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo"), Serializable: true})
if err != nil {
t.Fatalf("couldn't get serialized key after compaction (%v)", err)
}
require.NoErrorf(t, err, "couldn't get serialized key after compaction")
}

// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev.
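The TestV3PutOverwrite hunk above turns the revision comparison into require.Greaterf; as with the other ordered helpers, both arguments must share the same concrete type. A closing sketch with fabricated revision numbers:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestRevisionGrows uses fabricated revisions to show the ordered assertion
// used for the overwrite check above.
func TestRevisionGrows(t *testing.T) {
	var before, after int64 = 4, 5

	// Before:
	//   if after <= before {
	//       t.Fatalf("expected newer revision on overwrite, got %v <= %v", after, before)
	//   }
	require.Greaterf(t, after, before, "expected newer revision on overwrite, got %v <= %v", after, before)
}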