test: Remove separation between V2 and V3 cluster in integration tests
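The change is mechanical: the V3-suffixed harness names go away, so integration2.NewClusterV3 becomes integration2.NewCluster and *integration2.ClusterV3 becomes *integration2.Cluster, while ClusterConfig is untouched. A minimal sketch of a test against the unified harness (hypothetical test and package names; the integration2 import path is assumed from this file's imports, not shown in this diff):

	package clientv3test

	import (
		"testing"

		integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
	)

	// TestClusterSketch is a hypothetical example, not part of this commit:
	// after the change, the single NewCluster constructor covers the cases
	// that previously required NewClusterV3.
	func TestClusterSketch(t *testing.T) {
		integration2.BeforeTest(t)

		// Same ClusterConfig as before; only the constructor name changed.
		clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
		defer clus.Terminate(t)

		cli := clus.RandClient() // client connected to a random member
		_ = cli                  // watch/KV assertions would go here
	}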
@@ -36,7 +36,7 @@ import (
 type watcherTest func(*testing.T, *watchctx)
 
 type watchctx struct {
-	clus          *integration2.ClusterV3
+	clus          *integration2.Cluster
 	w             clientv3.Watcher
 	kv            clientv3.KV
 	wclientMember int
@@ -47,7 +47,7 @@ type watchctx struct {
 func runWatchTest(t *testing.T, f watcherTest) {
 	integration2.BeforeTest(t)
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	wclientMember := rand.Intn(3)
@@ -348,7 +348,7 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
 
 func TestWatchResumeInitRev(t *testing.T) {
 	integration2.BeforeTest(t)
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -404,7 +404,7 @@ func TestWatchResumeInitRev(t *testing.T) {
 func TestWatchResumeCompacted(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
 	defer clus.Terminate(t)
 
 	// create a waiting watcher at rev 1
@@ -491,7 +491,7 @@ func TestWatchResumeCompacted(t *testing.T) {
 func TestWatchCompactRevision(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	// set some keys
@@ -540,7 +540,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
 	pi := 3 * time.Second
 	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	wc := clus.RandClient()
@@ -588,7 +588,7 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
 	integration2.BeforeTest(t)
 
 	progressInterval := 200 * time.Millisecond
-	clus := integration2.NewClusterV3(t,
+	clus := integration2.NewCluster(t,
 		&integration2.ClusterConfig{
 			Size:                        3,
 			WatchProgressNotifyInterval: progressInterval,
@@ -629,7 +629,7 @@ func TestWatchRequestProgress(t *testing.T) {
 
 	watchTimeout := 3 * time.Second
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	wc := clus.RandClient()
@@ -688,7 +688,7 @@ func TestWatchRequestProgress(t *testing.T) {
 func TestWatchEventType(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)
 
 	client := cluster.RandClient()
@@ -762,7 +762,7 @@ func TestWatchEventType(t *testing.T) {
 func TestWatchErrConnClosed(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -792,7 +792,7 @@ func TestWatchErrConnClosed(t *testing.T) {
 func TestWatchAfterClose(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 
 	cli := clus.Client(0)
@@ -820,7 +820,7 @@ func TestWatchAfterClose(t *testing.T) {
 func TestWatchWithRequireLeader(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 3})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)
 
 	// Put a key for the non-require leader watch to read as an event.
@@ -894,7 +894,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 func TestWatchWithFilter(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)
 
 	client := cluster.RandClient()
@@ -933,7 +933,7 @@ func TestWatchWithFilter(t *testing.T) {
 func TestWatchWithCreatedNotification(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)
 
 	client := cluster.RandClient()
@@ -955,7 +955,7 @@ func TestWatchWithCreatedNotification(t *testing.T) {
 func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
+	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer cluster.Terminate(t)
 
 	client := cluster.RandClient()
@@ -984,7 +984,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
 func TestWatchCancelOnServer(t *testing.T) {
 	integration2.BeforeTest(t)
 
-	cluster := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)
 
 	client := cluster.RandClient()
@@ -1050,20 +1050,20 @@ func TestWatchCancelOnServer(t *testing.T) {
 // 4. watcher client finishes tearing down stream on "ctx"
 // 5. w2 comes back canceled
 func TestWatchOverlapContextCancel(t *testing.T) {
-	f := func(clus *integration2.ClusterV3) {}
+	f := func(clus *integration2.Cluster) {}
 	testWatchOverlapContextCancel(t, f)
 }
 
 func TestWatchOverlapDropConnContextCancel(t *testing.T) {
-	f := func(clus *integration2.ClusterV3) {
+	f := func(clus *integration2.Cluster) {
 		clus.Members[0].Bridge().DropConnections()
 	}
 	testWatchOverlapContextCancel(t, f)
 }
 
-func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.ClusterV3)) {
+func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.Cluster)) {
 	integration2.BeforeTest(t)
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 
 	n := 100
@@ -1124,7 +1124,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.ClusterV3)
 // closing the client does not return a client closing error.
 func TestWatchCancelAndCloseClient(t *testing.T) {
 	integration2.BeforeTest(t)
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
 	ctx, cancel := context.WithCancel(context.Background())
@@ -1154,7 +1154,7 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
 // then closes the watcher interface to ensure correct clean up.
 func TestWatchStressResumeClose(t *testing.T) {
 	integration2.BeforeTest(t)
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
 
@@ -1176,7 +1176,7 @@ func TestWatchStressResumeClose(t *testing.T) {
 // its grpc stream is disconnected / reconnecting.
 func TestWatchCancelDisconnected(t *testing.T) {
 	integration2.BeforeTest(t)
-	clus := integration2.NewClusterV3(t, &integration2.ClusterConfig{Size: 1})
+	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)
 	cli := clus.Client(0)
 	ctx, cancel := context.WithCancel(context.Background())