Merge pull request #19593 from mmorel-35/usetesting/tests/e2e

tests/e2e: address Go 1.24 usetesting issues
Benjamin Wang authored 2025-03-14 17:29:38 +00:00, committed by GitHub
36 changed files with 134 additions and 147 deletions
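The change is mechanical throughout: Go 1.24's usetesting analyzer flags context.Background() and context.TODO() in test code, because (*testing.T).Context(), added in Go 1.24, returns a context that is canceled automatically shortly before the test ends (just before functions registered with t.Cleanup run). A minimal sketch of the before/after pattern — the test bodies below are hypothetical illustrations, not code from this PR:

    package example

    import (
        "context"
        "testing"
        "time"
    )

    // Before: a hand-rolled root context that the test must cancel itself.
    func TestOldStyle(t *testing.T) {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        _ = ctx // use ctx with the system under test
    }

    // After: derive from t.Context(), so the context is also canceled
    // automatically when the test finishes or panics.
    func TestNewStyle(t *testing.T) {
        ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
        defer cancel() // still worth keeping, to release the timer early
        _ = ctx
    }

Note that t.Context() is already canceled by the time t.Cleanup callbacks run, so a context needed inside a cleanup function still has to be created separately.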

View File

@@ -137,7 +137,7 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
     e2e.ValidateDowngradeInfo(t, epc, &pb.DowngradeInfo{Enabled: false})
     t.Log("Adding member to test membership, but a learner avoid breaking quorum")
-    resp, err := cc.MemberAddAsLearner(context.Background(), "fake1", []string{"http://127.0.0.1:1001"})
+    resp, err := cc.MemberAddAsLearner(t.Context(), "fake1", []string{"http://127.0.0.1:1001"})
     require.NoError(t, err)
     if triggerSnapshot {
         t.Logf("Generating snapshot")
@@ -145,7 +145,7 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
         verifySnapshot(t, epc)
     }
     t.Log("Removing learner to test membership")
-    _, err = cc.MemberRemove(context.Background(), resp.Member.ID)
+    _, err = cc.MemberRemove(t.Context(), resp.Member.ID)
     require.NoError(t, err)
     beforeMembers, beforeKV := getMembersAndKeys(t, cc)
@@ -197,7 +197,7 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
     }
     t.Log("Adding learner to test membership, but avoid breaking quorum")
-    resp, err = cc.MemberAddAsLearner(context.Background(), "fake2", []string{"http://127.0.0.1:1002"})
+    resp, err = cc.MemberAddAsLearner(t.Context(), "fake2", []string{"http://127.0.0.1:1002"})
     require.NoError(t, err)
     if triggerSnapshot {
         t.Logf("Generating snapshot")
@@ -205,7 +205,7 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
         verifySnapshot(t, epc)
     }
     t.Log("Removing learner to test membership")
-    _, err = cc.MemberRemove(context.Background(), resp.Member.ID)
+    _, err = cc.MemberRemove(t.Context(), resp.Member.ID)
     require.NoError(t, err)
     beforeMembers, beforeKV = getMembersAndKeys(t, cc)
@@ -229,7 +229,7 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 }
 func newCluster(t *testing.T, clusterSize int, snapshotCount uint64) *e2e.EtcdProcessCluster {
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(clusterSize),
         e2e.WithSnapshotCount(snapshotCount),
         e2e.WithKeepDataDir(true),
@@ -246,7 +246,7 @@ func newCluster(t *testing.T, clusterSize int, snapshotCount uint64) *e2e.EtcdPr
 }
 func generateSnapshot(t *testing.T, snapshotCount uint64, cc *e2e.EtcdctlV3) {
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     var i uint64
@@ -286,7 +286,7 @@ func verifySnapshotMembers(t *testing.T, epc *e2e.EtcdProcessCluster, expectedMe
 }
 func getMembersAndKeys(t *testing.T, cc *e2e.EtcdctlV3) (*clientv3.MemberListResponse, *clientv3.GetResponse) {
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     kvs, err := cc.Get(ctx, "", config.GetOptions{Prefix: true})

View File

@@ -69,7 +69,7 @@ func TestConnectionMultiplexing(t *testing.T) {
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
-            ctx := context.Background()
+            ctx := t.Context()
            cfg := e2e.NewConfig(e2e.WithClusterSize(1))
            cfg.Client.ConnectionType = tc.serverTLS
            cfg.ClientHTTPSeparate = tc.separateHTTPPort

View File

@@ -99,7 +99,7 @@ func corruptTest(cx ctlCtx) {
 func TestInPlaceRecovery(t *testing.T) {
     basePort := 20000
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     // Initialize the cluster.
@@ -198,7 +198,7 @@ func TestPeriodicCheckDetectsCorruptionWithExperimentalFlag(t *testing.T) {
 func testPeriodicCheckDetectsCorruption(t *testing.T, useExperimentalFlag bool) {
     checkTime := time.Second
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     var corruptCheckTime e2e.EPClusterOption
     if useExperimentalFlag {
@@ -233,7 +233,7 @@ func testPeriodicCheckDetectsCorruption(t *testing.T, useExperimentalFlag bool)
     err = testutil.CorruptBBolt(datadir.ToBackendFileName(epc.Procs[0].Config().DataDirPath))
     require.NoError(t, err)
-    err = epc.Procs[0].Restart(context.TODO())
+    err = epc.Procs[0].Restart(t.Context())
     require.NoError(t, err)
     time.Sleep(checkTime * 11 / 10)
     alarmResponse, err := cc.AlarmList(ctx)
@@ -252,7 +252,7 @@ func TestCompactHashCheckDetectCorruptionWithFeatureGate(t *testing.T) {
 func testCompactHashCheckDetectCorruption(t *testing.T, useFeatureGate bool) {
     checkTime := time.Second
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     opts := []e2e.EPClusterOption{e2e.WithKeepDataDir(true), e2e.WithCompactHashCheckTime(checkTime)}
     if useFeatureGate {
@@ -308,7 +308,7 @@ func TestCompactHashCheckDetectCorruptionInterruptWithExperimentalFlag(t *testin
 func testCompactHashCheckDetectCorruptionInterrupt(t *testing.T, useFeatureGate bool, useExperimentalFlag bool) {
     checkTime := time.Second
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 60*time.Second)
     defer cancel()
     slowCompactionNodeIndex := 1
@@ -407,7 +407,7 @@ func TestCtlV3LinearizableRead(t *testing.T) {
 func testCtlV3ReadAfterWrite(t *testing.T, ops ...clientv3.OpOption) {
     e2e.BeforeTest(t)
-    ctx := context.Background()
+    ctx := t.Context()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t,
         e2e.WithClusterSize(1),

View File

@@ -30,7 +30,7 @@ import (
 func TestAuthCluster(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t,

View File

@@ -43,7 +43,7 @@ func TestCtlV3AuthCertCNAndUsernameNoPassword(t *testing.T) {
 func TestCtlV3AuthCertCNWithWithConcurrentOperation(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     // apply the certificate which has `root` CommonName,

View File

@@ -110,7 +110,7 @@ func TestAuthority(t *testing.T) {
     for _, clusterSize := range []int{1, 3} {
         t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
             e2e.BeforeTest(t)
-            ctx, cancel := context.WithCancel(context.Background())
+            ctx, cancel := context.WithCancel(t.Context())
             defer cancel()
             cfg := e2e.NewConfigNoTLS()
@@ -125,7 +125,7 @@ func TestAuthority(t *testing.T) {
                 cfg.BaseClientScheme = "unix"
             }
-            epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+            epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
            if err != nil {
                t.Fatalf("could not start etcd process cluster (%v)", err)
            }
@@ -159,7 +159,7 @@ func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluste
 func assertAuthority(t *testing.T, expectAuthorityPattern string, clus *e2e.EtcdProcessCluster) {
     for i := range clus.Procs {
-        line, _ := clus.Procs[i].Logs().ExpectWithContext(context.TODO(), expect.ExpectedResponse{Value: `http2: decoded hpack field header field ":authority"`})
+        line, _ := clus.Procs[i].Logs().ExpectWithContext(t.Context(), expect.ExpectedResponse{Value: `http2: decoded hpack field header field ":authority"`})
        line = strings.TrimSuffix(line, "\n")
        line = strings.TrimSuffix(line, "\r")

View File

@@ -33,7 +33,7 @@ import (
 func TestMemberReplace(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 20*time.Second)
     defer cancel()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t)
@@ -100,7 +100,7 @@ func TestMemberReplace(t *testing.T) {
 func TestMemberReplaceWithLearner(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 20*time.Second)
     defer cancel()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t)

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "encoding/json"
     "errors"
     "fmt"
@@ -69,7 +68,7 @@ func TestCtlV3MemberUpdatePeerTLS(t *testing.T) {
 func TestCtlV3ConsistentMemberList(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx := context.Background()
+    ctx := t.Context()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t,
         e2e.WithClusterSize(1),
@@ -290,7 +289,7 @@ func ctlV3MemberUpdate(cx ctlCtx, memberID, peerURL string) error {
 func TestRemoveNonExistingMember(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx := context.Background()
+    ctx := t.Context()
     cfg := e2e.ConfigStandalone(*e2e.NewConfig())
     epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(cfg))

View File

@@ -84,7 +84,7 @@ func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig, envVars
        TLS: tcfg,
    })
    require.NoError(t, err)
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
    resp, err := cli.Status(ctx, ep)
    if err != nil {
        t.Fatalf("failed to get status from endpoint %s: %v", ep, err)
@@ -145,7 +145,7 @@ func setupEtcdctlTest(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bo
    if !quorum {
        cfg = e2e.ConfigStandalone(*cfg)
    }
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
    if err != nil {
        t.Fatalf("could not start etcd process cluster (%v)", err)
    }

View File

@@ -169,7 +169,7 @@ func testIssue6361(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(1),
         e2e.WithKeepDataDir(true),
     )
@@ -222,7 +222,7 @@ func testIssue6361(t *testing.T) {
             epc.Procs[0].Config().Args[i+1] = newDataDir
         }
     }
-    require.NoError(t, epc.Procs[0].Restart(context.TODO()))
+    require.NoError(t, epc.Procs[0].Restart(t.Context()))
     t.Log("Ensuring the restored member has the correct data...")
     for i := range kvs {
@@ -294,7 +294,7 @@ func snapshotVersionTest(cx ctlCtx) {
 func TestRestoreCompactionRevBump(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(1),
         e2e.WithKeepDataDir(true),
     )
@@ -309,13 +309,13 @@ func TestRestoreCompactionRevBump(t *testing.T) {
     ctl := epc.Etcdctl()
-    watchCh := ctl.Watch(context.Background(), "foo", config.WatchOptions{Prefix: true})
+    watchCh := ctl.Watch(t.Context(), "foo", config.WatchOptions{Prefix: true})
     // flake-fix: the watch can sometimes miss the first put below causing test failure
     time.Sleep(100 * time.Millisecond)
     kvs := []testutils.KV{{Key: "foo1", Val: "val1"}, {Key: "foo2", Val: "val2"}, {Key: "foo3", Val: "val3"}}
     for i := range kvs {
-        require.NoError(t, ctl.Put(context.Background(), kvs[i].Key, kvs[i].Val, config.PutOptions{}))
+        require.NoError(t, ctl.Put(t.Context(), kvs[i].Key, kvs[i].Val, config.PutOptions{}))
     }
     watchTimeout := 1 * time.Second
@@ -337,10 +337,10 @@ func TestRestoreCompactionRevBump(t *testing.T) {
     // add some more kvs that are not in the snapshot that will be lost after restore
     unsnappedKVs := []testutils.KV{{Key: "unsnapped1", Val: "one"}, {Key: "unsnapped2", Val: "two"}, {Key: "unsnapped3", Val: "three"}}
     for i := range unsnappedKVs {
-        require.NoError(t, ctl.Put(context.Background(), unsnappedKVs[i].Key, unsnappedKVs[i].Val, config.PutOptions{}))
+        require.NoError(t, ctl.Put(t.Context(), unsnappedKVs[i].Key, unsnappedKVs[i].Val, config.PutOptions{}))
     }
-    membersBefore, err := ctl.MemberList(context.Background(), false)
+    membersBefore, err := ctl.MemberList(t.Context(), false)
     require.NoError(t, err)
     t.Log("Stopping the original server...")
@@ -374,12 +374,12 @@ func TestRestoreCompactionRevBump(t *testing.T) {
     // Verify that initial snapshot is created by the restore operation
     verifySnapshotMembers(t, epc, membersBefore)
-    require.NoError(t, epc.Restart(context.Background()))
+    require.NoError(t, epc.Restart(t.Context()))
     t.Log("Ensuring the restored member has the correct data...")
     hasKVs(t, ctl, kvs, currentRev, baseRev)
     for i := range unsnappedKVs {
-        v, gerr := ctl.Get(context.Background(), unsnappedKVs[i].Key, config.GetOptions{})
+        v, gerr := ctl.Get(t.Context(), unsnappedKVs[i].Key, config.GetOptions{})
         require.NoError(t, gerr)
         require.Equal(t, int64(0), v.Count)
     }
@@ -395,7 +395,7 @@ func TestRestoreCompactionRevBump(t *testing.T) {
     // clients might restart the watch at the old base revision, that should not yield any new data
     // everything up until bumpAmount+currentRev should return "already compacted"
     for i := bumpAmount - 2; i < bumpAmount+currentRev; i++ {
-        watchCh = ctl.Watch(context.Background(), "foo", config.WatchOptions{Prefix: true, Revision: int64(i)})
+        watchCh = ctl.Watch(t.Context(), "foo", config.WatchOptions{Prefix: true, Revision: int64(i)})
         cancelResult := <-watchCh
         require.Equal(t, v3rpc.ErrCompacted, cancelResult.Err())
         require.Truef(t, cancelResult.Canceled, "expected ongoing watch to be cancelled after restoring with --mark-compacted")
@@ -403,10 +403,10 @@ func TestRestoreCompactionRevBump(t *testing.T) {
     }
     // a watch after that revision should yield successful results when a new put arrives
-    ctx, cancel := context.WithTimeout(context.Background(), watchTimeout*5)
+    ctx, cancel := context.WithTimeout(t.Context(), watchTimeout*5)
     defer cancel()
     watchCh = ctl.Watch(ctx, "foo", config.WatchOptions{Prefix: true, Revision: int64(bumpAmount + currentRev + 1)})
-    require.NoError(t, ctl.Put(context.Background(), "foo4", "val4", config.PutOptions{}))
+    require.NoError(t, ctl.Put(t.Context(), "foo4", "val4", config.PutOptions{}))
     watchRes, err = testutils.KeyValuesFromWatchChan(watchCh, 1, watchTimeout)
     require.NoErrorf(t, err, "failed to get key-values from watch channel %s", err)
     require.Equal(t, []testutils.KV{{Key: "foo4", Val: "val4"}}, watchRes)
@@ -414,7 +414,7 @@ func TestRestoreCompactionRevBump(t *testing.T) {
 func hasKVs(t *testing.T, ctl *e2e.EtcdctlV3, kvs []testutils.KV, currentRev int, baseRev int) {
     for i := range kvs {
-        v, err := ctl.Get(context.Background(), kvs[i].Key, config.GetOptions{})
+        v, err := ctl.Get(t.Context(), kvs[i].Key, config.GetOptions{})
         require.NoError(t, err)
         require.Equal(t, int64(1), v.Count)
         require.Equal(t, kvs[i].Val, string(v.Kvs[0].Value))
@@ -427,7 +427,7 @@ func hasKVs(t *testing.T, ctl *e2e.EtcdctlV3, kvs []testutils.KV, currentRev int
 func TestBreakConsistentIndexNewerThanSnapshot(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
     defer cancel()
     var snapshotCount uint64 = 50

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "os"
     "strings"
@@ -60,7 +59,7 @@ func TestClusterVersion(t *testing.T) {
                e2e.WithRollingStart(tt.rollingStart),
            )
-            epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+            epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
            if err != nil {
                t.Fatalf("could not start etcd process cluster (%v)", err)
            }
@@ -232,7 +231,7 @@ func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc fun
        ret.cfg.KeepDataDir = true
    }
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(&ret.cfg))
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(&ret.cfg))
    if err != nil {
        t.Fatalf("could not start etcd process cluster (%v)", err)
    }

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "testing"
     "time"
@@ -48,7 +47,7 @@ func TestDefragNoSpace(t *testing.T) {
        t.Run(tc.name, func(t *testing.T) {
            e2e.BeforeTest(t)
-            clus, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+            clus, err := e2e.NewEtcdProcessCluster(t.Context(), t,
                e2e.WithClusterSize(1),
                e2e.WithGoFailEnabled(true),
            )
@@ -57,12 +56,12 @@ func TestDefragNoSpace(t *testing.T) {
            member := clus.Procs[0]
-            require.NoError(t, member.Failpoints().SetupHTTP(context.Background(), tc.failpoint, fmt.Sprintf(`return("%s")`, tc.err)))
-            require.ErrorContains(t, member.Etcdctl().Defragment(context.Background(), config.DefragOption{Timeout: time.Minute}), tc.err)
+            require.NoError(t, member.Failpoints().SetupHTTP(t.Context(), tc.failpoint, fmt.Sprintf(`return("%s")`, tc.err)))
+            require.ErrorContains(t, member.Etcdctl().Defragment(t.Context(), config.DefragOption{Timeout: time.Minute}), tc.err)
            // Make sure etcd continues to run even after the failed defrag attempt
-            require.NoError(t, member.Etcdctl().Put(context.Background(), "foo", "bar", config.PutOptions{}))
-            value, err := member.Etcdctl().Get(context.Background(), "foo", config.GetOptions{})
+            require.NoError(t, member.Etcdctl().Put(t.Context(), "foo", "bar", config.PutOptions{}))
+            value, err := member.Etcdctl().Get(t.Context(), "foo", config.GetOptions{})
            require.NoError(t, err)
            require.Len(t, value.Kvs, 1)
            require.Equal(t, "bar", string(value.Kvs[0].Value))

View File

@@ -47,7 +47,7 @@ func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) {
        t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
    }
-    dc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    dc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
        e2e.WithBasePort(2000),
        e2e.WithVersion(e2e.LastVersion),
        e2e.WithClusterSize(1),
@@ -60,12 +60,12 @@ func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) {
    dcc := MustNewHTTPClient(t, dc.EndpointsHTTP(), nil)
    dkapi := client.NewKeysAPI(dcc)
-    ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout)
+    ctx, cancel := context.WithTimeout(t.Context(), integration.RequestTimeout)
    _, err = dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size))
    require.NoError(t, err)
    cancel()
-    c, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    c, err := e2e.NewEtcdProcessCluster(t.Context(), t,
        e2e.WithBasePort(3000),
        e2e.WithClusterSize(size),
        e2e.WithIsPeerTLS(peerTLS),

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "strconv"
     "strings"
@@ -55,7 +54,7 @@ func testClusterUsingV3Discovery(t *testing.T, discoveryClusterSize, targetClust
    e2e.BeforeTest(t)
    // step 1: start the discovery service
-    ds, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    ds, err := e2e.NewEtcdProcessCluster(t.Context(), t,
        e2e.WithBasePort(2000),
        e2e.WithClusterSize(discoveryClusterSize),
        e2e.WithClientConnType(clientTLSType),
@@ -122,5 +121,5 @@ func bootstrapEtcdClusterUsingV3Discovery(t *testing.T, discoveryEndpoints []str
    }
    // start the cluster
-    return e2e.StartEtcdProcessCluster(context.TODO(), t, epc, cfg)
+    return e2e.StartEtcdProcessCluster(t.Context(), t, epc, cfg)
 }

View File

@@ -40,7 +40,7 @@ func TestEtcdExampleConfig(t *testing.T) {
     proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--config-file", exampleConfigFile}, nil)
     require.NoError(t, err)
-    require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines))
+    require.NoError(t, e2e.WaitReadyExpectProc(t.Context(), proc, e2e.EtcdServerReadyLines))
     require.NoError(t, proc.Stop())
 }
@@ -80,7 +80,7 @@ func TestEtcdMultiPeer(t *testing.T) {
     }
     for _, p := range procs {
-        err := e2e.WaitReadyExpectProc(context.TODO(), p, e2e.EtcdServerReadyLines)
+        err := e2e.WaitReadyExpectProc(t.Context(), p, e2e.EtcdServerReadyLines)
         require.NoError(t, err)
     }
 }
@@ -102,7 +102,7 @@ func TestEtcdUnixPeers(t *testing.T) {
     )
     defer os.Remove("etcd.unix:1")
     require.NoError(t, err)
-    require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines))
+    require.NoError(t, e2e.WaitReadyExpectProc(t.Context(), proc, e2e.EtcdServerReadyLines))
     require.NoError(t, proc.Stop())
 }
@@ -150,7 +150,7 @@ func TestEtcdListenMetricsURLsWithMissingClientTLSInfo(t *testing.T) {
         _ = proc.Close()
     }()
-    require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{embed.ErrMissingClientTLSInfoForMetricsURL.Error()}))
+    require.NoError(t, e2e.WaitReadyExpectProc(t.Context(), proc, []string{embed.ErrMissingClientTLSInfoForMetricsURL.Error()}))
 }
 // TestEtcdPeerCNAuth checks that the inter peer auth based on CN of cert is working correctly.
@@ -224,7 +224,7 @@ func TestEtcdPeerCNAuth(t *testing.T) {
        } else {
            expect = []string{"remote error: tls: bad certificate"}
        }
-        err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+        err := e2e.WaitReadyExpectProc(t.Context(), p, expect)
        require.NoError(t, err)
    }
 }
@@ -311,7 +311,7 @@ func TestEtcdPeerMultiCNAuth(t *testing.T) {
        } else {
            expect = []string{"remote error: tls: bad certificate"}
        }
-        err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+        err := e2e.WaitReadyExpectProc(t.Context(), p, expect)
        require.NoError(t, err)
    }
 }
@@ -384,7 +384,7 @@ func TestEtcdPeerNameAuth(t *testing.T) {
        } else {
            expect = []string{"client certificate authentication failed"}
        }
-        err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+        err := e2e.WaitReadyExpectProc(t.Context(), p, expect)
        require.NoError(t, err)
    }
 }
@@ -490,7 +490,7 @@ func TestEtcdPeerLocalAddr(t *testing.T) {
        } else {
            expect = []string{"x509: certificate is valid for 127.0.0.1, not "}
        }
-        err := e2e.WaitReadyExpectProc(context.TODO(), p, expect)
+        err := e2e.WaitReadyExpectProc(t.Context(), p, expect)
        require.NoError(t, err)
    }
 }
@@ -570,7 +570,7 @@ func TestBootstrapDefragFlag(t *testing.T) {
     proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil)
     require.NoError(t, err)
-    require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{"Skipping defragmentation"}))
+    require.NoError(t, e2e.WaitReadyExpectProc(t.Context(), proc, []string{"Skipping defragmentation"}))
     require.NoError(t, proc.Stop())
     // wait for the process to exit, otherwise test will have leaked goroutine
@@ -582,7 +582,7 @@ func TestBootstrapDefragFlag(t *testing.T) {
 func TestSnapshotCatchupEntriesFlag(t *testing.T) {
     e2e.SkipInShortMode(t)
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
     defer cancel()
     proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--experimental-snapshot-catchup-entries", "1000"}, nil)
@@ -600,7 +600,7 @@ func TestSnapshotCatchupEntriesFlag(t *testing.T) {
 // TestEtcdHealthyWithTinySnapshotCatchupEntries ensures multi-node etcd cluster remains healthy with 1 snapshot catch up entry
 func TestEtcdHealthyWithTinySnapshotCatchupEntries(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(3),
         e2e.WithSnapshotCount(1),
         e2e.WithSnapshotCatchUpEntries(1),
@@ -613,7 +613,7 @@ func TestEtcdHealthyWithTinySnapshotCatchupEntries(t *testing.T) {
     })
     // simulate 10 clients keep writing to etcd in parallel with no error
-    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
     defer cancel()
     g, ctx := errgroup.WithContext(ctx)
     for i := 0; i < 10; i++ {
@@ -655,7 +655,7 @@ func TestEtcdTLSVersion(t *testing.T) {
        }, nil,
    )
    assert.NoError(t, err)
-    assert.NoErrorf(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines), "did not receive expected output from etcd process")
+    assert.NoErrorf(t, e2e.WaitReadyExpectProc(t.Context(), proc, e2e.EtcdServerReadyLines), "did not receive expected output from etcd process")
    assert.NoError(t, proc.Stop())
    proc.Wait() // ensure the port has been released
@@ -699,7 +699,7 @@ func TestEtcdDeprecatedFlags(t *testing.T) {
            tc.args, nil,
        )
        require.NoError(t, err)
-        require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{tc.expectedMsg}))
+        require.NoError(t, e2e.WaitReadyExpectProc(t.Context(), proc, []string{tc.expectedMsg}))
        require.NoError(t, proc.Stop())
        proc.Wait() // ensure the port has been released
@@ -727,7 +727,7 @@ func TestV2DeprecationEnforceDefaultValue(t *testing.T) {
        append(commonArgs, "--v2-deprecation", optionLevel), nil,
    )
    require.NoError(t, err)
-    require.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, []string{expectedDeprecationLevelMsg}))
+    require.NoError(t, e2e.WaitReadyExpectProc(t.Context(), proc, []string{expectedDeprecationLevelMsg}))
    require.NoError(t, proc.Stop())
    proc.Wait() // ensure the port has been released

View File

@@ -33,7 +33,7 @@ import (
 func TestGrpcProxyAutoSync(t *testing.T) {
     e2e.SkipInShortMode(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(1))
@@ -93,7 +93,7 @@ func TestGrpcProxyAutoSync(t *testing.T) {
 func TestGrpcProxyTLSVersions(t *testing.T) {
     e2e.SkipInShortMode(t)
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(1))

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "testing"
     "time"
@@ -89,7 +88,7 @@ func mixVersionsSnapshotTestByAddingMember(t *testing.T, cfg *e2e.EtcdProcessClu
     }
     t.Logf("Create an etcd cluster with %d member", cfg.ClusterSize)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithConfig(cfg),
         e2e.WithSnapshotCount(10),
     )
@@ -107,9 +106,9 @@ func mixVersionsSnapshotTestByAddingMember(t *testing.T, cfg *e2e.EtcdProcessClu
     newCfg.Version = newInstanceVersion
     newCfg.ServerConfig.SnapshotCatchUpEntries = 10
     t.Log("Starting a new etcd instance")
-    _, err = epc.StartNewProc(context.TODO(), &newCfg, t, false /* addAsLearner */)
+    _, err = epc.StartNewProc(t.Context(), &newCfg, t, false /* addAsLearner */)
     require.NoErrorf(t, err, "failed to start the new etcd instance")
-    defer epc.CloseProc(context.TODO(), nil)
+    defer epc.CloseProc(t.Context(), nil)
     assertKVHash(t, epc)
 }
@@ -136,7 +135,7 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, cfg *e2e.EtcdProcessCl
         e2e.WithSnapshotCatchUpEntries(10),
     }
     t.Logf("Create an etcd cluster with %d member", cfg.ClusterSize)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, clusterOptions...)
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, clusterOptions...)
     require.NoErrorf(t, err, "failed to start etcd cluster")
     defer func() {
         derr := epc.Close()
@@ -156,7 +155,7 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, cfg *e2e.EtcdProcessCl
     e2e.AssertProcessLogs(t, leaderEPC, "saved snapshot")
     t.Log("Restart the partitioned member")
-    err = toPartitionedMember.Restart(context.TODO())
+    err = toPartitionedMember.Restart(t.Context())
     require.NoError(t, err)
     assertKVHash(t, epc)
@@ -170,7 +169,7 @@ func writeKVs(t *testing.T, etcdctl *e2e.EtcdctlV3, startIdx, endIdx int) {
     for i := startIdx; i < endIdx; i++ {
         key := fmt.Sprintf("key-%d", i)
         value := fmt.Sprintf("value-%d", i)
-        err := etcdctl.Put(context.TODO(), key, value, config.PutOptions{})
+        err := etcdctl.Put(t.Context(), key, value, config.PutOptions{})
         require.NoErrorf(t, err, "failed to put %q", key)
     }
 }
@@ -182,7 +181,7 @@ func assertKVHash(t *testing.T, epc *e2e.EtcdProcessCluster) {
     }
     t.Log("Verify all nodes have exact same revision and hash")
     assert.Eventually(t, func() bool {
-        hashKvs, err := epc.Etcdctl().HashKV(context.TODO(), 0)
+        hashKvs, err := epc.Etcdctl().HashKV(t.Context(), 0)
         if err != nil {
             t.Logf("failed to get HashKV: %v", err)
             return false

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "sync"
     "testing"
@@ -39,7 +38,7 @@ func TestReleaseUpgrade(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithVersion(e2e.LastVersion),
         e2e.WithSnapshotCount(3),
         e2e.WithBasePeerScheme("unix"), // to avoid port conflict
@@ -82,7 +81,7 @@ func TestReleaseUpgrade(t *testing.T) {
        epc.Procs[i].Config().KeepDataDir = true
        t.Logf("Restarting node in the new version: %v", i)
-        if err = epc.Procs[i].Restart(context.TODO()); err != nil {
+        if err = epc.Procs[i].Restart(t.Context()); err != nil {
            t.Fatalf("error restarting etcd process (%v)", err)
        }
@@ -119,7 +118,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithVersion(e2e.LastVersion),
         e2e.WithSnapshotCount(10),
         e2e.WithBasePeerScheme("unix"),
@@ -158,7 +157,7 @@ func TestReleaseUpgradeWithRestart(t *testing.T) {
        go func(i int) {
            epc.Procs[i].Config().ExecPath = e2e.BinPath.Etcd
            epc.Procs[i].Config().KeepDataDir = true
-            assert.NoErrorf(t, epc.Procs[i].Restart(context.TODO()), "error restarting etcd process")
+            assert.NoErrorf(t, epc.Procs[i].Restart(t.Context()), "error restarting etcd process")
            wg.Done()
        }(i)
    }

View File

@@ -136,7 +136,7 @@ func TestFailoverOnDefrag(t *testing.T) {
    for _, tc := range tcs {
        t.Run(tc.name, func(t *testing.T) {
            e2e.BeforeTest(t)
-            clus, cerr := e2e.NewEtcdProcessCluster(context.TODO(), t, tc.clusterOptions...)
+            clus, cerr := e2e.NewEtcdProcessCluster(t.Context(), t, tc.clusterOptions...)
            require.NoError(t, cerr)
            t.Cleanup(func() { clus.Stop() })
@@ -165,7 +165,7 @@ func TestFailoverOnDefrag(t *testing.T) {
                    return lastErr
                default:
                }
-                getContext, cancel := context.WithTimeout(context.Background(), requestTimeout)
+                getContext, cancel := context.WithTimeout(t.Context(), requestTimeout)
                _, err := clusterClient.Get(getContext, "health")
                cancel()
                requestVolume++
@@ -199,6 +199,6 @@ func TestFailoverOnDefrag(t *testing.T) {
 }
 func triggerDefrag(t *testing.T, member e2e.EtcdProcess) {
-    require.NoError(t, member.Failpoints().SetupHTTP(context.Background(), "defragBeforeCopy", `sleep("10s")`))
-    require.NoError(t, member.Etcdctl().Defragment(context.Background(), config.DefragOption{Timeout: time.Minute}))
+    require.NoError(t, member.Failpoints().SetupHTTP(t.Context(), "defragBeforeCopy", `sleep("10s")`))
+    require.NoError(t, member.Etcdctl().Defragment(t.Context(), config.DefragOption{Timeout: time.Minute}))
 }

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "strings"
     "testing"
@@ -28,7 +27,7 @@ import (
 var defaultGatewayEndpoint = "127.0.0.1:23790"
 func TestGateway(t *testing.T) {
-    ec, err := e2e.NewEtcdProcessCluster(context.TODO(), t)
+    ec, err := e2e.NewEtcdProcessCluster(t.Context(), t)
     require.NoError(t, err)
     defer ec.Stop()

View File

@@ -47,7 +47,7 @@ func TestGracefulShutdown(t *testing.T) {
        t.Run(tc.name, func(t *testing.T) {
            testRunner := e2e.NewE2eRunner()
            testRunner.BeforeTest(t)
-            ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
            defer cancel()
            clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(tc.clusterSize))
            // clean up orphaned resources like closing member client.

View File

@@ -17,7 +17,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "testing"
@@ -63,7 +62,7 @@ func TestVerifyHashKVAfterCompact(t *testing.T) {
        }
    }
-    ctx := context.Background()
+    ctx := t.Context()
    cfg := e2e.NewConfigClientTLS()
    clus, err := e2e.NewEtcdProcessCluster(ctx, t,
@@ -107,7 +106,7 @@ func TestVerifyHashKVAfterTwoCompactionsOnTombstone_MixVersions(t *testing.T) {
        t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
    }
-    ctx := context.Background()
+    ctx := t.Context()
    cfg := e2e.NewConfigClientTLS()
    clus, err := e2e.NewEtcdProcessCluster(ctx, t,
@@ -149,7 +148,7 @@ func TestVerifyHashKVAfterCompactionOnLastTombstone_MixVersions(t *testing.T) {
        {"key0", "key1"},
    } {
        t.Run(fmt.Sprintf("#%v", keys), func(t *testing.T) {
-            ctx := context.Background()
+            ctx := t.Context()
            cfg := e2e.NewConfigClientTLS()
            clus, err := e2e.NewEtcdProcessCluster(ctx, t,
@@ -182,7 +181,7 @@ func populateDataForHashKV(t *testing.T, clus *e2e.EtcdProcessCluster, clientCfg
    c := newClient(t, clus.EndpointsGRPC(), clientCfg)
    defer c.Close()
-    ctx := context.Background()
+    ctx := t.Context()
    totalOperations := 40
    var (
@@ -218,7 +217,7 @@ func populateDataForHashKV(t *testing.T, clus *e2e.EtcdProcessCluster, clientCfg
 }
 func verifyConsistentHashKVAcrossAllMembers(t *testing.T, cli *e2e.EtcdctlV3, hashKVOnRev int64) {
-    ctx := context.Background()
+    ctx := t.Context()
     t.Logf("HashKV on rev=%d", hashKVOnRev)
     resp, err := cli.HashKV(ctx, hashKVOnRev)

View File

@@ -150,7 +150,7 @@ func TestHTTPHealthHandler(t *testing.T) {
    for _, tc := range tcs {
        t.Run(tc.name, func(t *testing.T) {
-            ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 20*time.Second)
            defer cancel()
            clus, err := e2e.NewEtcdProcessCluster(ctx, t, tc.clusterOptions...)
            require.NoError(t, err)
@@ -313,7 +313,7 @@ func TestHTTPLivezReadyzHandler(t *testing.T) {
    for _, tc := range tcs {
        t.Run(tc.name, func(t *testing.T) {
-            ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 20*time.Second)
            defer cancel()
            clus, err := e2e.NewEtcdProcessCluster(ctx, t, tc.clusterOptions...)
            require.NoError(t, err)
@@ -336,7 +336,7 @@ func TestHTTPLivezReadyzHandler(t *testing.T) {
 }
 func doHealthCheckAndVerify(t *testing.T, client *http.Client, url string, expectTimeoutError bool, expectStatusCode int, expectRespSubStrings []string) {
-    ctx, cancel := context.WithTimeout(context.Background(), healthCheckTimeout)
+    ctx, cancel := context.WithTimeout(t.Context(), healthCheckTimeout)
     req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
     require.NoErrorf(t, err, "failed to creat request %+v", err)
     resp, herr := client.Do(req)

View File

@@ -35,7 +35,7 @@ import (
 func TestRecoverSnapshotBackend(t *testing.T) {
     e2e.BeforeTest(t)
-    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
     defer cancel()
     epc, err := e2e.NewEtcdProcessCluster(ctx, t,

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "encoding/json"
     "testing"
     "time"
@@ -109,7 +108,7 @@ func TestNoErrorLogsDuringNormalOperations(t *testing.T) {
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            e2e.BeforeTest(t)
-            ctx := context.TODO()
+            ctx := t.Context()
            epc, err := e2e.NewEtcdProcessCluster(ctx, t, tc.options...)
            require.NoError(t, err)

View File

@@ -307,7 +307,7 @@ func TestNoMetricsMissing(t *testing.T) {
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            e2e.BeforeTest(t)
-            ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
            defer cancel()
            epc, err := e2e.NewEtcdProcessCluster(ctx, t, tc.options...)

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "testing"
     "time"
@@ -28,7 +27,7 @@ import (
 func TestWarningApplyDuration(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(1),
         e2e.WithWarningUnaryRequestDuration(time.Microsecond),
     )
@@ -42,7 +41,7 @@ func TestWarningApplyDuration(t *testing.T) {
     })
     cc := epc.Etcdctl()
-    err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{})
+    err = cc.Put(t.Context(), "foo", "bar", config.PutOptions{})
     require.NoErrorf(t, err, "error on put")
     // verify warning
@@ -55,7 +54,7 @@ func TestWarningApplyDuration(t *testing.T) {
 func TestExperimentalWarningApplyDuration(t *testing.T) {
     e2e.BeforeTest(t)
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(1),
         e2e.WithExperimentalWarningUnaryRequestDuration(time.Microsecond),
     )
@@ -69,7 +68,7 @@ func TestExperimentalWarningApplyDuration(t *testing.T) {
     })
     cc := epc.Etcdctl()
-    err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{})
+    err = cc.Put(t.Context(), "foo", "bar", config.PutOptions{})
     require.NoErrorf(t, err, "error on put")
     // verify warning
@@ -79,7 +78,7 @@ func TestExperimentalWarningApplyDuration(t *testing.T) {
 func TestBothWarningApplyDurationFlagsFail(t *testing.T) {
     e2e.BeforeTest(t)
-    _, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    _, err := e2e.NewEtcdProcessCluster(t.Context(), t,
         e2e.WithClusterSize(1),
         e2e.WithWarningUnaryRequestDuration(time.Second),
         e2e.WithExperimentalWarningUnaryRequestDuration(time.Second),

View File

@@ -15,7 +15,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "testing"
     "time"
@@ -34,7 +33,7 @@ func TestReproduce17780(t *testing.T) {
     compactionBatchLimit := 10
-    ctx := context.TODO()
+    ctx := t.Context()
     clus, cerr := e2e.NewEtcdProcessCluster(ctx, t,
         e2e.WithClusterSize(3),
         e2e.WithGoFailEnabled(true),

View File

@@ -60,7 +60,7 @@ func TestRuntimeReconfigGrowClusterSize(t *testing.T) {
    }
    for _, tc := range tcs {
        t.Run(tc.name, func(t *testing.T) {
-            ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
            defer cancel()
            epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(tc.clusterSize))
@@ -102,7 +102,7 @@ func TestRuntimeReconfigDecreaseClusterSize(t *testing.T) {
    }
    for _, tc := range tcs {
        t.Run(tc.name, func(t *testing.T) {
-            ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
            defer cancel()
            epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(tc.clusterSize))
@@ -140,7 +140,7 @@ func TestRuntimeReconfigRollingUpgrade(t *testing.T) {
    for _, tc := range tcs {
        t.Run(tc.name, func(t *testing.T) {
-            ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+            ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
            defer cancel()
            epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(3))

View File

@@ -17,7 +17,6 @@
 package e2e
 import (
-    "context"
     "fmt"
     "path/filepath"
     "strings"
@@ -138,7 +137,7 @@ func TestEtctlutlMigrate(t *testing.T) {
    }
    dataDirPath := t.TempDir()
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
        e2e.WithVersion(tc.clusterVersion),
        e2e.WithDataDirPath(dataDirPath),
        e2e.WithClusterSize(1),

View File

@@ -72,7 +72,7 @@ func TestV2DeprecationWriteOnlyWAL(t *testing.T) {
        e2e.WithEnableV2(true),
        e2e.WithDataDirPath(dataDirPath),
    ))
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
    require.NoError(t, err)
    memberDataDir := epc.Procs[0].Config().DataDirPath
@@ -101,7 +101,7 @@ func TestV2DeprecationWriteOnlySnapshot(t *testing.T) {
        e2e.WithDataDirPath(dataDirPath),
        e2e.WithSnapshotCount(10),
    ))
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
    require.NoError(t, err)
    memberDataDir := epc.Procs[0].Config().DataDirPath
@@ -123,7 +123,7 @@ func TestV2DeprecationSnapshotMatches(t *testing.T) {
     e2e.BeforeTest(t)
     lastReleaseData := t.TempDir()
     currentReleaseData := t.TempDir()
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
@@ -156,7 +156,7 @@ func TestV2DeprecationSnapshotMatches(t *testing.T) {
 func TestV2DeprecationSnapshotRecover(t *testing.T) {
     e2e.BeforeTest(t)
     dataDir := t.TempDir()
-    ctx, cancel := context.WithCancel(context.Background())
+    ctx, cancel := context.WithCancel(t.Context())
     defer cancel()
     if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
@@ -176,7 +176,7 @@ func TestV2DeprecationSnapshotRecover(t *testing.T) {
        e2e.WithVersion(e2e.CurrentVersion),
        e2e.WithDataDirPath(dataDir),
    ))
-    epc, err = e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+    epc, err = e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
    require.NoError(t, err)
    cc = epc.Etcdctl()
@@ -198,7 +198,7 @@ func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, da
        e2e.WithSnapshotCount(snapshotCount),
        e2e.WithKeepDataDir(true),
    ))
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
+    epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
    assert.NoError(t, err)
    return epc
 }
} }

View File

@@ -88,7 +88,7 @@ func testCurlV3MaxStream(t *testing.T, reachLimit bool, opts ...ctlOption) {
 	// Step 2: create the cluster
 	t.Log("Creating an etcd cluster")
-	epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(&cx.cfg))
+	epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(&cx.cfg))
 	require.NoErrorf(t, err, "Failed to start etcd cluster")
 	cx.epc = epc
 	cx.dataDir = epc.Procs[0].Config().DataDirPath
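
Handing t.Context() to e2e.NewEtcdProcessCluster, as above, ties whatever the framework derives from that base context to the test's lifetime. A minimal usage sketch (assumes the go.etcd.io/etcd/tests/v3/framework/e2e import path and that Close is safe to call from a cleanup):

package e2e_test

import (
	"testing"

	"go.etcd.io/etcd/tests/v3/framework/e2e"
)

func TestClusterTiedToTest(t *testing.T) {
	// The cluster's base context is the test context, so framework
	// goroutines derived from it unwind at test teardown.
	epc, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithClusterSize(1))
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { epc.Close() })
}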

View File

@@ -54,7 +54,7 @@ func testCurlV3Watch(cx ctlCtx) {
 func TestCurlWatchIssue19509(t *testing.T) {
 	e2e.BeforeTest(t)
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
 	defer cancel()
 	epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(e2e.NewConfigClientTLS()), e2e.WithClusterSize(1))
 	require.NoError(t, err)

View File

@@ -56,7 +56,7 @@ func TestLeaseRevoke_ClientSwitchToOtherMember(t *testing.T) {
 func testLeaseRevokeIssue(t *testing.T, clusterSize int, connectToOneFollower bool) {
 	e2e.BeforeTest(t)
-	ctx := context.Background()
+	ctx := t.Context()
 	t.Log("Starting a new etcd cluster")
 	epc, err := e2e.NewEtcdProcessCluster(ctx, t,
@@ -126,7 +126,7 @@ func testLeaseRevokeIssue(t *testing.T, clusterSize int, connectToOneFollower bo
 	err = epc.Procs[leaderIdx].Failpoints().SetupHTTP(ctx, "raftBeforeSave", `sleep("30s")`)
 	require.NoError(t, err)
-	cctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	cctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
 	t.Logf("Waiting for a new leader to be elected, old leader index: %d, old leader ID: %d", leaderIdx, oldLeaderID)
 	testutils.ExecuteUntil(cctx, t, func() {
 		for {
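
The cctx change above composes both bounds: the wait ends after five seconds or as soon as the test itself is torn down, whichever comes first. A minimal sketch of the combined behavior, assuming Go 1.24 (a short 50ms timeout stands in for the real 5s):

package e2e_test

import (
	"context"
	"testing"
	"time"
)

func TestBoundedWait(t *testing.T) {
	// Done() fires on the timeout or on the test-scoped cancellation
	// inherited from t.Context(), whichever happens first.
	ctx, cancel := context.WithTimeout(t.Context(), 50*time.Millisecond)
	defer cancel()
	<-ctx.Done()
	t.Logf("wait ended: %v", ctx.Err())
}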

View File

@@ -95,13 +95,13 @@ func TestWatchDelayForPeriodicProgressNotification(t *testing.T) {
 		cfg.Client = tc.client
 		cfg.ClientHTTPSeparate = tc.clientHTTPSeparate
 		t.Run(tc.name, func(t *testing.T) {
-			clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+			clus, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
 			require.NoError(t, err)
 			defer clus.Close()
 			c := newClient(t, clus.EndpointsGRPC(), tc.client)
-			require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
-			ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
+			require.NoError(t, fillEtcdWithData(t.Context(), c, tc.dbSizeBytes))
+			ctx, cancel := context.WithTimeout(t.Context(), watchTestDuration)
 			defer cancel()
 			g := errgroup.Group{}
 			continuouslyExecuteGetAll(ctx, t, &g, c)
@@ -120,13 +120,13 @@ func TestWatchDelayForManualProgressNotification(t *testing.T) {
 		cfg.Client = tc.client
 		cfg.ClientHTTPSeparate = tc.clientHTTPSeparate
 		t.Run(tc.name, func(t *testing.T) {
-			clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+			clus, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
 			require.NoError(t, err)
 			defer clus.Close()
 			c := newClient(t, clus.EndpointsGRPC(), tc.client)
-			require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
-			ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
+			require.NoError(t, fillEtcdWithData(t.Context(), c, tc.dbSizeBytes))
+			ctx, cancel := context.WithTimeout(t.Context(), watchTestDuration)
 			defer cancel()
 			g := errgroup.Group{}
 			continuouslyExecuteGetAll(ctx, t, &g, c)
@@ -157,13 +157,13 @@ func TestWatchDelayForEvent(t *testing.T) {
 		cfg.Client = tc.client
 		cfg.ClientHTTPSeparate = tc.clientHTTPSeparate
 		t.Run(tc.name, func(t *testing.T) {
-			clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+			clus, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
 			require.NoError(t, err)
 			defer clus.Close()
 			c := newClient(t, clus.EndpointsGRPC(), tc.client)
-			require.NoError(t, fillEtcdWithData(context.Background(), c, tc.dbSizeBytes))
-			ctx, cancel := context.WithTimeout(context.Background(), watchTestDuration)
+			require.NoError(t, fillEtcdWithData(t.Context(), c, tc.dbSizeBytes))
+			ctx, cancel := context.WithTimeout(t.Context(), watchTestDuration)
 			defer cancel()
 			g := errgroup.Group{}
 			g.Go(func() error {
@@ -270,14 +270,14 @@ func TestDeleteEventDrop_Issue18089(t *testing.T) {
 	cfg := e2e.DefaultConfig()
 	cfg.ClusterSize = 1
 	cfg.Client = e2e.ClientConfig{ConnectionType: e2e.ClientTLS}
-	clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg))
+	clus, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg))
 	require.NoError(t, err)
 	defer clus.Close()
 	c := newClient(t, clus.EndpointsGRPC(), cfg.Client)
 	defer c.Close()
-	ctx := context.Background()
+	ctx := t.Context()
 	const (
 		key = "k"
 		v2  = "v2"
@@ -345,14 +345,14 @@ func testStartWatcherFromCompactedRevision(t *testing.T, performCompactOnTombsto
 	e2e.BeforeTest(t)
 	cfg := e2e.DefaultConfig()
 	cfg.Client = e2e.ClientConfig{ConnectionType: e2e.ClientTLS}
-	clus, err := e2e.NewEtcdProcessCluster(context.Background(), t, e2e.WithConfig(cfg), e2e.WithClusterSize(1))
+	clus, err := e2e.NewEtcdProcessCluster(t.Context(), t, e2e.WithConfig(cfg), e2e.WithClusterSize(1))
 	require.NoError(t, err)
 	defer clus.Close()
 	c := newClient(t, clus.EndpointsGRPC(), cfg.Client)
 	defer c.Close()
-	ctx := context.Background()
+	ctx := t.Context()
 	key := "foo"
 	totalRev := 100
@@ -494,11 +494,11 @@ func testStartWatcherFromCompactedRevision(t *testing.T, performCompactOnTombsto
 func TestResumeCompactionOnTombstone(t *testing.T) {
 	e2e.BeforeTest(t)
-	ctx := context.Background()
+	ctx := t.Context()
 	compactBatchLimit := 5
 	cfg := e2e.DefaultConfig()
-	clus, err := e2e.NewEtcdProcessCluster(context.Background(),
+	clus, err := e2e.NewEtcdProcessCluster(t.Context(),
 		t,
 		e2e.WithConfig(cfg),
 		e2e.WithClusterSize(1),
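
The watch tests above all share one shape: fill the store, run background readers in an errgroup under a bounded context, then join them before teardown. A reduced sketch of that shape (the duration and loop body are stand-ins for the real traffic):

package e2e_test

import (
	"context"
	"testing"
	"time"

	"golang.org/x/sync/errgroup"
)

func TestWatchLoadShape(t *testing.T) {
	const watchTestDuration = 100 * time.Millisecond // stand-in value
	ctx, cancel := context.WithTimeout(t.Context(), watchTestDuration)
	defer cancel()

	g := errgroup.Group{}
	g.Go(func() error {
		for ctx.Err() == nil {
			time.Sleep(10 * time.Millisecond) // stand-in for a Get/Watch call
		}
		return nil
	})
	// Join the goroutines before the test returns and its context dies.
	if err := g.Wait(); err != nil {
		t.Fatal(err)
	}
}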

View File

@@ -15,7 +15,6 @@
 package e2e
 
 import (
-	"context"
 	"encoding/json"
 	"testing"
 	"time"
@@ -29,7 +28,7 @@ import (
 func TestServerJsonLogging(t *testing.T) {
 	e2e.BeforeTest(t)
-	epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t,
+	epc, err := e2e.NewEtcdProcessCluster(t.Context(), t,
 		e2e.WithClusterSize(1),
 		e2e.WithLogLevel("debug"),
 	)
@@ -115,7 +114,7 @@ func TestConnectionRejectMessage(t *testing.T) {
 		t.Log("Starting an etcd process and wait for it to get ready.")
 		p, err := e2e.SpawnCmd(commonArgs, nil)
 		require.NoError(t, err)
-		err = e2e.WaitReadyExpectProc(context.TODO(), p, e2e.EtcdServerReadyLines)
+		err = e2e.WaitReadyExpectProc(t.Context(), p, e2e.EtcdServerReadyLines)
 		require.NoError(t, err)
 		defer func() {
 			p.Stop()
@@ -127,7 +126,7 @@ func TestConnectionRejectMessage(t *testing.T) {
 		doneCh := make(chan struct{}, 1)
 		go func() {
 			startedCh <- struct{}{}
-			verr := e2e.WaitReadyExpectProc(context.TODO(), p, []string{tc.expectedErrMsg})
+			verr := e2e.WaitReadyExpectProc(t.Context(), p, []string{tc.expectedErrMsg})
 			assert.NoError(t, verr)
 			doneCh <- struct{}{}
 		}()
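
The final hunk is the one place where t.Context() crosses a goroutine boundary, which is safe provided the test receives from doneCh before returning; a goroutine left running past the test body would observe a canceled context. A minimal sketch of that synchronization (waitForLine is a hypothetical stand-in for e2e.WaitReadyExpectProc):

package e2e_test

import (
	"context"
	"testing"
)

// waitForLine stands in for e2e.WaitReadyExpectProc; here it only
// inspects the context it is given.
func waitForLine(ctx context.Context) error { return ctx.Err() }

func TestJoinBeforeReturn(t *testing.T) {
	doneCh := make(chan struct{}, 1)
	go func() {
		// t.Context() is still live because the test blocks on doneCh
		// below; without that join, this call could race teardown.
		if err := waitForLine(t.Context()); err != nil {
			t.Error(err)
		}
		doneCh <- struct{}{}
	}()
	<-doneCh // join the goroutine before the test returns
}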