Merge pull request #19461 from fuweid/fix-downgrade-issue

deflake: TestDowngradeCancellationAfterDowngrading1InClusterOf3

commit d52bd901b4
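In brief, the change threads a new downgradeEnabled flag through the DowngradeUpgradeMembers helpers and uses it on the upgrade path to derive the expected cluster version: while the downgrade is still enabled, the cluster version stays pinned at the lower version; once it has been cancelled, the expected cluster version is the minimum server binary version across members, computed by the new MinServerVersion helper. The validation also now waits etcdserver.HealthInterval so the leader can propagate the cluster version to newly started processes.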
@@ -174,7 +174,7 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 	t.Logf("Elect members for operations on members: %v", membersToChange)

 	t.Logf("Starting downgrade process to %q", lastVersionStr)
-	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, currentVersion, lastClusterVersion)
+	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, true, currentVersion, lastClusterVersion)
 	require.NoError(t, err)
 	if len(membersToChange) == len(epc.Procs) {
 		e2e.AssertProcessLogs(t, epc.Procs[epc.WaitLeader(t)], "the cluster has been downgraded")
@@ -210,11 +210,12 @@ func testDowngradeUpgrade(t *testing.T, numberOfMembersToDowngrade int, clusterS
 	beforeMembers, beforeKV = getMembersAndKeys(t, cc)

 	t.Logf("Starting upgrade process to %q", currentVersionStr)
-	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, lastClusterVersion, currentVersion)
+	downgradeEnabled := triggerCancellation == noCancellation && numberOfMembersToDowngrade < clusterSize
+	err = e2e.DowngradeUpgradeMembersByID(t, nil, epc, membersToChange, downgradeEnabled, lastClusterVersion, currentVersion)
 	require.NoError(t, err)
 	t.Log("Upgrade complete")

-	if triggerCancellation == noCancellation && numberOfMembersToDowngrade < clusterSize {
+	if downgradeEnabled {
 		t.Log("Downgrade should be still enabled")
 		e2e.ValidateDowngradeInfo(t, epc, &pb.DowngradeInfo{Enabled: true, TargetVersion: lastClusterVersion.String()})
 	} else {
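With this, the post-upgrade expectation is derived once: the downgrade should still be enabled only if it was never cancelled and only a strict subset of the cluster was downgraded, which is the same condition that previously guarded the validation branch directly.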
@@ -28,6 +28,7 @@ import (
 	"testing"
 	"time"

+	"github.com/coreos/go-semver/semver"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest"

@@ -711,6 +712,21 @@ func (cfg *EtcdProcessClusterConfig) binaryPath(i int) string {
 	return execPath
 }

+func (epc *EtcdProcessCluster) MinServerVersion() (*semver.Version, error) {
+	var minVersion *semver.Version
+	for _, member := range epc.Procs {
+		ver, err := GetVersionFromBinary(member.Config().ExecPath)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get version from member %s binary: %w", member.Config().Name, err)
+		}
+
+		if minVersion == nil || ver.LessThan(*minVersion) {
+			minVersion = ver
+		}
+	}
+	return minVersion, nil
+}
+
 func values(cfg embed.Config) map[string]string {
 	fs := flag.NewFlagSet("etcd", flag.ContinueOnError)
 	cfg.AddFlags(fs)
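For reference, a minimal standalone sketch of the min-version scan that the new MinServerVersion helper performs, using the same go-semver calls; the version strings below are made up for illustration:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// Hypothetical binary versions reported by three members mid-upgrade.
	binaries := []string{"3.6.0", "3.5.18", "3.6.0"}

	var minVersion *semver.Version
	for _, s := range binaries {
		ver := semver.New(s) // parses "X.Y.Z"; panics on invalid input
		// Keep the lowest version seen so far, as the helper does.
		if minVersion == nil || ver.LessThan(*minVersion) {
			minVersion = ver
		}
	}
	fmt.Println(minVersion) // 3.5.18
}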
@@ -29,6 +29,7 @@ import (

 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/version"
+	"go.etcd.io/etcd/server/v3/etcdserver"
 	"go.etcd.io/etcd/tests/v3/framework/testutils"
 )

@@ -127,14 +128,14 @@ func ValidateDowngradeInfo(t *testing.T, clus *EtcdProcessCluster, expected *pb.
 	}
 }

-func DowngradeUpgradeMembers(t *testing.T, lg *zap.Logger, clus *EtcdProcessCluster, numberOfMembersToChange int, currentVersion, targetVersion *semver.Version) error {
+func DowngradeUpgradeMembers(t *testing.T, lg *zap.Logger, clus *EtcdProcessCluster, numberOfMembersToChange int, downgradeEnabled bool, currentVersion, targetVersion *semver.Version) error {
 	membersToChange := rand.Perm(len(clus.Procs))[:numberOfMembersToChange]
 	t.Logf("Elect members for operations on members: %v", membersToChange)

-	return DowngradeUpgradeMembersByID(t, lg, clus, membersToChange, currentVersion, targetVersion)
+	return DowngradeUpgradeMembersByID(t, lg, clus, membersToChange, downgradeEnabled, currentVersion, targetVersion)
 }

-func DowngradeUpgradeMembersByID(t *testing.T, lg *zap.Logger, clus *EtcdProcessCluster, membersToChange []int, currentVersion, targetVersion *semver.Version) error {
+func DowngradeUpgradeMembersByID(t *testing.T, lg *zap.Logger, clus *EtcdProcessCluster, membersToChange []int, downgradeEnabled bool, currentVersion, targetVersion *semver.Version) error {
 	if lg == nil {
 		lg = clus.lg
 	}
@@ -162,15 +163,32 @@ func DowngradeUpgradeMembersByID(t *testing.T, lg *zap.Logger, clus *EtcdProcess
 		}
 	}

-	clusterVersion := targetVersion.String()
-	if !isDowngrade && len(membersToChange) != len(clus.Procs) {
-		clusterVersion = currentVersion.String()
-	}
+	t.Log("Waiting health interval to make sure the leader propagates version to new processes")
+	time.Sleep(etcdserver.HealthInterval)
+
 	lg.Info("Validating versions")
+	clusterVersion := targetVersion
+	if !isDowngrade {
+		if downgradeEnabled {
+			// If the downgrade isn't cancelled yet, then the cluster
+			// version will always stay at the lower version, no matter
+			// what's the binary version of each member.
+			clusterVersion = currentVersion
+		} else {
+			// If the downgrade has already been cancelled, then the
+			// cluster version is the minimal server version.
+			minVer, err := clus.MinServerVersion()
+			if err != nil {
+				return fmt.Errorf("failed to get min server version: %w", err)
+			}
+			clusterVersion = minVer
+		}
+	}
+
 	for _, memberID := range membersToChange {
 		member := clus.Procs[memberID]
 		ValidateVersion(t, clus.Cfg, member, version.Versions{
-			Cluster: clusterVersion,
+			Cluster: clusterVersion.String(),
 			Server:  targetVersion.String(),
 		})
 	}
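The branch logic added above can be distilled into a self-contained sketch; expectedClusterVersion is a hypothetical name introduced here, and the versions in main are made up:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// expectedClusterVersion mirrors the validation branches added to
// DowngradeUpgradeMembersByID: downgrades always move the cluster
// version to the target; upgrades depend on the downgrade state.
func expectedClusterVersion(isDowngrade, downgradeEnabled bool,
	currentVersion, targetVersion, minServerVersion *semver.Version) *semver.Version {
	if isDowngrade {
		return targetVersion
	}
	if downgradeEnabled {
		// Downgrade not yet cancelled: the cluster version stays at the
		// lower version, no matter what each member's binary version is.
		return currentVersion
	}
	// Downgrade cancelled: the cluster version follows the lowest
	// server binary version in the cluster.
	return minServerVersion
}

func main() {
	low, high := semver.New("3.5.18"), semver.New("3.6.0")
	// Partial upgrade after cancellation: expect the minimum binary version.
	fmt.Println(expectedClusterVersion(false, false, low, high, low)) // 3.5.18
}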
@@ -174,7 +174,7 @@ func (f memberDowngrade) Inject(ctx context.Context, t *testing.T, lg *zap.Logge
 	time.Sleep(etcdserver.HealthInterval)
 	e2e.DowngradeEnable(t, clus, lastVersion)

-	err = e2e.DowngradeUpgradeMembers(t, lg, clus, numberOfMembersToDowngrade, currentVersion, lastVersion)
+	err = e2e.DowngradeUpgradeMembers(t, lg, clus, numberOfMembersToDowngrade, true, currentVersion, lastVersion)
 	time.Sleep(etcdserver.HealthInterval)
 	return nil, err
 }
@@ -228,7 +228,7 @@ func (f memberDowngradeUpgrade) Inject(ctx context.Context, t *testing.T, lg *za

 	e2e.DowngradeEnable(t, clus, lastVersion)
 	// downgrade all members first
-	err = e2e.DowngradeUpgradeMembers(t, lg, clus, len(clus.Procs), currentVersion, lastVersion)
+	err = e2e.DowngradeUpgradeMembers(t, lg, clus, len(clus.Procs), true, currentVersion, lastVersion)
 	if err != nil {
 		return nil, err
 	}
@@ -241,7 +241,7 @@ func (f memberDowngradeUpgrade) Inject(ctx context.Context, t *testing.T, lg *za

 	// partial upgrade the cluster
 	numberOfMembersToUpgrade := rand.Int()%len(clus.Procs) + 1
-	err = e2e.DowngradeUpgradeMembers(t, lg, clus, numberOfMembersToUpgrade, lastVersion, currentVersion)
+	err = e2e.DowngradeUpgradeMembers(t, lg, clus, numberOfMembersToUpgrade, false, lastVersion, currentVersion)
 	time.Sleep(etcdserver.HealthInterval)
 	return nil, err
 }
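Note that the callers above only consult downgradeEnabled on the upgrade path: memberDowngrade and the full-cluster downgrade step pass true, while the partial upgrade step passes false, since by that point the full-cluster downgrade has completed and the cluster version should track the lowest member binary.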