feat: enable unparam lint
Signed-off-by: Wei Fu <fuweid89@gmail.com>
parent be83fffedb
commit aea1cd0077
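For context: unparam (mvdan.cc/unparam) reports function parameters and results that are never used, always receive the same value, or are always returned as the same constant (typically an always-nil error). The hunks below are the mechanical cleanup that enabling the linter forces: dropping such parameters and results, or prefixing intentionally unused parameters with an underscore. A minimal sketch of the two most common findings, using hypothetical names rather than code from this commit:

package demo

// session is a stand-in type for this sketch.
type session struct{ addr string }

// Before: unparam would report that "verbose" always receives false from
// every caller and that the error result is always nil.
func openSession(addr string, verbose bool) (*session, error) {
	if verbose {
		// never reached in this sketch; every caller passes false
	}
	return &session{addr: addr}, nil
}

// After: the constant parameter and the always-nil error are removed, and
// the call sites shrink accordingly.
func openSessionFixed(addr string) *session {
	return &session{addr: addr}
}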
@@ -8,7 +8,7 @@ package client

 import "net/http"

-func requestCanceler(tr CancelableTransport, req *http.Request) func() {
+func requestCanceler(req *http.Request) func() {
 	ch := make(chan struct{})
 	req.Cancel = ch

@@ -556,7 +556,7 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
 	}
 	defer hcancel()

-	reqcancel := requestCanceler(c.transport, req)
+	reqcancel := requestCanceler(req)

 	rtchan := make(chan roundTripResponse, 1)
 	go func() {

@@ -499,7 +499,7 @@ func (f fakeCancelContext) Done() <-chan struct{} {
 func (f fakeCancelContext) Err() error { return errFakeCancelContext }
 func (f fakeCancelContext) Value(key any) any { return 1 }

-func withTimeout(parent context.Context, timeout time.Duration) (
+func withTimeout(parent context.Context, _timeout time.Duration) (
 	ctx context.Context,
 	cancel context.CancelFunc) {
 	ctx = parent

@@ -57,7 +57,7 @@ func TestNewKeepAliveListener(t *testing.T) {
 		t.Fatalf("unable to create tmpfile: %v", err)
 	}
 	tlsInfo := TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile}
-	tlsInfo.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+	tlsInfo.parseFunc = fakeCertificateParserFunc(nil)
 	tlscfg, err := tlsInfo.ServerConfig()
 	if err != nil {
 		t.Fatalf("unexpected serverConfig error: %v", err)
@@ -60,11 +60,7 @@ func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, err
 	switch {
 	case lnOpts.IsSocketOpts():
 		// new ListenConfig with socket options.
-		config, err := newListenConfig(lnOpts.socketOpts)
-		if err != nil {
-			return nil, err
-		}
-		lnOpts.ListenConfig = config
+		lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts)
 		// check for timeout
 		fallthrough
 	case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():

@@ -129,7 +125,7 @@ func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, err
 	return newTLSListener(l, tlsinfo, checkSAN)
 }

-func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
+func newListenConfig(sopts *SocketOpts) net.ListenConfig {
 	lc := net.ListenConfig{}
 	if sopts != nil {
 		ctls := getControls(sopts)

@@ -137,7 +133,7 @@ func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
 			lc.Control = ctls.Control
 		}
 	}
-	return lc, nil
+	return lc
 }

 type TLSInfo struct {
@@ -28,7 +28,7 @@ import (
 	"go.uber.org/zap/zaptest"
 )

-func createSelfCert(t *testing.T, hosts ...string) (*TLSInfo, error) {
+func createSelfCert(t *testing.T) (*TLSInfo, error) {
 	return createSelfCertEx(t, "127.0.0.1")
 }

@@ -41,9 +41,9 @@ func createSelfCertEx(t *testing.T, host string, additionalUsages ...x509.ExtKey
 	return &info, nil
 }

-func fakeCertificateParserFunc(cert tls.Certificate, err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
+func fakeCertificateParserFunc(err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
 	return func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
-		return cert, err
+		return tls.Certificate{}, err
 	}
 }

@@ -367,7 +367,7 @@ func TestNewTransportTLSInfo(t *testing.T) {
 	}

 	for i, tt := range tests {
-		tt.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+		tt.parseFunc = fakeCertificateParserFunc(nil)
 		trans, err := NewTransport(tt, time.Second)
 		if err != nil {
 			t.Fatalf("Received unexpected error from NewTransport: %v", err)

@@ -458,7 +458,7 @@ func TestTLSInfoParseFuncError(t *testing.T) {
 	}

 	for i, tt := range tests {
-		tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, errors.New("fake"))
+		tt.info.parseFunc = fakeCertificateParserFunc(errors.New("fake"))

 		if _, err = tt.info.ServerConfig(); err == nil {
 			t.Errorf("#%d: expected non-nil error from ServerConfig()", i)

@@ -496,7 +496,7 @@ func TestTLSInfoConfigFuncs(t *testing.T) {
 	}

 	for i, tt := range tests {
-		tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
+		tt.info.parseFunc = fakeCertificateParserFunc(nil)

 		sCfg, err := tt.info.ServerConfig()
 		if err != nil {
@@ -220,7 +220,9 @@ func (c *Client) autoSync() {
 }

 // dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption {
+	var opts []grpc.DialOption
+
 	if c.cfg.DialKeepAliveTime > 0 {
 		params := keepalive.ClientParameters{
 			Time: c.cfg.DialKeepAliveTime,

@@ -248,7 +250,7 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts
 		grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)),
 	)

-	return opts, nil
+	return opts
 }

 // Dial connects to a single endpoint using the client's config.

@@ -289,10 +291,8 @@ func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, e

 // dial configures and dials any grpc balancer target.
 func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
-	opts, err := c.dialSetupOpts(creds, dopts...)
-	if err != nil {
-		return nil, fmt.Errorf("failed to configure dialer: %v", err)
-	}
+	opts := c.dialSetupOpts(creds, dopts...)
 	if c.authTokenBundle != nil {
 		opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
 	}
@@ -90,7 +90,7 @@ func (e *Election) Campaign(ctx context.Context, val string) error {
 		}
 	}

-	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+	err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
 	if err != nil {
 		// clean up in case of context cancel
 		select {

@@ -18,7 +18,6 @@ import (
 	"context"
 	"errors"

-	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	v3 "go.etcd.io/etcd/client/v3"
 )

@@ -47,19 +46,19 @@ func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) e

 // waitDeletes efficiently waits until all keys matching the prefix and no greater
 // than the create revision are deleted.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error {
 	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
 	for {
 		resp, err := client.Get(ctx, pfx, getOpts...)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if len(resp.Kvs) == 0 {
-			return resp.Header, nil
+			return nil
 		}
 		lastKey := string(resp.Kvs[0].Key)
 		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
-			return nil, err
+			return err
 		}
 	}
 }

@@ -22,7 +22,7 @@ import (

 func exampleEndpoints() []string { return nil }

-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(mocking func(), _example func()) {
 	mocking()
 	// TODO: Call 'example' when mocking() provides realistic mocking of transport.

@@ -84,7 +84,7 @@ func (m *Mutex) Lock(ctx context.Context) error {
 	client := m.s.Client()
 	// wait for deletion revisions prior to myKey
 	// TODO: early termination if the session key is deleted before other session keys with smaller revisions.
-	_, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+	werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
 	// release lock key if wait failed
 	if werr != nil {
 		m.Unlock(client.Ctx())

@@ -28,7 +28,7 @@ const (

 func exampleEndpoints() []string { return nil }

-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(mocking func(), _example func()) {
 	mocking()
 	// TODO: Call 'example' when mocking() provides realistic mocking of transport.
@@ -102,7 +102,7 @@ func RetryKVClient(c *Client) pb.KVClient {
 	}
 }
 func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
-	return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {

@@ -133,23 +133,23 @@ func RetryLeaseClient(c *Client) pb.LeaseClient {
 }

 func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
-	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
-	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
-	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
-	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
-	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
+	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...)
 }

 type retryClusterClient struct {

@@ -164,7 +164,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
 }

 func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
-	return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {

@@ -195,27 +195,27 @@ func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClie
 }

 func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
-	return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
-	return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
-	return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
-	return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
-	return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
-	return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {

@@ -238,19 +238,19 @@ func RetryAuthClient(c *Client) pb.AuthClient {
 }

 func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
-	return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
-	return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
-	return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
-	return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+	return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...)
 }

 func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {

@@ -377,10 +377,10 @@ var (
 // with the next iteration.
 type backoffFunc func(attempt uint) time.Duration

-// withRetryPolicy sets the retry policy of this call.
-func withRetryPolicy(rp retryPolicy) retryOption {
+// withRepeatablePolicy sets the repeatable policy of this call.
+func withRepeatablePolicy() retryOption {
 	return retryOption{applyFunc: func(o *options) {
-		o.retryPolicy = rp
+		o.retryPolicy = repeatable
 	}}
 }
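All of the retry.go hunks above are the same finding: withRetryPolicy was only ever called as withRetryPolicy(repeatable), so unparam reports a parameter that always receives the same value, and the fix folds the constant into a dedicated zero-argument helper. A generic sketch of that refactor under the same assumption (hypothetical names, not the etcd API):

package demo

type mode int

const fastMode mode = 0

type options struct{ m mode }

type option func(*options)

// Before: every call site reads withMode(fastMode), so unparam reports that
// m always receives the same value.
func withMode(m mode) option {
	return func(o *options) { o.m = m }
}

// After: the constant is baked into a zero-argument constructor, mirroring
// withRetryPolicy(repeatable) becoming withRepeatablePolicy().
func withFastMode() option {
	return func(o *options) { o.m = fastMode }
}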
@@ -410,11 +410,6 @@ function govet_shadow_pass {
   run_for_modules generic_checker govet_shadow_per_package "${shadow}"
 }

-function unparam_pass {
-  # TODO: transport/listener.go:129:60: newListenConfig - result 1 (error) is always nil
-  run_for_modules generic_checker run_go_tool "mvdan.cc/unparam"
-}
-
 function lint_pass {
   run_for_modules generic_checker run golangci-lint run --config "${ETCD_ROOT_DIR}/tools/.golangci.yaml"
 }
@@ -268,12 +268,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
 	}
 	e.Server.Start()

-	if err = e.servePeers(); err != nil {
-		return e, err
-	}
-	if err = e.serveClients(); err != nil {
-		return e, err
-	}
+	e.servePeers()
+
+	e.serveClients()
+
 	if err = e.serveMetrics(); err != nil {
 		return e, err
 	}

@@ -561,7 +559,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
 }

 // configure peer handlers after rafthttp.Transport started
-func (e *Etcd) servePeers() (err error) {
+func (e *Etcd) servePeers() {
 	ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)

 	for _, p := range e.Peers {

@@ -609,7 +607,6 @@ func (e *Etcd) servePeers() (err error) {
 			e.errHandler(l.serve())
 		}(pl)
 	}
-	return nil
 }

 func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {

@@ -727,7 +724,7 @@ func resolveUrl(u url.URL) (addr string, secure bool, network string) {
 	return addr, secure, network
 }

-func (e *Etcd) serveClients() (err error) {
+func (e *Etcd) serveClients() {
 	if !e.cfg.ClientTLSInfo.Empty() {
 		e.cfg.logger.Info(
 			"starting with client TLS",

@@ -771,7 +768,6 @@ func (e *Etcd) serveClients() (err error) {
 			e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, e.grpcGatewayDial(splitHttp), splitHttp, gopts...))
 		}(sctx)
 	}
-	return nil
 }

 func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
@@ -231,7 +231,7 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
 	if grpcProxyAdvertiseClientURL != "" {
 		proxyClient = mustNewProxyClient(lg, tlsInfo)
 	}
-	httpClient := mustNewHTTPClient(lg)
+	httpClient := mustNewHTTPClient()

 	srvhttp, httpl := mustHTTPListener(lg, m, tlsInfo, client, proxyClient)

@@ -505,7 +505,7 @@ func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
 }

 func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client, proxy *clientv3.Client) (*http.Server, net.Listener) {
-	httpClient := mustNewHTTPClient(lg)
+	httpClient := mustNewHTTPClient()
 	httpmux := http.NewServeMux()
 	httpmux.HandleFunc("/", http.NotFound)
 	grpcproxy.HandleMetrics(httpmux, httpClient, c.Endpoints())

@@ -535,7 +535,7 @@ func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c
 	return srvhttp, m.Match(cmux.Any())
 }

-func mustNewHTTPClient(lg *zap.Logger) *http.Client {
+func mustNewHTTPClient() *http.Client {
 	transport, err := newHTTPTransport(grpcProxyCA, grpcProxyCert, grpcProxyKey)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err)
@@ -544,7 +544,7 @@ func TestClusterAddMemberAsLearner(t *testing.T) {
 	st := mockstore.NewRecorder()
 	c := newTestCluster(t, nil)
 	c.SetStore(st)
-	c.AddMember(newTestMemberAsLearner(1, nil, "node1", nil), true)
+	c.AddMember(newTestMemberAsLearner(1, []string{}, "node1", []string{"http://node1"}), true)

 	wactions := []testutil.Action{
 		{

@@ -552,7 +552,7 @@ func TestClusterAddMemberAsLearner(t *testing.T) {
 			Params: []any{
 				path.Join(StoreMembersPrefix, "1", "raftAttributes"),
 				false,
-				`{"peerURLs":null,"isLearner":true}`,
+				`{"peerURLs":[],"isLearner":true}`,
 				false,
 				v2store.TTLOptionSet{ExpireTime: v2store.Permanent},
 			},
@@ -96,11 +96,7 @@ func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
 		return nil, err
 	}

-	s, err := bootstrapStorage(cfg, st, backend, bwal, cluster)
-	if err != nil {
-		backend.Close()
-		return nil, err
-	}
+	s := bootstrapStorage(cfg, st, backend, bwal, cluster)

 	if err = cluster.Finalize(cfg, s); err != nil {
 		backend.Close()

@@ -165,7 +161,7 @@ type bootstrappedRaft struct {
 	storage *raft.MemoryStorage
 }

-func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) (b *bootstrappedStorage, err error) {
+func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) *bootstrappedStorage {
 	if wal == nil {
 		wal = bootstrapNewWAL(cfg, cl)
 	}

@@ -174,7 +170,7 @@ func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappe
 		backend: be,
 		st: st,
 		wal: wal,
-	}, nil
+	}
 }

 func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
@@ -512,7 +512,7 @@ func TestHashKVHandler(t *testing.T) {
 	var revision = 1

 	etcdSrv := &EtcdServer{}
-	etcdSrv.cluster = newTestCluster(t, nil)
+	etcdSrv.cluster = newTestCluster(t)
 	etcdSrv.cluster.SetID(types.ID(localClusterID), types.ID(localClusterID))
 	be, _ := betesting.NewDefaultTmpBackend(t)
 	defer betesting.Close(t, be)

@@ -187,7 +187,7 @@ func TestApplyRepeat(t *testing.T) {
 	n.readyc <- raft.Ready{
 		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 	}
-	cl := newTestCluster(t, nil)
+	cl := newTestCluster(t)
 	st := v2store.New()
 	cl.SetStore(v2store.New())
 	be, _ := betesting.NewDefaultTmpBackend(t)

@@ -1366,7 +1366,7 @@ func TestAddMember(t *testing.T) {
 	n.readyc <- raft.Ready{
 		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 	}
-	cl := newTestCluster(t, nil)
+	cl := newTestCluster(t)
 	st := v2store.New()
 	cl.SetStore(st)
 	be, _ := betesting.NewDefaultTmpBackend(t)

@@ -1416,7 +1416,7 @@ func TestRemoveMember(t *testing.T) {
 	n.readyc <- raft.Ready{
 		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 	}
-	cl := newTestCluster(t, nil)
+	cl := newTestCluster(t)
 	st := v2store.New()
 	cl.SetStore(v2store.New())
 	be, _ := betesting.NewDefaultTmpBackend(t)

@@ -1467,7 +1467,7 @@ func TestUpdateMember(t *testing.T) {
 	n.readyc <- raft.Ready{
 		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
 	}
-	cl := newTestCluster(t, nil)
+	cl := newTestCluster(t)
 	st := v2store.New()
 	cl.SetStore(st)
 	cl.SetBackend(schema.NewMembershipBackend(lg, be))

@@ -1929,12 +1929,8 @@ func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error {
 	return nil
 }

-func newTestCluster(t testing.TB, membs []*membership.Member) *membership.RaftCluster {
-	c := membership.NewCluster(zaptest.NewLogger(t))
-	for _, m := range membs {
-		c.AddMember(m, true)
-	}
-	return c
+func newTestCluster(t testing.TB) *membership.RaftCluster {
+	return membership.NewCluster(zaptest.NewLogger(t))
 }

 func newTestClusterWithBackend(t testing.TB, membs []*membership.Member, be backend.Backend) *membership.RaftCluster {
@@ -71,7 +71,7 @@ func TestDowngradeWithUserAuth(t *testing.T) {
 	testDowngradeWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
 }

-func testDowngradeWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+func testDowngradeWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
 	// TODO(ahrtr): finish this after we added interface methods `Downgrade` into `Client`
 	t.Skip()
 }

@@ -121,7 +121,7 @@ func TestMoveLeaderWithUserAuth(t *testing.T) {
 	testMoveLeaderWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
 }

-func testMoveLeaderWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+func testMoveLeaderWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
 	// TODO(ahrtr): finish this after we added interface methods `MoveLeader` into `Client`
 	t.Skip()
 }

@@ -145,7 +145,7 @@ func TestSnapshotWithUserAuth(t *testing.T) {
 	testSnapshotWithAuth(t, false, true, WithAuth("user0", "user0Pass"))
 }

-func testSnapshotWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) {
+func testSnapshotWithAuth(t *testing.T, _expectConnectionError, _expectOperationError bool, _opts ...config.ClientOption) {
 	// TODO(ahrtr): finish this after we added interface methods `Snapshot` into `Client`
 	t.Skip()
 }
@@ -28,21 +28,21 @@ import (
 	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )

-func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDialTimeout(0)) }
+func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDefaultDialTimeout()) }
 func TestCtlV3PutClientTLSFlagByEnv(t *testing.T) {
 	testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS()), withFlagByEnv())
 }
 func TestCtlV3PutIgnoreValue(t *testing.T) { testCtl(t, putTestIgnoreValue) }
 func TestCtlV3PutIgnoreLease(t *testing.T) { testCtl(t, putTestIgnoreLease) }

-func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) }
+func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDefaultDialTimeout()) }

 func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) }
 func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) }
 func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) }
 func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) }

-func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) }
+func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDefaultDialTimeout()) }

 func TestCtlV3GetRevokedCRL(t *testing.T) {
 	cfg := e2e.NewConfig(

@@ -24,7 +24,7 @@ import (

 // TestCtlV3RoleAddTimeout tests add role with 0 grpc dial timeout while it tolerates dial timeout error.
 // This is unique in e2e test
-func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) }
+func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDefaultDialTimeout()) }

 func roleAddTest(cx ctlCtx) {
 	cmdSet := []struct {

@@ -447,5 +447,6 @@ func hasKVs(t *testing.T, ctl *e2e.EtcdctlV3, kvs []testutils.KV, currentRev int
 		require.Equal(t, int64(baseRev+i), v.Kvs[0].CreateRevision)
 		require.Equal(t, int64(baseRev+i), v.Kvs[0].ModRevision)
 		require.Equal(t, int64(1), v.Kvs[0].Version)
+		require.True(t, int64(currentRev) >= v.Kvs[0].ModRevision)
 	}
 }

@@ -158,6 +158,10 @@ func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption {
 	return func(cx *ctlCtx) { cx.cfg = cfg }
 }

+func withDefaultDialTimeout() ctlOption {
+	return withDialTimeout(0)
+}
+
 func withDialTimeout(timeout time.Duration) ctlOption {
 	return func(cx *ctlCtx) { cx.dialTimeout = timeout }
 }

@@ -25,7 +25,7 @@ func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) }
 func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) }
 func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) }
 func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) }
-func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) }
+func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDefaultDialTimeout()) }

 func TestCtlV3WatchInteractive(t *testing.T) {
 	testCtl(t, watchTest, withInteractive())
@@ -39,7 +39,7 @@ type bridge struct {
 	mu sync.Mutex
 }

-func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) {
+func newBridge(dialer Dialer, listener net.Listener) *bridge {
 	b := &bridge{
 		// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
 		dialer: dialer,

@@ -52,7 +52,7 @@ func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) {
 	close(b.pausec)
 	b.wg.Add(1)
 	go b.serveListen()
-	return b, nil
+	return b
 }

 func (b *bridge) Close() {

@@ -829,11 +829,8 @@ func (m *Member) addBridge() (*bridge, error) {
 	if err != nil {
 		return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err)
 	}
-	m.GrpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
-	if err != nil {
-		bridgeListener.Close()
-		return nil, err
-	}
+	m.GrpcBridge = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
 	addr := bridgeListener.Addr().String()
 	m.Logger.Info("LISTEN BRIDGE SUCCESS", zap.String("grpc-address", addr), zap.String("member", m.Name))
 	m.GrpcURL = m.clientScheme() + "://" + addr
@@ -357,14 +357,11 @@ func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail [
 		}
 		cmps = append(cmps, *cmp)
 	}
-	succOps, err := getOps(ifSucess)
-	if err != nil {
-		return nil, err
-	}
-	failOps, err := getOps(ifFail)
-	if err != nil {
-		return nil, err
-	}
+	succOps := getOps(ifSucess)
+	failOps := getOps(ifFail)
 	txnrsp, err := txn.
 		If(cmps...).
 		Then(succOps...).

@@ -373,7 +370,7 @@ func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail [
 	return txnrsp, err
 }

-func getOps(ss []string) ([]clientv3.Op, error) {
+func getOps(ss []string) []clientv3.Op {
 	var ops []clientv3.Op
 	for _, s := range ss {
 		s = strings.TrimSpace(s)

@@ -387,7 +384,7 @@ func getOps(ss []string) ([]clientv3.Op, error) {
 			ops = append(ops, clientv3.OpDelete(args[1]))
 		}
 	}
-	return ops, nil
+	return ops
 }

 func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan {
@@ -50,11 +50,11 @@ func ExampleSTM_apply() {
 		}
 	}

-	exchange := func(stm concurrency.STM) error {
+	exchange := func(stm concurrency.STM) {
 		from, to := rand.Intn(totalAccounts), rand.Intn(totalAccounts)
 		if from == to {
 			// nothing to do
-			return nil
+			return
 		}
 		// read values
 		fromK, toK := fmt.Sprintf("accts/%d", from), fmt.Sprintf("accts/%d", to)

@@ -70,7 +70,7 @@ func ExampleSTM_apply() {
 		// write back
 		stm.Put(fromK, fmt.Sprintf("%d", fromInt))
 		stm.Put(toK, fmt.Sprintf("%d", toInt))
-		return nil
+		return
 	}

 	// concurrently exchange values between accounts

@@ -79,7 +79,10 @@ func ExampleSTM_apply() {
 	for i := 0; i < 10; i++ {
 		go func() {
 			defer wg.Done()
-			if _, serr := concurrency.NewSTM(cli, exchange); serr != nil {
+			if _, serr := concurrency.NewSTM(cli, func(stm concurrency.STM) error {
+				exchange(stm)
+				return nil
+			}); serr != nil {
 				log.Fatal(serr)
 			}
 		}()
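The STM hunks above show the other side of the cleanup: concurrency.NewSTM still requires a func(concurrency.STM) error callback, so rather than keep an error result that is always nil on the local helper, the helper drops the error and a small inline adapter restores the required signature at the call site. A self-contained sketch of that adapter pattern with hypothetical names:

package demo

// run mimics a library entry point that requires an error-returning callback.
func run(apply func(int) error) error {
	return apply(42)
}

// step is a local helper that cannot fail, so it no longer returns an error.
func step(v int) {
	_ = v
}

// example adapts the error-free helper to run's callback type, returning nil
// on its behalf.
func example() error {
	return run(func(v int) error {
		step(v)
		return nil
	})
}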
@@ -26,7 +26,7 @@ var lazyCluster = integration.NewLazyCluster()

 func exampleEndpoints() []string { return lazyCluster.EndpointsGRPC() }

-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
+func forUnitTestsRunInMockedContext(_mocking func(), example func()) {
 	// For integration tests runs in the provided environment
 	example()
 }

@@ -29,7 +29,7 @@ import (
 )

 func TestRevisionMonotonicWithLeaderPartitions(t *testing.T) {
-	testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) {
+	testRevisionMonotonicWithFailures(t, 12*time.Second, func(clus *integration.Cluster) {
 		for i := 0; i < 5; i++ {
 			leader := clus.WaitLeader(t)
 			time.Sleep(time.Second)

@@ -528,7 +528,7 @@ func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient
 			t.Fatal(err)
 		}
 		for i := 0; i < 300; i++ {
-			go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }(i)
+			go func() { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }()
 		}
 	} else {
 		for i := 0; i < 100; i++ {
@@ -47,7 +47,7 @@ func TestSTMConflict(t *testing.T) {
 	for i := range keys {
 		curEtcdc := clus.RandClient()
 		srcKey := keys[i]
-		applyf := func(stm concurrency.STM) error {
+		applyf := func(stm concurrency.STM) {
 			src := stm.Get(srcKey)
 			// must be different key to avoid double-adding
 			dstKey := srcKey

@@ -59,16 +59,21 @@ func TestSTMConflict(t *testing.T) {
 			dstV, _ := strconv.ParseInt(dst, 10, 64)
 			if srcV == 0 {
 				// can't rand.Intn on 0, so skip this transaction
-				return nil
+				return
 			}
 			xfer := int64(rand.Intn(int(srcV)) / 2)
 			stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer))
 			stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer))
-			return nil
 		}
 		go func() {
 			iso := concurrency.WithIsolation(concurrency.RepeatableReads)
-			_, err := concurrency.NewSTM(curEtcdc, applyf, iso)
+			_, err := concurrency.NewSTM(curEtcdc,
+				func(stm concurrency.STM) error {
+					applyf(stm)
+					return nil
+				},
+				iso,
+			)
 			errc <- err
 		}()
 	}
@@ -45,7 +45,7 @@ var (
 		CompactAfterCommitBatchPanic, RaftBeforeLeaderSendPanic, BlackholePeerNetwork, DelayPeerNetwork,
 		RaftBeforeFollowerSendPanic, RaftBeforeApplySnapPanic, RaftAfterApplySnapPanic, RaftAfterWALReleasePanic,
 		RaftBeforeSaveSnapPanic, RaftAfterSaveSnapPanic, BlackholeUntilSnapshot,
-		beforeApplyOneConfChangeSleep,
+		BeforeApplyOneConfChangeSleep,
 		MemberReplace,
 	}
 )

@@ -119,7 +119,7 @@ func Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdPro
 		return
 	}

-func verifyClusterHealth(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster) error {
+func verifyClusterHealth(ctx context.Context, _ *testing.T, clus *e2e.EtcdProcessCluster) error {
 	for i := 0; i < len(clus.Procs); i++ {
 		clusterClient, err := clientv3.New(clientv3.Config{
 			Endpoints: clus.Procs[i].EndpointsGRPC(),

@@ -53,7 +53,7 @@ var (
 	RaftAfterWALReleasePanic Failpoint = goPanicFailpoint{"raftAfterWALRelease", triggerBlackhole{waitTillSnapshot: true}, Follower}
 	RaftBeforeSaveSnapPanic Failpoint = goPanicFailpoint{"raftBeforeSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
 	RaftAfterSaveSnapPanic Failpoint = goPanicFailpoint{"raftAfterSaveSnap", triggerBlackhole{waitTillSnapshot: true}, Follower}
-	beforeApplyOneConfChangeSleep Failpoint = killAndGofailSleep{"beforeApplyOneConfChange", time.Second}
+	BeforeApplyOneConfChangeSleep Failpoint = killAndGofailSleep{"beforeApplyOneConfChange", time.Second}
 )

 type goPanicFailpoint struct {
@@ -162,11 +162,11 @@ var commonTestScenarios = []modelTestCase{
 	{
 		name: "Stale Get need to match put if asking about matching revision",
 		operations: []testOperation{
-			{req: putRequest("key", "1"), resp: putResponse(2)},
-			{req: staleGetRequest("key", 2), resp: getResponse("key", "1", 3, 2), expectFailure: true},
-			{req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 3), expectFailure: true},
-			{req: staleGetRequest("key", 2), resp: getResponse("key", "2", 2, 2), expectFailure: true},
-			{req: staleGetRequest("key", 2), resp: getResponse("key", "1", 2, 2)},
+			{req: putRequest("key1", "1"), resp: putResponse(2)},
+			{req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 3, 2), expectFailure: true},
+			{req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 2, 3), expectFailure: true},
+			{req: staleGetRequest("key1", 2), resp: getResponse("key1", "2", 2, 2), expectFailure: true},
+			{req: staleGetRequest("key1", 2), resp: getResponse("key1", "1", 2, 2)},
 		},
 	},
 	{
@@ -23,7 +23,7 @@ import (
 	"go.etcd.io/etcd/tests/v3/robustness/report"
 )

-func validateWatch(t *testing.T, lg *zap.Logger, cfg Config, reports []report.ClientReport, eventHistory []model.WatchEvent) []model.WatchEvent {
+func validateWatch(t *testing.T, lg *zap.Logger, cfg Config, reports []report.ClientReport, eventHistory []model.WatchEvent) {
 	lg.Info("Validating watch")
 	// Validate etcd watch properties defined in https://etcd.io/docs/v3.6/learning/api_guarantees/#watch-apis
 	for _, r := range reports {

@@ -34,7 +34,6 @@ func validateWatch(t *testing.T, lg *zap.Logger, cfg Config, reports []report.Cl
 		validateReliable(t, eventHistory, r)
 		validateResumable(t, eventHistory, r)
 	}
-	return eventHistory
 }

 func validateBookmarkable(t *testing.T, report report.ClientReport) {
@@ -23,8 +23,9 @@ linters:
     - revive
     - staticcheck
    - stylecheck
-    - unused
     - unconvert # Remove unnecessary type conversions
+    - unparam
+    - unused
 linters-settings: # please keep this alphabetized
   goimports:
     local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages.
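With unparam wired into golangci-lint via the configuration above, an individual false positive (for example an error result kept for API stability) can be suppressed at the declaration with golangci-lint's standard nolint directive instead of disabling the linter; a hypothetical example:

package demo

// flushBuffer cannot fail today, but the error result is kept so call sites
// stay stable if a failing backend is added later. The nolint directive
// silences unparam's "result is always nil" report for this declaration only.
//
//nolint:unparam
func flushBuffer(data []byte) error {
	if len(data) == 0 {
		return nil
	}
	return nil
}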
@@ -18,7 +18,6 @@ require (
 	gotest.tools/gotestsum v1.11.0
 	gotest.tools/v3 v3.5.1
 	honnef.co/go/tools v0.4.6
-	mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b
 )

 require (

@@ -294,5 +294,3 @@ honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8=
 honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
 k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b h1:C8Pi6noat8BcrL9WnSRYeQ63fpkJk3hKVHtF5731kIw=
-mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b/go.mod h1:WqFWCt8MGPoFSYGsQSiIORRlYVhkJsIk+n2MY6rhNbA=

@@ -37,5 +37,4 @@ import (
 	_ "gotest.tools/gotestsum"
 	_ "gotest.tools/v3"
 	_ "honnef.co/go/tools/cmd/staticcheck"
-	_ "mvdan.cc/unparam"
 )