Remove code used to make v2 proposals
Signed-off-by: Marek Siarkowicz <siarkowicz@google.com>
@@ -62,123 +62,6 @@ import (
	"go.etcd.io/raft/v3/raftpb"
)

// TestDoLocalAction tests requests which do not need to go through raft to be applied,
// and are served through local data.
func TestDoLocalAction(t *testing.T) {
	tests := []struct {
		req pb.Request

		wresp    Response
		werr     error
		wactions []testutil.Action
	}{
		{
			pb.Request{Method: "GET", ID: 1, Wait: true},
			Response{Watcher: v2store.NewNopWatcher()}, nil, []testutil.Action{{Name: "Watch"}},
		},
		{
			pb.Request{Method: "GET", ID: 1},
			Response{Event: &v2store.Event{}}, nil,
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []any{"", false, false},
				},
			},
		},
		{
			pb.Request{Method: "HEAD", ID: 1},
			Response{Event: &v2store.Event{}}, nil,
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []any{"", false, false},
				},
			},
		},
		{
			pb.Request{Method: "BADMETHOD", ID: 1},
			Response{}, errors.ErrUnknownMethod, []testutil.Action{},
		},
	}
	for i, tt := range tests {
		st := mockstore.NewRecorder()
		srv := &EtcdServer{
			lgMu:     new(sync.RWMutex),
			lg:       zaptest.NewLogger(t),
			v2store:  st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		resp, err := srv.Do(context.Background(), tt.req)

		if err != tt.werr {
			t.Fatalf("#%d: err = %+v, want %+v", i, err, tt.werr)
		}
		if !reflect.DeepEqual(resp, tt.wresp) {
			t.Errorf("#%d: resp = %+v, want %+v", i, resp, tt.wresp)
		}
		gaction := st.Action()
		if !reflect.DeepEqual(gaction, tt.wactions) {
			t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions)
		}
	}
}

// TestDoBadLocalAction tests server requests which do not need to go through consensus,
// and return errors when they fetch from local data.
func TestDoBadLocalAction(t *testing.T) {
	storeErr := fmt.Errorf("bah")
	tests := []struct {
		req pb.Request

		wactions []testutil.Action
	}{
		{
			pb.Request{Method: "GET", ID: 1, Wait: true},
			[]testutil.Action{{Name: "Watch"}},
		},
		{
			pb.Request{Method: "GET", ID: 1},
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []any{"", false, false},
				},
			},
		},
		{
			pb.Request{Method: "HEAD", ID: 1},
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []any{"", false, false},
				},
			},
		},
	}
	for i, tt := range tests {
		st := mockstore.NewErrRecorder(storeErr)
		srv := &EtcdServer{
			lgMu:     new(sync.RWMutex),
			lg:       zaptest.NewLogger(t),
			v2store:  st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		resp, err := srv.Do(context.Background(), tt.req)

		if err != storeErr {
			t.Fatalf("#%d: err = %+v, want %+v", i, err, storeErr)
		}
		if !reflect.DeepEqual(resp, Response{}) {
			t.Errorf("#%d: resp = %+v, want %+v", i, resp, Response{})
		}
		gaction := st.Action()
		if !reflect.DeepEqual(gaction, tt.wactions) {
			t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions)
		}
	}
}

// TestApplyRepeat tests that server handles repeat raft messages gracefully
func TestApplyRepeat(t *testing.T) {
	lg := zaptest.NewLogger(t)
@@ -795,115 +678,6 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
	}
}

func TestDoProposal(t *testing.T) {
	tests := []pb.Request{
		{Method: "POST", ID: 1},
		{Method: "PUT", ID: 1},
		{Method: "DELETE", ID: 1},
		{Method: "GET", ID: 1, Quorum: true},
	}
	for i, tt := range tests {
		st := mockstore.NewRecorder()
		r := newRaftNode(raftNodeConfig{
			lg:          zaptest.NewLogger(t),
			Node:        newNodeCommitter(),
			storage:     mockstorage.NewStorageRecorder(""),
			raftStorage: raft.NewMemoryStorage(),
			transport:   newNopTransporter(),
		})
		srv := &EtcdServer{
			lgMu:         new(sync.RWMutex),
			lg:           zaptest.NewLogger(t),
			Cfg:          config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
			r:            *r,
			v2store:      st,
			reqIDGen:     idutil.NewGenerator(0, time.Time{}),
			SyncTicker:   &time.Ticker{},
			consistIndex: cindex.NewFakeConsistentIndex(0),
		}
		srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
		srv.start()
		resp, err := srv.Do(context.Background(), tt)
		srv.Stop()

		action := st.Action()
		if len(action) != 1 {
			t.Errorf("#%d: len(action) = %d, want 1", i, len(action))
		}
		if err != nil {
			t.Fatalf("#%d: err = %v, want nil", i, err)
		}
		// resp.Index is set in Do() based on the raft state; may either be 0 or 1
		wresp := Response{Event: &v2store.Event{}, Index: resp.Index}
		if !reflect.DeepEqual(resp, wresp) {
			t.Errorf("#%d: resp = %v, want %v", i, resp, wresp)
		}
	}
}

func TestDoProposalCancelled(t *testing.T) {
	wt := mockwait.NewRecorder()
	srv := &EtcdServer{
		lgMu:     new(sync.RWMutex),
		lg:       zaptest.NewLogger(t),
		Cfg:      config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
		r:        *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
		w:        wt,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}

	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := srv.Do(ctx, pb.Request{Method: "PUT"})

	if err != errors.ErrCanceled {
		t.Fatalf("err = %v, want %v", err, errors.ErrCanceled)
	}
	w := []testutil.Action{{Name: "Register"}, {Name: "Trigger"}}
	if !reflect.DeepEqual(wt.Action(), w) {
		t.Errorf("wt.action = %+v, want %+v", wt.Action(), w)
	}
}

func TestDoProposalTimeout(t *testing.T) {
	srv := &EtcdServer{
		lgMu:     new(sync.RWMutex),
		lg:       zaptest.NewLogger(t),
		Cfg:      config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
		r:        *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
		w:        mockwait.NewNop(),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}

	ctx, cancel := context.WithTimeout(context.Background(), 0)
	_, err := srv.Do(ctx, pb.Request{Method: "PUT"})
	cancel()
	if err != errors.ErrTimeout {
		t.Fatalf("err = %v, want %v", err, errors.ErrTimeout)
	}
}

func TestDoProposalStopped(t *testing.T) {
	srv := &EtcdServer{
		lgMu:     new(sync.RWMutex),
		lg:       zaptest.NewLogger(t),
		Cfg:      config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
		r:        *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: newNodeNop()}),
		w:        mockwait.NewNop(),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}

	srv.stopping = make(chan struct{})
	close(srv.stopping)
	_, err := srv.Do(context.Background(), pb.Request{Method: "PUT", ID: 1})
	if err != errors.ErrStopped {
		t.Errorf("err = %v, want %v", err, errors.ErrStopped)
	}
}

// TestSync tests sync 1. is nonblocking 2. proposes SYNC request.
func TestSync(t *testing.T) {
	n := newNodeRecorder()
@@ -1190,73 +964,6 @@ func TestSnapshotOrdering(t *testing.T) {
	}
}

// TestTriggerSnap for Applied > SnapshotCount should trigger a SaveSnap event
func TestTriggerSnap(t *testing.T) {
	be, tmpPath := betesting.NewDefaultTmpBackend(t)
	defer func() {
		os.RemoveAll(tmpPath)
	}()

	snapc := 10
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorderStream("")
	r := newRaftNode(raftNodeConfig{
		lg:          zaptest.NewLogger(t),
		Node:        newNodeCommitter(),
		raftStorage: raft.NewMemoryStorage(),
		storage:     p,
		transport:   newNopTransporter(),
	})
	srv := &EtcdServer{
		lgMu:         new(sync.RWMutex),
		lg:           zaptest.NewLogger(t),
		Cfg:          config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
		r:            *r,
		v2store:      st,
		reqIDGen:     idutil.NewGenerator(0, time.Time{}),
		SyncTicker:   &time.Ticker{},
		consistIndex: cindex.NewConsistentIndex(be),
	}
	srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}

	srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
	srv.be = be

	cl := membership.NewCluster(zaptest.NewLogger(t))
	srv.cluster = cl

	srv.start()

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		wcnt := 3 + snapc
		gaction, _ := p.Wait(wcnt)

		// each operation is recorded as a Save
		// (SnapshotCount+1) * Puts + SaveSnap = (SnapshotCount+1) * Save + SaveSnap + Release
		if len(gaction) != wcnt {
			t.Logf("gaction: %v", gaction)
			t.Errorf("len(action) = %d, want %d", len(gaction), wcnt)
			return
		}
		if !reflect.DeepEqual(gaction[wcnt-2], testutil.Action{Name: "SaveSnap"}) {
			t.Errorf("action = %s, want SaveSnap", gaction[wcnt-2])
		}

		if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "Release"}) {
			t.Errorf("action = %s, want Release", gaction[wcnt-1])
		}
	}()

	for i := 0; i < snapc+1; i++ {
		srv.Do(context.Background(), pb.Request{Method: "PUT"})
	}

	<-donec
	srv.Stop()
}

// TestConcurrentApplyAndSnapshotV3 will send out snapshots concurrently with
// proposals.
func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
@@ -1866,25 +1573,6 @@ func (n *nodeConfChangeCommitterRecorder) ApplyConfChange(conf raftpb.ConfChange
	return &raftpb.ConfState{}
}

// nodeCommitter commits proposed data immediately.
type nodeCommitter struct {
	readyNode
	index uint64
}

func newNodeCommitter() raft.Node {
	return &nodeCommitter{*newNopReadyNode(), 0}
}
func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error {
	n.index++
	ents := []raftpb.Entry{{Index: n.index, Data: data}}
	n.readyc <- raft.Ready{
		Entries:          ents,
		CommittedEntries: ents,
	}
	return nil
}

func newTestCluster(t testing.TB) *membership.RaftCluster {
	return membership.NewCluster(zaptest.NewLogger(t))
}