Fix deadlock in 'go test -tags cluster_proxy -v ./integration/... ./clientv3/...'
Cherry pick https://github.com/etcd-io/etcd/pull/12319 to 3.4.

Signed-off-by: Benjamin Wang <wachao@vmware.com>
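The deadlock: proxyCloser.Close() waits on the kv and lease proxies' done channels, but those goroutines only exit once the context they were started under is canceled, and (as the stale comment removed below admits) nothing canceled that context before Close() blocked. The patch threads a dedicated, cancelable context into the proxy constructors and has Close() cancel it first. A minimal standalone Go sketch of that failure shape and the fix (startProxy and closer are illustrative names, not etcd code):

	package main

	import (
		"context"
		"fmt"
	)

	// startProxy mimics the grpcproxy constructors: it spawns a goroutine whose
	// lifetime is bound to ctx and returns a channel that closes on shutdown.
	func startProxy(ctx context.Context) <-chan struct{} {
		donec := make(chan struct{})
		go func() {
			defer close(donec)
			<-ctx.Done() // the proxy only exits once its context is canceled
		}()
		return donec
	}

	type closer struct {
		cancel func() // cancels the dedicated proxy context
		donec  <-chan struct{}
	}

	func (c *closer) Close() {
		// Pre-patch behavior: go straight to <-c.donec, assuming someone else
		// already canceled the proxy context; that wait never returns.
		// Post-patch behavior: cancel the dedicated context, then wait.
		c.cancel()
		<-c.donec
	}

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		c := &closer{cancel: cancel, donec: startProxy(ctx)}
		c.Close() // returns promptly; without c.cancel() this blocks forever
		fmt.Println("closed cleanly")
	}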
integration/cluster_proxy.go

@@ -17,6 +17,7 @@
 package integration
 
 import (
+	"context"
 	"sync"
 
 	"go.etcd.io/etcd/clientv3"
@@ -35,16 +36,23 @@ var (
 const proxyNamespace = "proxy-namespace"
 
 type grpcClientProxy struct {
-	grpc    grpcAPI
-	wdonec  <-chan struct{}
-	kvdonec <-chan struct{}
-	lpdonec <-chan struct{}
+	ctx       context.Context
+	ctxCancel func()
+	grpc      grpcAPI
+	wdonec    <-chan struct{}
+	kvdonec   <-chan struct{}
+	lpdonec   <-chan struct{}
 }
 
 func toGRPC(c *clientv3.Client) grpcAPI {
 	pmu.Lock()
 	defer pmu.Unlock()
 
+	// dedicated context bound to 'grpc-proxy' lifetype
+	// (so in practice lifetime of the client connection to the proxy).
+	// TODO: Refactor to a separate clientv3.Client instance instead of the context alone.
+	ctx, ctxCancel := context.WithCancel(context.WithValue(context.TODO(), "_name", "grpcProxyContext"))
+
 	if v, ok := proxies[c]; ok {
 		return v.grpc
 	}
@@ -55,8 +63,8 @@ func toGRPC(c *clientv3.Client) grpcAPI {
 	c.Lease = namespace.NewLease(c.Lease, proxyNamespace)
 	// test coalescing/caching proxy
 	kvp, kvpch := grpcproxy.NewKvProxy(c)
-	wp, wpch := grpcproxy.NewWatchProxy(c)
-	lp, lpch := grpcproxy.NewLeaseProxy(c)
+	wp, wpch := grpcproxy.NewWatchProxy(ctx, c)
+	lp, lpch := grpcproxy.NewLeaseProxy(ctx, c)
 	mp := grpcproxy.NewMaintenanceProxy(c)
 	clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs
 	authp := grpcproxy.NewAuthProxy(c)
@@ -73,20 +81,21 @@ func toGRPC(c *clientv3.Client) grpcAPI {
 		adapter.LockServerToLockClient(lockp),
 		adapter.ElectionServerToElectionClient(electp),
 	}
-	proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch}
+	proxies[c] = grpcClientProxy{ctx: ctx, ctxCancel: ctxCancel, grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch}
 	return grpc
 }
 
 type proxyCloser struct {
 	clientv3.Watcher
-	wdonec  <-chan struct{}
-	kvdonec <-chan struct{}
-	lclose  func()
-	lpdonec <-chan struct{}
+	proxyCtxCancel func()
+	wdonec         <-chan struct{}
+	kvdonec        <-chan struct{}
+	lclose         func()
+	lpdonec        <-chan struct{}
 }
 
 func (pc *proxyCloser) Close() error {
-	// client ctx is canceled before calling close, so kv and lp will close out
+	pc.proxyCtxCancel()
 	<-pc.kvdonec
 	err := pc.Watcher.Close()
 	<-pc.wdonec
@@ -106,11 +115,12 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
 	lc := c.Lease
 	c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, c, cfg.DialTimeout)
 	c.Watcher = &proxyCloser{
-		Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch, c),
-		wdonec:  proxies[c].wdonec,
-		kvdonec: proxies[c].kvdonec,
-		lclose:  func() { lc.Close() },
-		lpdonec: proxies[c].lpdonec,
+		Watcher:        clientv3.NewWatchFromWatchClient(rpc.Watch, c),
+		wdonec:         proxies[c].wdonec,
+		kvdonec:        proxies[c].kvdonec,
+		lclose:         func() { lc.Close() },
+		lpdonec:        proxies[c].lpdonec,
+		proxyCtxCancel: proxies[c].ctxCancel,
 	}
 	pmu.Unlock()
 	return c, nil
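The other half of the fix is the signature change from NewWatchProxy(c) to NewWatchProxy(ctx, c): lifetime ownership moves from the client to the caller, who keeps the cancel func alongside the returned done channel. A hedged sketch of that constructor shape (newWatchProxy is a stand-in, not the real grpcproxy API):

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	type watchProxy struct{}

	// newWatchProxy mirrors the patched shape: the proxy runs until the
	// supplied ctx is done, then closes the done channel it handed back.
	func newWatchProxy(ctx context.Context) (*watchProxy, <-chan struct{}) {
		donec := make(chan struct{})
		go func() {
			defer close(donec)
			<-ctx.Done()
		}()
		return &watchProxy{}, donec
	}

	func main() {
		// A dedicated, debug-named context bound to the proxy's lifetime,
		// as toGRPC does above with the "_name" = "grpcProxyContext" value.
		ctx, cancel := context.WithCancel(
			context.WithValue(context.Background(), "_name", "grpcProxyContext"))
		_, donec := newWatchProxy(ctx)

		cancel() // the owner of the context decides when the proxy dies
		select {
		case <-donec:
			fmt.Println("watch proxy shut down")
		case <-time.After(time.Second):
			fmt.Println("timed out waiting for shutdown")
		}
	}

Storing ctxCancel in the proxies map, as the patch does, is what lets proxyCloser reach the cancel func later, once newClientV3 has wired everything together.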