Compare commits

44 commits (SHA1):

694728c496   1557f8b534   4b9bfa17ee   8de0c0419a
3039c639c0   91335d01bb   a8c84ffc93   939337f450
2a6d50470d   d62e39d5ca   7f0f5e2b3c   eb1589ad35
546d5fe835   fddae84ce2   6d406285e6   8dc20ead31
d3a3c3154e   d5572964e1   ea51c25030   d1447a8f5a
c28c14a5f4   f9eb75044a   2250f71e23   52be1d7b19
712024d3e5   7d99afdc7c   5ceea41af4   2f74456443
fc87ae4202   f1d7dd87da   ad212d339b   9f49665284
78f8d6e185   a954a0de53   0c3defdd2b   814588d166
2d2932822c   e211fb6de3   fb7e274309   4a61fcf42d
4c8fa30dda   01c4f35b30   15e9510d2c   09b7fd4975
.gitignore (vendored) — 1 change

@@ -1,3 +1,4 @@
+/agent-*
 /coverage
 /gopath
 /gopath.proto
.semaphore.sh (new executable file) — 16 changes

@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+TEST_SUFFIX=$(date +%s | base64 | head -c 15)
+
+TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' MANUAL_VER=v3.2.9"
+if [ "$TEST_ARCH" == "386" ]; then
+  TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'"
+fi
+
+docker run \
+  --rm \
+  --volume=`pwd`:/go/src/github.com/coreos/etcd \
+  gcr.io/etcd-development/etcd-test:go1.8.5 \
+  /bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log"
+
+! egrep "(--- FAIL:|panic: test timed out|appears to have leaked|Too many goroutines)" -B50 -A10 test-${TEST_SUFFIX}.log
.travis.yml — 73 changes

@@ -1,11 +1,13 @@
-dist: trusty
 language: go
 go_import_path: github.com/coreos/etcd
-sudo: false
+
+sudo: required
+
+services: docker
 
 go:
-- 1.8.3
+- 1.8.5
 - tip
 
 notifications:
   on_success: never
@@ -13,19 +15,25 @@ notifications:
 
 env:
   matrix:
   - TARGET=amd64
+  - TARGET=amd64-go-tip
   - TARGET=darwin-amd64
   - TARGET=windows-amd64
   - TARGET=arm64
   - TARGET=arm
   - TARGET=386
   - TARGET=ppc64le
 
 matrix:
   fast_finish: true
   allow_failures:
   - go: tip
+    env: TARGET=amd64-go-tip
   exclude:
+  - go: 1.8.5
+    env: TARGET=amd64-go-tip
+  - go: tip
+    env: TARGET=amd64
   - go: tip
     env: TARGET=darwin-amd64
   - go: tip
@@ -39,45 +47,42 @@ matrix:
   - go: tip
     env: TARGET=ppc64le
 
-addons:
-  apt:
-    sources:
-    - debian-sid
-    packages:
-    - libpcap-dev
-    - libaspell-dev
-    - libhunspell-dev
-    - shellcheck
-
 before_install:
-- go get -v -u github.com/chzchzchz/goword
-- go get -v -u github.com/coreos/license-bill-of-materials
-- go get -v -u honnef.co/go/tools/cmd/gosimple
-- go get -v -u honnef.co/go/tools/cmd/unused
-- go get -v -u honnef.co/go/tools/cmd/staticcheck
-- ./scripts/install-marker.sh amd64
+- docker pull gcr.io/etcd-development/etcd-test:go1.8.5
 
-# disable godep restore override
 install:
 - pushd cmd/etcd && go get -t -v ./... && popd
 
 script:
 - >
   case "${TARGET}" in
     amd64)
+      docker run --rm \
+        --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+        /bin/bash -c "GOARCH=amd64 ./test"
+      ;;
+    amd64-go-tip)
       GOARCH=amd64 ./test
       ;;
    darwin-amd64)
-      GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=darwin GOARCH=amd64 ./build
+      docker run --rm \
+        --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+        /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=darwin GOARCH=amd64 ./build"
       ;;
    windows-amd64)
-      GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=windows GOARCH=amd64 ./build
+      docker run --rm \
+        --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+        /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=windows GOARCH=amd64 ./build"
       ;;
    386)
-      GOARCH=386 PASSES="build unit" ./test
+      docker run --rm \
+        --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+        /bin/bash -c "GOARCH=386 PASSES='build unit' ./test"
       ;;
    *)
      # test building out of gopath
-      GO_BUILD_FLAGS="-a -v" GOPATH="" GOARCH="${TARGET}" ./build
+      docker run --rm \
+        --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+        /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOARCH='${TARGET}' ./build"
       ;;
   esac
Dockerfile-test (new file) — 57 changes

@@ -0,0 +1,57 @@
+FROM ubuntu:16.10
+
+RUN rm /bin/sh && ln -s /bin/bash /bin/sh
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+
+RUN apt-get -y update \
+  && apt-get -y install \
+  build-essential \
+  gcc \
+  apt-utils \
+  pkg-config \
+  software-properties-common \
+  apt-transport-https \
+  libssl-dev \
+  sudo \
+  bash \
+  curl \
+  wget \
+  tar \
+  git \
+  netcat \
+  libaspell-dev \
+  libhunspell-dev \
+  hunspell-en-us \
+  aspell-en \
+  shellcheck \
+  && apt-get -y update \
+  && apt-get -y upgrade \
+  && apt-get -y autoremove \
+  && apt-get -y autoclean
+
+ENV GOROOT /usr/local/go
+ENV GOPATH /go
+ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
+ENV GO_VERSION REPLACE_ME_GO_VERSION
+ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
+RUN rm -rf ${GOROOT} \
+  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
+  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
+  && go version
+
+RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
+WORKDIR ${GOPATH}/src/github.com/coreos/etcd
+
+ADD ./scripts/install-marker.sh /tmp/install-marker.sh
+
+RUN go get -v -u -tags spell github.com/chzchzchz/goword \
+  && go get -v -u github.com/coreos/license-bill-of-materials \
+  && go get -v -u honnef.co/go/tools/cmd/gosimple \
+  && go get -v -u honnef.co/go/tools/cmd/unused \
+  && go get -v -u honnef.co/go/tools/cmd/staticcheck \
+  && go get -v -u github.com/wadey/gocovmerge \
+  && go get -v -u github.com/gordonklaus/ineffassign \
+  && /tmp/install-marker.sh amd64 \
+  && rm -f /tmp/install-marker.sh \
+  && curl -s https://codecov.io/bash >/codecov \
+  && chmod 700 /codecov
@@ -60,7 +60,7 @@ For avoiding such a situation, the API layer performs *version number validation
 
 After authenticating with `Authenticate()`, a client can create a gRPC connection as it would without auth. In addition to the existing initialization process, the client must associate the token with the newly created connection. `grpc.WithPerRPCCredentials()` provides the functionality for this purpose.
 
-Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromContext()` on the server side. The server can learn who issued the request and when the user was authorized. This information is filled in by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
+Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromIncomingContext()` on the server side. The server can learn who issued the request and when the user was authorized. This information is filled in by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
 
 ### Checking permission in the state machine
 
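The hunk above mentions `grpc.WithPerRPCCredentials()` without showing its shape. Below is a minimal, hypothetical Go sketch of how a client could attach the token returned by `Authenticate()` to every RPC; the type, field, and metadata key names are illustrative assumptions, not etcd's actual implementation.

```go
package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// tokenCredential carries an auth token (illustrative name).
type tokenCredential struct {
	token string
}

// Compile-time check that tokenCredential satisfies credentials.PerRPCCredentials.
var _ credentials.PerRPCCredentials = (*tokenCredential)(nil)

// GetRequestMetadata attaches the token as request metadata on every RPC.
func (c *tokenCredential) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"token": c.token}, nil
}

// RequireTransportSecurity reports whether TLS is required; false here for brevity.
func (c *tokenCredential) RequireTransportSecurity() bool { return false }

// dialWithToken opens a gRPC connection that sends the token with each request.
func dialWithToken(endpoint, token string) (*grpc.ClientConn, error) {
	return grpc.Dial(endpoint,
		grpc.WithInsecure(),
		grpc.WithPerRPCCredentials(&tokenCredential{token: token}),
	)
}

func main() {
	conn, err := dialWithToken("localhost:2379", "example-token") // placeholder values
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```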
@@ -1,6 +1,49 @@
 # Monitoring etcd
 
-Each etcd server exports metrics under the `/metrics` path on its client port.
+Each etcd server provides local monitoring information on its client port through http endpoints. The monitoring data is useful for both system health checking and cluster debugging.
+
+## Debug endpoint
+
+If `--debug` is set, the etcd server exports debugging information on its client port under the `/debug` path. Take care when setting `--debug`, since there will be degraded performance and verbose logging.
+
+The `/debug/pprof` endpoint is the standard go runtime profiling endpoint. This can be used to profile CPU, heap, mutex, and goroutine utilization. For example, here `go tool pprof` gets the top 10 functions where etcd spends its time:
+
+```sh
+$ go tool pprof http://localhost:2379/debug/pprof/profile
+Fetching profile from http://localhost:2379/debug/pprof/profile
+Please wait... (30s)
+Saved profile in /home/etcd/pprof/pprof.etcd.localhost:2379.samples.cpu.001.pb.gz
+Entering interactive mode (type "help" for commands)
+(pprof) top10
+310ms of 480ms total (64.58%)
+Showing top 10 nodes out of 157 (cum >= 10ms)
+      flat  flat%   sum%        cum   cum%
+     130ms 27.08% 27.08%      130ms 27.08%  runtime.futex
+      70ms 14.58% 41.67%       70ms 14.58%  syscall.Syscall
+      20ms  4.17% 45.83%       20ms  4.17%  github.com/coreos/etcd/cmd/vendor/golang.org/x/net/http2/hpack.huffmanDecode
+      20ms  4.17% 50.00%       30ms  6.25%  runtime.pcvalue
+      20ms  4.17% 54.17%       50ms 10.42%  runtime.schedule
+      10ms  2.08% 56.25%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/etcdserver.(*EtcdServer).AuthInfoFromCtx
+      10ms  2.08% 58.33%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/etcdserver.(*EtcdServer).Lead
+      10ms  2.08% 60.42%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/pkg/wait.(*timeList).Trigger
+      10ms  2.08% 62.50%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/prometheus/client_golang/prometheus.(*MetricVec).hashLabelValues
+      10ms  2.08% 64.58%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/golang.org/x/net/http2.(*Framer).WriteHeaders
+```
+
+The `/debug/requests` endpoint gives gRPC traces and performance statistics through a web browser. For example, here is a `Range` request for the key `abc`:
+
+```
+When                          Elapsed (s)
+2017/08/18 17:34:51.999317    0.000244    /etcdserverpb.KV/Range
+17:34:51.999382   .    65     ... RPC: from 127.0.0.1:47204 deadline:4.999377747s
+17:34:51.999395   .    13     ... recv: key:"abc"
+17:34:51.999499   .   104     ... OK
+17:34:51.999535   .    36     ... sent: header:<cluster_id:14841639068965178418 member_id:10276657743932975437 revision:15 raft_term:17 > kvs:<key:"abc" create_revision:6 mod_revision:14 version:9 value:"asda" > count:1
+```
+
+## Metrics endpoint
+
+Each etcd server exports metrics under the `/metrics` path on its client port and optionally on interfaces given by `--listen-metrics-urls`.
 
 The metrics can be fetched with `curl`:
 
@@ -75,8 +118,6 @@ Access: proxy
 
 Then import the default [etcd dashboard template][template] and customize. For instance, if Prometheus data source name is `my-etcd`, the `datasource` field values in JSON also need to be `my-etcd`.
 
-See the [demo][demo].
-
 Sample dashboard:
 
 
@@ -85,4 +126,3 @@ Sample dashboard:
 [prometheus]: https://prometheus.io/
 [grafana]: http://grafana.org/
 [template]: ./grafana.json
-[demo]: http://dash.etcd.io/dashboard/db/test-etcd
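For readers who prefer Go over `curl`, here is a small sketch of scraping the `/metrics` endpoint described in the hunk above. The listen address and the `etcd_server_` metric prefix are assumptions about a locally running server.

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Fetch the Prometheus-format metrics page from a local etcd member.
	resp, err := http.Get("http://localhost:2379/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Print only lines from the etcd_server_* metric family, skipping comments.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		if strings.HasPrefix(line, "etcd_server_") {
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```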
@@ -6,7 +6,7 @@ This guide assumes operational knowledge of Amazon Web Services (AWS), specifica
 
 As a critical building block for distributed systems it is crucial to perform adequate capacity planning in order to support the intended cluster workload. As a highly available and strongly consistent data store increasing the number of nodes in an etcd cluster will generally affect performance adversely. This makes sense intuitively, as more nodes means more members for the leader to coordinate state across. The most direct way to increase throughput and decrease latency of an etcd cluster is allocate more disk I/O, network I/O, CPU, and memory to cluster members. In the event it is impossible to temporarily divert incoming requests to the cluster, scaling the EC2 instances which comprise the etcd cluster members one at a time may improve performance. It is, however, best to avoid bottlenecks through capacity planning.
 
-The etcd team has produced a [hardware recommendation guide]( ../op-guide/hardware.md) which is very useful for “ballparking” how many nodes and what instance type are necessary for a cluster.
+The etcd team has produced a [hardware recommendation guide](../op-guide/hardware.md) which is very useful for “ballparking” how many nodes and what instance type are necessary for a cluster.
 
 AWS provides a service for creating groups of EC2 instances which are dynamically sized to match load on the instances. Using an Auto Scaling Group ([ASG](http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroup.html)) to dynamically scale an etcd cluster is not recommended for several reasons including:
 
@@ -992,7 +992,7 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
 }
 
 func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
-	md, ok := metadata.FromContext(ctx)
+	md, ok := metadata.FromIncomingContext(ctx)
 	if !ok {
 		return nil, nil
 	}
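The hunk above switches the server side from `metadata.FromContext` to `metadata.FromIncomingContext`. A minimal sketch of that pattern follows — reading a `"token"` key from incoming gRPC metadata; the key name and error value are assumptions for illustration, not etcd's exact logic.

```go
package main

import (
	"context"
	"errors"

	"google.golang.org/grpc/metadata"
)

var errNoToken = errors.New("no auth token in request metadata")

// tokenFromCtx extracts an auth token sent by the client as per-RPC metadata.
func tokenFromCtx(ctx context.Context) (string, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", errNoToken
	}
	ts := md["token"] // metadata keys are lowercased by gRPC
	if len(ts) == 0 {
		return "", errNoToken
	}
	return ts[0], nil
}

func main() {
	// Outside an RPC there is no incoming metadata, so this reports errNoToken.
	if _, err := tokenFromCtx(context.Background()); err != nil {
		println(err.Error())
	}
}
```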
@@ -453,7 +453,8 @@ func TestAuthInfoFromCtx(t *testing.T) {
 		t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
 	}
 
-	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
+	// as if it came from RPC
+	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
 	ai, err = as.AuthInfoFromCtx(ctx)
 	if err != nil && ai != nil {
 		t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
@@ -465,19 +466,19 @@ func TestAuthInfoFromCtx(t *testing.T) {
 		t.Error(err)
 	}
 
-	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
+	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
 	_, err = as.AuthInfoFromCtx(ctx)
 	if err != ErrInvalidAuthToken {
 		t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
 	}
 
-	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
+	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
 	_, err = as.AuthInfoFromCtx(ctx)
 	if err != ErrInvalidAuthToken {
 		t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
 	}
 
-	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
+	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
 	ai, err = as.AuthInfoFromCtx(ctx)
 	if err != nil {
 		t.Error(err)
@@ -521,7 +522,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) {
 	donec := make(chan struct{})
 	go func() {
 		defer close(donec)
-		ctx := metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
+		ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
 		as.AuthInfoFromCtx(ctx)
 	}()
 	as.UserAdd(&pb.AuthUserAddRequest{Name: "test"})
@@ -27,19 +27,19 @@
 		]
 	},
 	{
-		"project": "github.com/boltdb/bolt",
+		"project": "github.com/cockroachdb/cmux",
 		"licenses": [
 			{
-				"type": "MIT License",
+				"type": "Apache License 2.0",
 				"confidence": 1
 			}
 		]
 	},
 	{
-		"project": "github.com/cockroachdb/cmux",
+		"project": "github.com/coreos/bbolt",
 		"licenses": [
 			{
-				"type": "Apache License 2.0",
+				"type": "MIT License",
 				"confidence": 1
 			}
 		]
@@ -345,12 +345,21 @@
 			}
 		]
 	},
+	{
+		"project": "google.golang.org/genproto/googleapis",
+		"licenses": [
+			{
+				"type": "Apache License 2.0",
+				"confidence": 1
+			}
+		]
+	},
 	{
 		"project": "google.golang.org/grpc",
 		"licenses": [
 			{
-				"type": "BSD 3-clause \"New\" or \"Revised\" License",
-				"confidence": 0.979253112033195
+				"type": "Apache License 2.0",
+				"confidence": 1
 			}
 		]
 	},
@@ -372,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
 			if err == context.Canceled || err == context.DeadlineExceeded {
 				return nil, nil, err
 			}
-			if isOneShot {
-				return nil, nil, err
-			}
-			continue
-		}
-		if resp.StatusCode/100 == 5 {
+		} else if resp.StatusCode/100 == 5 {
 			switch resp.StatusCode {
 			case http.StatusInternalServerError, http.StatusServiceUnavailable:
 				// TODO: make sure this is a no leader response
@@ -385,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
 			default:
 				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
 			}
-			if isOneShot {
-				return nil, nil, cerr.Errors[0]
+			err = cerr.Errors[0]
+		}
+		if err != nil {
+			if !isOneShot {
+				continue
 			}
-			continue
+			c.Lock()
+			c.pinned = (k + 1) % leps
+			c.Unlock()
+			return nil, nil, err
 		}
 		if k != pinned {
 			c.Lock()
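The two hunks above restructure the HTTP client so that a 5xx response is treated like any other per-endpoint error: non-one-shot requests fall through to the next endpoint, while one-shot requests pin the next endpoint and return immediately. The sketch below illustrates that control flow in isolation; it is not the etcd client itself, and the function and variable names are assumptions.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// doOnce tries each endpoint in order. A 5xx status is converted into an
// error; on error it either advances to the next endpoint (retrying mode)
// or returns immediately after noting the next endpoint to pin (one-shot mode).
func doOnce(endpoints []string, path string, oneShot bool) (*http.Response, int, error) {
	pinned := 0
	var err error
	for k, ep := range endpoints {
		var resp *http.Response
		resp, err = http.Get(ep + path)
		if err == nil && resp.StatusCode/100 == 5 {
			resp.Body.Close()
			err = fmt.Errorf("server error from %s: %s", ep, http.StatusText(resp.StatusCode))
		}
		if err != nil {
			if !oneShot {
				continue // fall through to the next endpoint
			}
			pinned = (k + 1) % len(endpoints) // remember where to start next time
			return nil, pinned, err
		}
		return resp, k, nil
	}
	if err == nil {
		err = errors.New("no endpoints configured")
	}
	return nil, pinned, err
}

func main() {
	// Placeholder endpoints; both are expected to fail in this sketch.
	eps := []string{"http://127.0.0.1:2379", "http://127.0.0.1:22379"}
	if _, pin, err := doOnce(eps, "/health", true); err != nil {
		fmt.Println("pinned endpoint index:", pin, "error:", err)
	}
}
```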
@@ -16,6 +16,7 @@ package client
 
 import (
 	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"math/rand"
@@ -304,7 +305,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
 	fakeErr := errors.New("fake!")
 	fakeURL := url.URL{}
 	tests := []struct {
 		client *httpClusterClient
+		ctx    context.Context
+
 		wantCode   int
 		wantErr    error
 		wantPinned int
@@ -395,10 +398,30 @@ func TestHTTPClusterClientDo(t *testing.T) {
 			wantCode:   http.StatusTeapot,
 			wantPinned: 1,
 		},
+
+		// 500-level errors cause one shot Do to fallthrough to next endpoint
+		{
+			client: &httpClusterClient{
+				endpoints: []url.URL{fakeURL, fakeURL},
+				clientFactory: newStaticHTTPClientFactory(
+					[]staticHTTPResponse{
+						{resp: http.Response{StatusCode: http.StatusBadGateway}},
+						{resp: http.Response{StatusCode: http.StatusTeapot}},
+					},
+				),
+				rand: rand.New(rand.NewSource(0)),
+			},
+			ctx:        context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
+			wantErr:    fmt.Errorf("client: etcd member returns server error [Bad Gateway]"),
+			wantPinned: 1,
+		},
 	}
 
 	for i, tt := range tests {
-		resp, _, err := tt.client.Do(context.Background(), nil)
+		if tt.ctx == nil {
+			tt.ctx = context.Background()
+		}
+		resp, _, err := tt.client.Do(tt.ctx, nil)
 		if !reflect.DeepEqual(tt.wantErr, err) {
 			t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
 			continue
@@ -407,11 +430,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
 		if resp == nil {
 			if tt.wantCode != 0 {
 				t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
+				continue
 			}
-			continue
-		}
-
-		if resp.StatusCode != tt.wantCode {
+		} else if resp.StatusCode != tt.wantCode {
 			t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
 			continue
 		}
@@ -1,6 +1,6 @@
 # etcd/clientv3
 
 [](https://godoc.org/github.com/coreos/etcd/clientv3)
 
 `etcd/clientv3` is the official Go etcd client for v3.
 
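Since the README above introduces `etcd/clientv3` as the official Go client, a brief usage sketch may help; the endpoint, key, and value below are placeholders and error handling is kept minimal.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Connect to a single local member; Endpoints and DialTimeout are placeholders.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Write a key with a bounded per-request timeout.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err = cli.Put(ctx, "sample_key", "sample_value")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}
```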
@@ -20,6 +20,7 @@ import (
 
 	"github.com/coreos/etcd/auth/authpb"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )
@@ -104,16 +105,16 @@ type auth struct {
 }
 
 func NewAuth(c *Client) Auth {
-	return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
+	return &auth{remote: RetryAuthClient(c)}
 }
 
 func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
-	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
 	return (*AuthEnableResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
-	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
 	return (*AuthDisableResponse)(resp), toErr(ctx, err)
 }
 
@@ -138,12 +139,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
 }
 
 func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
-	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
+	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
 	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
-	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
 	return (*AuthUserListResponse)(resp), toErr(ctx, err)
 }
 
@@ -168,12 +169,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
 }
 
 func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
-	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
+	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
 	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
-	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
 	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
 }
 
@@ -201,7 +202,7 @@ type authenticator struct {
 }
 
 func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
-	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
+	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
 	return (*AuthenticateResponse)(resp), toErr(ctx, err)
 }
 
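The hunks above drop the per-call `grpc.FailFast(false)` option and route the auth stub through `RetryAuthClient(c)`. The following is a generic, hypothetical sketch of that idea — re-issuing an RPC while the connection reports `Unavailable` — and not etcd's actual retry wrapper, which is more involved.

```go
package main

import (
	"context"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retryRPC re-runs f while it keeps failing with codes.Unavailable,
// giving up when ctx is done. The fixed 50ms backoff is an assumption.
func retryRPC(ctx context.Context, f func(ctx context.Context) error) error {
	for {
		err := f(ctx)
		if err == nil || status.Code(err) != codes.Unavailable {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(50 * time.Millisecond):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	// A stand-in RPC that always reports Unavailable; retryRPC keeps retrying
	// until the context deadline expires.
	_ = retryRPC(ctx, func(context.Context) error {
		return status.Error(codes.Unavailable, "connection is down")
	})
}
```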
@@ -1,356 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"net/url"
-	"strings"
-	"sync"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-)
-
-// ErrNoAddrAvilable is returned by Get() when the balancer does not have
-// any active connection to endpoints at the time.
-// This error is returned only when opts.BlockingWait is true.
-var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
-
-// simpleBalancer does the bare minimum to expose multiple eps
-// to the grpc reconnection code path
-type simpleBalancer struct {
-	// addrs are the client's endpoints for grpc
-	addrs []grpc.Address
-	// notifyCh notifies grpc of the set of addresses for connecting
-	notifyCh chan []grpc.Address
-
-	// readyc closes once the first connection is up
-	readyc    chan struct{}
-	readyOnce sync.Once
-
-	// mu protects upEps, pinAddr, and connectingAddr
-	mu sync.RWMutex
-
-	// upc closes when upEps transitions from empty to non-zero or the balancer closes.
-	upc chan struct{}
-
-	// downc closes when grpc calls down() on pinAddr
-	downc chan struct{}
-
-	// stopc is closed to signal updateNotifyLoop should stop.
-	stopc chan struct{}
-
-	// donec closes when all goroutines are exited
-	donec chan struct{}
-
-	// updateAddrsC notifies updateNotifyLoop to update addrs.
-	updateAddrsC chan struct{}
-
-	// grpc issues TLS cert checks using the string passed into dial so
-	// that string must be the host. To recover the full scheme://host URL,
-	// have a map from hosts to the original endpoint.
-	host2ep map[string]string
-
-	// pinAddr is the currently pinned address; set to the empty string on
-	// intialization and shutdown.
-	pinAddr string
-
-	closed bool
-}
-
-func newSimpleBalancer(eps []string) *simpleBalancer {
-	notifyCh := make(chan []grpc.Address, 1)
-	addrs := make([]grpc.Address, len(eps))
-	for i := range eps {
-		addrs[i].Addr = getHost(eps[i])
-	}
-	sb := &simpleBalancer{
-		addrs:        addrs,
-		notifyCh:     notifyCh,
-		readyc:       make(chan struct{}),
-		upc:          make(chan struct{}),
-		stopc:        make(chan struct{}),
-		downc:        make(chan struct{}),
-		donec:        make(chan struct{}),
-		updateAddrsC: make(chan struct{}, 1),
-		host2ep:      getHost2ep(eps),
-	}
-	close(sb.downc)
-	go sb.updateNotifyLoop()
-	return sb
-}
-
-func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
-
-func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	return b.upc
-}
-
-func (b *simpleBalancer) getEndpoint(host string) string {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-	return b.host2ep[host]
-}
-
-func getHost2ep(eps []string) map[string]string {
-	hm := make(map[string]string, len(eps))
-	for i := range eps {
-		_, host, _ := parseEndpoint(eps[i])
-		hm[host] = eps[i]
-	}
-	return hm
-}
-
-func (b *simpleBalancer) updateAddrs(eps []string) {
-	np := getHost2ep(eps)
-
-	b.mu.Lock()
-
-	match := len(np) == len(b.host2ep)
-	for k, v := range np {
-		if b.host2ep[k] != v {
-			match = false
-			break
-		}
-	}
-	if match {
-		// same endpoints, so no need to update address
-		b.mu.Unlock()
-		return
-	}
-
-	b.host2ep = np
-
-	addrs := make([]grpc.Address, 0, len(eps))
-	for i := range eps {
-		addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
-	}
-	b.addrs = addrs
-
-	// updating notifyCh can trigger new connections,
-	// only update addrs if all connections are down
-	// or addrs does not include pinAddr.
-	update := !hasAddr(addrs, b.pinAddr)
-	b.mu.Unlock()
-
-	if update {
-		select {
-		case b.updateAddrsC <- struct{}{}:
-		case <-b.stopc:
-		}
-	}
-}
-
-func hasAddr(addrs []grpc.Address, targetAddr string) bool {
-	for _, addr := range addrs {
-		if targetAddr == addr.Addr {
-			return true
-		}
-	}
-	return false
-}
-
-func (b *simpleBalancer) updateNotifyLoop() {
-	defer close(b.donec)
-
-	for {
-		b.mu.RLock()
-		upc, downc, addr := b.upc, b.downc, b.pinAddr
-		b.mu.RUnlock()
-		// downc or upc should be closed
-		select {
-		case <-downc:
-			downc = nil
-		default:
-		}
-		select {
-		case <-upc:
-			upc = nil
-		default:
-		}
-		switch {
-		case downc == nil && upc == nil:
-			// stale
-			select {
-			case <-b.stopc:
-				return
-			default:
-			}
-		case downc == nil:
-			b.notifyAddrs()
-			select {
-			case <-upc:
-			case <-b.updateAddrsC:
-				b.notifyAddrs()
-			case <-b.stopc:
-				return
-			}
-		case upc == nil:
-			select {
-			// close connections that are not the pinned address
-			case b.notifyCh <- []grpc.Address{{Addr: addr}}:
-			case <-downc:
-			case <-b.stopc:
-				return
-			}
-			select {
-			case <-downc:
-			case <-b.updateAddrsC:
-			case <-b.stopc:
-				return
-			}
-			b.notifyAddrs()
-		}
-	}
-}
-
-func (b *simpleBalancer) notifyAddrs() {
-	b.mu.RLock()
-	addrs := b.addrs
-	b.mu.RUnlock()
-	select {
-	case b.notifyCh <- addrs:
-	case <-b.stopc:
-	}
-}
-
-func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	// gRPC might call Up after it called Close. We add this check
-	// to "fix" it up at application layer. Or our simplerBalancer
-	// might panic since b.upc is closed.
-	if b.closed {
-		return func(err error) {}
-	}
-	// gRPC might call Up on a stale address.
-	// Prevent updating pinAddr with a stale address.
-	if !hasAddr(b.addrs, addr.Addr) {
-		return func(err error) {}
-	}
-	if b.pinAddr != "" {
-		return func(err error) {}
-	}
-	// notify waiting Get()s and pin first connected address
-	close(b.upc)
-	b.downc = make(chan struct{})
-	b.pinAddr = addr.Addr
-	// notify client that a connection is up
-	b.readyOnce.Do(func() { close(b.readyc) })
-	return func(err error) {
-		b.mu.Lock()
-		b.upc = make(chan struct{})
-		close(b.downc)
-		b.pinAddr = ""
-		b.mu.Unlock()
-	}
-}
-
-func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
-	var (
-		addr   string
-		closed bool
-	)
-
-	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
-	// an address it has notified via Notify immediately instead of blocking.
-	if !opts.BlockingWait {
-		b.mu.RLock()
-		closed = b.closed
-		addr = b.pinAddr
-		b.mu.RUnlock()
-		if closed {
-			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
-		}
-		if addr == "" {
-			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
-		}
-		return grpc.Address{Addr: addr}, func() {}, nil
-	}
-
-	for {
-		b.mu.RLock()
-		ch := b.upc
-		b.mu.RUnlock()
-		select {
-		case <-ch:
-		case <-b.donec:
-			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
-		case <-ctx.Done():
-			return grpc.Address{Addr: ""}, nil, ctx.Err()
-		}
-		b.mu.RLock()
-		closed = b.closed
-		addr = b.pinAddr
-		b.mu.RUnlock()
-		// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
-		if closed {
-			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
-		}
-		if addr != "" {
-			break
-		}
-	}
-	return grpc.Address{Addr: addr}, func() {}, nil
-}
-
-func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
-
-func (b *simpleBalancer) Close() error {
-	b.mu.Lock()
-	// In case gRPC calls close twice. TODO: remove the checking
-	// when we are sure that gRPC wont call close twice.
-	if b.closed {
-		b.mu.Unlock()
-		<-b.donec
-		return nil
-	}
-	b.closed = true
-	close(b.stopc)
-	b.pinAddr = ""
-
-	// In the case of following scenario:
-	// 1. upc is not closed; no pinned address
-	// 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks
-	// 3. clientconn.Close() calls balancer.Close(); closed = true
-	// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
-	// we must close upc so Get() exits from blocking on upc
-	select {
-	case <-b.upc:
-	default:
-		// terminate all waiting Get()s
-		close(b.upc)
-	}
-
-	b.mu.Unlock()
-
-	// wait for updateNotifyLoop to finish
-	<-b.donec
-	close(b.notifyCh)
-
-	return nil
-}
-
-func getHost(ep string) string {
-	url, uerr := url.Parse(ep)
-	if uerr != nil || !strings.Contains(ep, "://") {
-		return ep
-	}
-	return url.Host
-}
@@ -1,239 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"errors"
-	"net"
-	"sync"
-	"testing"
-	"time"
-
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"github.com/coreos/etcd/pkg/testutil"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-)
-
-var (
-	endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
-)
-
-func TestBalancerGetUnblocking(t *testing.T) {
-	sb := newSimpleBalancer(endpoints)
-	defer sb.Close()
-	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
-		t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't")
-	}
-	unblockingOpts := grpc.BalancerGetOptions{BlockingWait: false}
-
-	_, _, err := sb.Get(context.Background(), unblockingOpts)
-	if err != ErrNoAddrAvilable {
-		t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
-	}
-
-	down1 := sb.Up(grpc.Address{Addr: endpoints[1]})
-	if addrs := <-sb.Notify(); len(addrs) != 1 {
-		t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed")
-	}
-	down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
-	addrFirst, putFun, err := sb.Get(context.Background(), unblockingOpts)
-	if err != nil {
-		t.Errorf("Get() with up endpoints should success, got %v", err)
-	}
-	if addrFirst.Addr != endpoints[1] {
-		t.Errorf("Get() didn't return expected address, got %v", addrFirst)
-	}
-	if putFun == nil {
-		t.Errorf("Get() returned unexpected nil put function")
-	}
-	addrSecond, _, _ := sb.Get(context.Background(), unblockingOpts)
-	if addrFirst.Addr != addrSecond.Addr {
-		t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
-	}
-
-	down1(errors.New("error"))
-	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
-		t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection")
-	}
-	down2(errors.New("error"))
-	_, _, err = sb.Get(context.Background(), unblockingOpts)
-	if err != ErrNoAddrAvilable {
-		t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
-	}
-}
-
-func TestBalancerGetBlocking(t *testing.T) {
-	sb := newSimpleBalancer(endpoints)
-	defer sb.Close()
-	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
-		t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't")
-	}
-	blockingOpts := grpc.BalancerGetOptions{BlockingWait: true}
-
-	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
-	_, _, err := sb.Get(ctx, blockingOpts)
-	if err != context.DeadlineExceeded {
-		t.Errorf("Get() with no up endpoints should timeout, got %v", err)
-	}
-
-	downC := make(chan func(error), 1)
-
-	go func() {
-		// ensure sb.Up() will be called after sb.Get() to see if Up() releases blocking Get()
-		time.Sleep(time.Millisecond * 100)
-		f := sb.Up(grpc.Address{Addr: endpoints[1]})
-		if addrs := <-sb.Notify(); len(addrs) != 1 {
-			t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed")
-		}
-		downC <- f
-	}()
-	addrFirst, putFun, err := sb.Get(context.Background(), blockingOpts)
-	if err != nil {
-		t.Errorf("Get() with up endpoints should success, got %v", err)
-	}
-	if addrFirst.Addr != endpoints[1] {
-		t.Errorf("Get() didn't return expected address, got %v", addrFirst)
-	}
-	if putFun == nil {
-		t.Errorf("Get() returned unexpected nil put function")
-	}
-	down1 := <-downC
-
-	down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
-	addrSecond, _, _ := sb.Get(context.Background(), blockingOpts)
-	if addrFirst.Addr != addrSecond.Addr {
-		t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
-	}
-
-	down1(errors.New("error"))
-	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
-		t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection")
-	}
-	down2(errors.New("error"))
-	ctx, _ = context.WithTimeout(context.Background(), time.Millisecond*100)
-	_, _, err = sb.Get(ctx, blockingOpts)
-	if err != context.DeadlineExceeded {
-		t.Errorf("Get() with no up endpoints should timeout, got %v", err)
-	}
-}
-
-// TestBalancerDoNotBlockOnClose ensures that balancer and grpc don't deadlock each other
-// due to rapid open/close conn. The deadlock causes balancer.Close() to block forever.
-// See issue: https://github.com/coreos/etcd/issues/7283 for more detail.
-func TestBalancerDoNotBlockOnClose(t *testing.T) {
-	defer testutil.AfterTest(t)
-
-	kcl := newKillConnListener(t, 3)
-	defer kcl.close()
-
-	for i := 0; i < 5; i++ {
-		sb := newSimpleBalancer(kcl.endpoints())
-		conn, err := grpc.Dial("", grpc.WithInsecure(), grpc.WithBalancer(sb))
-		if err != nil {
-			t.Fatal(err)
-		}
-		kvc := pb.NewKVClient(conn)
-		<-sb.readyc
-
-		var wg sync.WaitGroup
-		wg.Add(100)
-		cctx, cancel := context.WithCancel(context.TODO())
-		for j := 0; j < 100; j++ {
-			go func() {
-				defer wg.Done()
-				kvc.Range(cctx, &pb.RangeRequest{}, grpc.FailFast(false))
-			}()
-		}
-		// balancer.Close() might block
-		// if balancer and grpc deadlock each other.
-		bclosec, cclosec := make(chan struct{}), make(chan struct{})
-		go func() {
-			defer close(bclosec)
-			sb.Close()
-		}()
-		go func() {
-			defer close(cclosec)
-			conn.Close()
-		}()
-		select {
-		case <-bclosec:
-		case <-time.After(3 * time.Second):
-			testutil.FatalStack(t, "balancer close timeout")
-		}
-		select {
-		case <-cclosec:
-		case <-time.After(3 * time.Second):
-			t.Fatal("grpc conn close timeout")
-		}
-
-		cancel()
-		wg.Wait()
-	}
-}
-
-// killConnListener listens incoming conn and kills it immediately.
-type killConnListener struct {
-	wg    sync.WaitGroup
-	eps   []string
-	stopc chan struct{}
-	t     *testing.T
-}
-
-func newKillConnListener(t *testing.T, size int) *killConnListener {
-	kcl := &killConnListener{stopc: make(chan struct{}), t: t}
-
-	for i := 0; i < size; i++ {
-		ln, err := net.Listen("tcp", ":0")
-		if err != nil {
-			t.Fatal(err)
-		}
-		kcl.eps = append(kcl.eps, ln.Addr().String())
-		kcl.wg.Add(1)
-		go kcl.listen(ln)
-	}
-	return kcl
-}
-
-func (kcl *killConnListener) endpoints() []string {
-	return kcl.eps
-}
-
-func (kcl *killConnListener) listen(l net.Listener) {
-	go func() {
-		defer kcl.wg.Done()
-		for {
-			conn, err := l.Accept()
-			select {
-			case <-kcl.stopc:
-				return
-			default:
-			}
-			if err != nil {
-				kcl.t.Fatal(err)
-			}
-			time.Sleep(1 * time.Millisecond)
-			conn.Close()
-		}
-	}()
-	<-kcl.stopc
-	l.Close()
-}
-
-func (kcl *killConnListener) close() {
-	close(kcl.stopc)
-	kcl.wg.Wait()
-}
@ -31,7 +31,9 @@ import (
|
|||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/keepalive"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -51,18 +53,17 @@ type Client struct {
|
|||||||
conn *grpc.ClientConn
|
conn *grpc.ClientConn
|
||||||
dialerrc chan error
|
dialerrc chan error
|
||||||
|
|
||||||
cfg Config
|
cfg Config
|
||||||
creds *credentials.TransportCredentials
|
creds *credentials.TransportCredentials
|
||||||
balancer *simpleBalancer
|
balancer *healthBalancer
|
||||||
retryWrapper retryRpcFunc
|
mu sync.Mutex
|
||||||
retryAuthWrapper retryRpcFunc
|
|
||||||
|
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
|
|
||||||
// Username is a username for authentication
|
// Username is a user name for authentication.
|
||||||
Username string
|
Username string
|
||||||
// Password is a password for authentication
|
// Password is a password for authentication.
|
||||||
Password string
|
Password string
|
||||||
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||||
tokenCred *authTokenCredential
|
tokenCred *authTokenCredential
|
||||||
@ -116,8 +117,23 @@ func (c *Client) Endpoints() (eps []string) {
|
|||||||
|
|
||||||
// SetEndpoints updates client's endpoints.
|
// SetEndpoints updates client's endpoints.
|
||||||
func (c *Client) SetEndpoints(eps ...string) {
|
func (c *Client) SetEndpoints(eps ...string) {
|
||||||
|
c.mu.Lock()
|
||||||
c.cfg.Endpoints = eps
|
c.cfg.Endpoints = eps
|
||||||
c.balancer.updateAddrs(eps)
|
c.mu.Unlock()
|
||||||
|
c.balancer.updateAddrs(eps...)
|
||||||
|
|
||||||
|
// updating notifyCh can trigger new connections,
|
||||||
|
// need update addrs if all connections are down
|
||||||
|
// or addrs does not include pinAddr.
|
||||||
|
c.balancer.mu.RLock()
|
||||||
|
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
|
||||||
|
c.balancer.mu.RUnlock()
|
||||||
|
if update {
|
||||||
|
select {
|
||||||
|
case c.balancer.updateAddrsC <- notifyNext:
|
||||||
|
case <-c.balancer.stopc:
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||||
@ -144,8 +160,10 @@ func (c *Client) autoSync() {
|
|||||||
case <-c.ctx.Done():
|
case <-c.ctx.Done():
|
||||||
return
|
return
|
||||||
case <-time.After(c.cfg.AutoSyncInterval):
|
case <-time.After(c.cfg.AutoSyncInterval):
|
||||||
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
|
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
||||||
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
|
err := c.Sync(ctx)
|
||||||
|
cancel()
|
||||||
|
if err != nil && err != c.ctx.Err() {
|
||||||
logger.Println("Auto sync endpoints failed:", err)
|
logger.Println("Auto sync endpoints failed:", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -174,7 +192,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
|||||||
host = endpoint
|
host = endpoint
|
||||||
url, uerr := url.Parse(endpoint)
|
url, uerr := url.Parse(endpoint)
|
||||||
if uerr != nil || !strings.Contains(endpoint, "://") {
|
if uerr != nil || !strings.Contains(endpoint, "://") {
|
||||||
return
|
return proto, host, scheme
|
||||||
}
|
}
|
||||||
scheme = url.Scheme
|
scheme = url.Scheme
|
||||||
|
|
||||||
@@ -188,7 +206,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
 	default:
 		proto, host = "", ""
 	}
-	return
+	return proto, host, scheme
 }
 
 func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
@@ -207,7 +225,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
 	default:
 		creds = nil
 	}
-	return
+	return creds
 }
 
 // dialSetupOpts gives the dial opts prior to any authentication
@@ -215,10 +233,17 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
 	if c.cfg.DialTimeout > 0 {
 		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
 	}
+	if c.cfg.DialKeepAliveTime > 0 {
+		params := keepalive.ClientParameters{
+			Time:    c.cfg.DialKeepAliveTime,
+			Timeout: c.cfg.DialKeepAliveTimeout,
+		}
+		opts = append(opts, grpc.WithKeepaliveParams(params))
+	}
 	opts = append(opts, dopts...)
 
 	f := func(host string, t time.Duration) (net.Conn, error) {
-		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
+		proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
 		if host == "" && endpoint != "" {
 			// dialing an endpoint not in the balancer; use
 			// endpoint passed into dial
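For reference, a hedged sketch of how the new keepalive settings translate into gRPC dial options; buildDialOpts and its parameters are illustrative, not part of the client API:

package grpcsketch

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// buildDialOpts mirrors the wiring above: with a keepalive time set, gRPC
// pings the server every keepAliveTime and closes the connection if no
// response arrives within keepAliveTimeout.
func buildDialOpts(dialTimeout, keepAliveTime, keepAliveTimeout time.Duration) []grpc.DialOption {
	var opts []grpc.DialOption
	if dialTimeout > 0 {
		opts = append(opts, grpc.WithTimeout(dialTimeout))
	}
	if keepAliveTime > 0 {
		opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    keepAliveTime,
			Timeout: keepAliveTimeout,
		}))
	}
	return opts
}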
@@ -311,7 +336,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
 	if err != nil {
 		if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
 			if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
-				err = grpc.ErrClientConnTimeout
+				err = context.DeadlineExceeded
 			}
 			return nil, err
 		}
@@ -333,7 +358,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
 // when the cluster has a leader.
 func WithRequireLeader(ctx context.Context) context.Context {
 	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-	return metadata.NewContext(ctx, md)
+	return metadata.NewOutgoingContext(ctx, md)
 }
 
 func newClient(cfg *Config) (*Client, error) {
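A small usage sketch for WithRequireLeader as touched above; the key, option, and helper name are illustrative. The attached metadata asks the server to fail requests served by a member that has no leader, rather than answering from a partitioned node.

package leadersketch

import (
	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

// watchWithLeader starts a watch whose stream is ended by the server while
// its member is cut off from the cluster leader.
func watchWithLeader(cli *clientv3.Client) clientv3.WatchChan {
	ctx := clientv3.WithRequireLeader(context.Background())
	return cli.Watch(ctx, "foo", clientv3.WithPrefix())
}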
@@ -366,9 +391,12 @@ func newClient(cfg *Config) (*Client, error) {
 		client.Password = cfg.Password
 	}
 
-	client.balancer = newSimpleBalancer(cfg.Endpoints)
+	client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
+		return grpcHealthCheck(client, ep)
+	})
+
 	// use Endpoints[0] so that for https:// without any tls config given, then
-	// grpc will assume the ServerName is in the endpoint.
+	// grpc will assume the certificate server name is the endpoint host.
 	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
 	if err != nil {
 		client.cancel()
@@ -376,21 +404,19 @@ func newClient(cfg *Config) (*Client, error) {
 		return nil, err
 	}
 	client.conn = conn
-	client.retryWrapper = client.newRetryWrapper()
-	client.retryAuthWrapper = client.newAuthRetryWrapper()
 
 	// wait for a connection
 	if cfg.DialTimeout > 0 {
 		hasConn := false
 		waitc := time.After(cfg.DialTimeout)
 		select {
-		case <-client.balancer.readyc:
+		case <-client.balancer.ready():
 			hasConn = true
 		case <-ctx.Done():
 		case <-waitc:
 		}
 		if !hasConn {
-			err := grpc.ErrClientConnTimeout
+			err := context.DeadlineExceeded
 			select {
 			case err = <-client.dialerrc:
 			default:
@@ -425,7 +451,7 @@ func (c *Client) checkVersion() (err error) {
 	errc := make(chan error, len(c.cfg.Endpoints))
 	ctx, cancel := context.WithCancel(c.ctx)
 	if c.cfg.DialTimeout > 0 {
-		ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
+		ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
 	}
 	wg.Add(len(c.cfg.Endpoints))
 	for _, ep := range c.cfg.Endpoints {
@@ -440,7 +466,7 @@ func (c *Client) checkVersion() (err error) {
 		vs := strings.Split(resp.Version, ".")
 		maj, min := 0, 0
 		if len(vs) >= 2 {
-			maj, rerr = strconv.Atoi(vs[0])
+			maj, _ = strconv.Atoi(vs[0])
 			min, rerr = strconv.Atoi(vs[1])
 		}
 		if maj < 3 || (maj == 3 && min < 2) {
@@ -472,14 +498,14 @@ func isHaltErr(ctx context.Context, err error) bool {
 	if err == nil {
 		return false
 	}
-	code := grpc.Code(err)
+	ev, _ := status.FromError(err)
 	// Unavailable codes mean the system will be right back.
 	// (e.g., can't connect, lost leader)
 	// Treat Internal codes as if something failed, leaving the
 	// system in an inconsistent state, but retrying could make progress.
 	// (e.g., failed in middle of send, corrupted frame)
 	// TODO: are permanent Internal errors possible from grpc?
-	return code != codes.Unavailable && code != codes.Internal
+	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
 }
 
 func toErr(ctx context.Context, err error) error {
@@ -490,7 +516,8 @@ func toErr(ctx context.Context, err error) error {
 	if _, ok := err.(rpctypes.EtcdError); ok {
 		return err
 	}
-	code := grpc.Code(err)
+	ev, _ := status.FromError(err)
+	code := ev.Code()
 	switch code {
 	case codes.DeadlineExceeded:
 		fallthrough
@@ -499,7 +526,6 @@ func toErr(ctx context.Context, err error) error {
 			err = ctx.Err()
 		}
 	case codes.Unavailable:
-		err = ErrNoAvailableEndpoints
 	case codes.FailedPrecondition:
 		err = grpc.ErrClientConnClosing
 	}
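The two hunks above move error inspection from grpc.Code(err) to status.FromError(err). A small self-contained sketch of that idiom; isRetryable is illustrative, not part of the package:

package errsketch

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isRetryable extracts a gRPC status from any error (non-status errors map to
// codes.Unknown) and treats Unavailable/Internal as transient, mirroring the
// isHaltErr logic above.
func isRetryable(err error) bool {
	if err == nil {
		return true
	}
	ev, _ := status.FromError(err)
	switch ev.Code() {
	case codes.Unavailable, codes.Internal:
		return true
	default:
		return false
	}
}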
@@ -22,8 +22,8 @@ import (
 
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/pkg/testutil"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 )
 
 func TestDialCancel(t *testing.T) {
@@ -45,7 +45,7 @@ func TestDialCancel(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// connect to ipv4 blackhole so dial blocks
+	// connect to ipv4 black hole so dial blocks
 	c.SetEndpoints("http://254.0.0.1:12345")
 
 	// issue Get to force redial attempts
@@ -97,7 +97,7 @@ func TestDialTimeout(t *testing.T) {
 	for i, cfg := range testCfgs {
 		donec := make(chan error)
 		go func() {
			// without timeout, dial continues forever on ipv4 black hole
 			c, err := New(cfg)
 			if c != nil || err == nil {
 				t.Errorf("#%d: new client should fail", i)
@@ -117,8 +117,8 @@ func TestDialTimeout(t *testing.T) {
 		case <-time.After(5 * time.Second):
 			t.Errorf("#%d: failed to timeout dial on time", i)
 		case err := <-donec:
-			if err != grpc.ErrClientConnTimeout {
-				t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
+			if err != context.DeadlineExceeded {
+				t.Errorf("#%d: unexpected error %v, want %v", i, err, context.DeadlineExceeded)
 			}
 		}
 	}
@@ -15,11 +15,12 @@
 package clientv3util_test
 
 import (
-	"context"
 	"log"
 
 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/clientv3/clientv3util"
 
+	"golang.org/x/net/context"
 )
 
 func ExampleKeyExists_put() {
@@ -33,7 +34,7 @@ func ExampleKeyExists_put() {
 	kvc := clientv3.NewKV(cli)
 
 	// perform a put only if key is missing
-	// It is useful to do the check (transactionally) to avoid overwriting
+	// It is useful to do the check atomically to avoid overwriting
 	// the existing key which would generate potentially unwanted events,
 	// unless of course you wanted to do an overwrite no matter what.
 	_, err = kvc.Txn(context.Background()).
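A compact sketch of the check-then-put pattern the example comment describes, assuming the clientv3util helpers referenced above; putIfAbsent is illustrative:

package utilsketch

import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/clientv3util"

	"golang.org/x/net/context"
)

// putIfAbsent writes the key only when it does not exist yet; the transaction
// reports whether the guard held, so existing values are never overwritten.
func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (created bool, err error) {
	resp, err := cli.Txn(ctx).
		If(clientv3util.KeyMissing(key)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}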
@@ -16,8 +16,8 @@ package clientv3
 
 import (
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 )
 
 type (
@@ -74,27 +74,19 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes
 
 func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
 	// it is safe to retry on update.
-	for {
-		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
-		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
-		if err == nil {
-			return (*MemberUpdateResponse)(resp), nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
+	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+	resp, err := c.remote.MemberUpdate(ctx, r)
+	if err == nil {
+		return (*MemberUpdateResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
 }
 
 func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
 	// it is safe to retry on list.
-	for {
-		resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
-		if err == nil {
-			return (*MemberListResponse)(resp), nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
+	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{})
+	if err == nil {
+		return (*MemberListResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
 }
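Since MemberUpdate and MemberList now return on the first error instead of looping internally, a caller that still wants retries can wrap the call itself. A hedged sketch; retryUntilDeadline and its fixed backoff are illustrative:

package retrysketch

import (
	"context"
	"time"
)

// retryUntilDeadline re-runs op until it succeeds or the context expires.
// op stands in for any single clientv3 call such as MemberList.
func retryUntilDeadline(ctx context.Context, op func(context.Context) error) error {
	for {
		if err := op(ctx); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond): // simple fixed backoff for the sketch
		}
	}
}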
@@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
 	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
 }
 
-// WithCompactPhysical makes compact RPC call wait until
-// the compaction is physically applied to the local database
-// such that compacted entries are totally removed from the
-// backend database.
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
 func WithCompactPhysical() CompactOption {
 	return func(op *CompactOp) { op.physical = true }
 }
@@ -99,6 +99,7 @@ func (cmp *Cmp) ValueBytes() []byte {
 // WithValueBytes sets the byte slice for the comparison's value.
 func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
 
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
 func mustInt64(val interface{}) int64 {
 	if v, ok := val.(int64); ok {
 		return v
@@ -108,3 +109,12 @@ func mustInt64(val interface{}) int64 {
 	}
 	panic("bad value")
 }
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val interface{}) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
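A standalone illustration of the type switch mustInt64orLeaseID adds, with a local LeaseID type so the sketch compiles on its own:

package cmpsketch

import "fmt"

// LeaseID mirrors the clientv3 type: an int64 with its own name.
type LeaseID int64

// toInt64 accepts int, int64, or LeaseID and normalizes to int64, panicking on
// anything else, the same shape of check as the helper above.
func toInt64(val interface{}) int64 {
	switch v := val.(type) {
	case LeaseID:
		return int64(v)
	case int64:
		return v
	case int:
		return int64(v)
	default:
		panic(fmt.Sprintf("bad value %v", val))
	}
}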
@@ -21,6 +21,7 @@ import (
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
+
 	"golang.org/x/net/context"
 )
 
@@ -185,12 +186,12 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
 			cancel()
 			return
 		}
-		// only accept PUTs; a DELETE will make observe() spin
+		// only accept puts; a delete will make observe() spin
 		for _, ev := range wr.Events {
 			if ev.Type == mvccpb.PUT {
 				hdr, kv = &wr.Header, ev.Kv
 				// may have multiple revs; hdr.rev = the last rev
-				// set to kv's rev in case batch has multiple PUTs
+				// set to kv's rev in case batch has multiple Puts
 				hdr.Revision = kv.ModRevision
 				break
 			}
@@ -213,6 +214,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
 	for !keyDeleted {
 		wr, ok := <-wch
 		if !ok {
+			cancel()
 			return
 		}
 		for _, ev := range wr.Events {
@@ -225,6 +227,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
 			select {
 			case ch <- *resp:
 			case <-cctx.Done():
+				cancel()
 				return
 			}
 		}
@@ -240,4 +243,4 @@ func (e *Election) Key() string { return e.leaderKey }
 func (e *Election) Rev() int64 { return e.leaderRev }
 
 // Header is the response header from the last successful election proposal.
-func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
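A usage sketch for the Observe path touched above, assuming the concurrency package's NewSession, NewElection, and Observe; the election prefix is illustrative:

package electionsketch

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"

	"golang.org/x/net/context"
)

// observeLeader streams leadership changes. As the comment in the hunk says,
// Observe forwards only put events on the election prefix, so each response
// carries the current leader's value.
func observeLeader(ctx context.Context, cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election/") // prefix is illustrative
	for resp := range e.Observe(ctx) {
		if len(resp.Kvs) != 0 {
			fmt.Printf("leader value: %s\n", resp.Kvs[0].Value)
		}
	}
	return ctx.Err()
}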
@@ -20,6 +20,7 @@ import (
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
+
 	"golang.org/x/net/context"
 )
 
@@ -20,6 +20,7 @@ import (
 
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
 	"golang.org/x/net/context"
 )
 
@@ -49,7 +50,9 @@ func (m *Mutex) Lock(ctx context.Context) error {
 	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
 	// reuse key in case this session already holds the lock
 	get := v3.OpGet(m.myKey)
-	resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
+	// fetch current holder to complete uncontended path with only one RPC
+	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
 	if err != nil {
 		return err
 	}
@@ -57,6 +60,12 @@ func (m *Mutex) Lock(ctx context.Context) error {
 	if !resp.Succeeded {
 		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
 	}
+	// if no key on prefix / the minimum rev is key, already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
 
 	// wait for deletion revisions prior to myKey
 	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
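A usage sketch of Lock with the new single-RPC uncontended path, assuming the concurrency package's NewSession and NewMutex; the lock prefix and helper name are illustrative:

package mutexsketch

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"

	"golang.org/x/net/context"
)

// lockOnce acquires and releases a distributed lock. With the getOwner read
// folded into the same transaction above, an uncontended Lock completes in a
// single Txn RPC before ever reaching the waitDeletes path.
func lockOnce(cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/") // prefix is illustrative
	if err := m.Lock(context.TODO()); err != nil {
		return err
	}
	defer m.Unlock(context.TODO())

	log.Println("lock acquired")
	return nil
}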
@@ -18,6 +18,7 @@ import (
 	"time"
 
 	v3 "github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
 
@@ -53,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
 	ctx, cancel := context.WithCancel(ops.ctx)
 	keepAlive, err := client.KeepAlive(ctx, id)
 	if err != nil || keepAlive == nil {
+		cancel()
 		return nil, err
 	}
 
@@ -18,6 +18,7 @@ import (
 	"math"
 
 	v3 "github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
 
@@ -46,7 +47,7 @@ const (
 	// SerializableSnapshot provides serializable isolation and also checks
 	// for write conflicts.
 	SerializableSnapshot Isolation = iota
-	// Serializable reads within the same transactiona attempt return data
+	// Serializable reads within the same transaction attempt return data
 	// from the at the revision of the first read.
 	Serializable
 	// RepeatableReads reads within the same transaction attempt always
@@ -85,7 +86,7 @@ func WithPrefetch(keys ...string) stmOption {
 	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
 }
 
-// NewSTM initiates a new STM instance, using snapshot isolation by default.
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
 func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
 	opts := &stmOptions{ctx: c.Ctx()}
 	for _, f := range so {
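A hedged sketch of NewSTM with the default serializable snapshot isolation described above; the keys and the transfer helper are illustrative:

package stmsketch

import (
	"strconv"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// transfer moves amount between two counters. The closure is retried by NewSTM
// whenever the commit transaction detects that either key changed underneath it.
func transfer(cli *clientv3.Client, from, to string, amount int) error {
	apply := func(stm concurrency.STM) error {
		src, _ := strconv.Atoi(stm.Get(from)) // missing keys parse as 0 in this sketch
		dst, _ := strconv.Atoi(stm.Get(to))
		stm.Put(from, strconv.Itoa(src-amount))
		stm.Put(to, strconv.Itoa(dst+amount))
		return nil
	}
	_, err := concurrency.NewSTM(cli, apply)
	return err
}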
@@ -33,10 +33,18 @@ type Config struct {
 	// DialTimeout is the timeout for failing to establish a connection.
 	DialTimeout time.Duration `json:"dial-timeout"`
 
+	// DialKeepAliveTime is the time in seconds after which client pings the server to see if
+	// transport is alive.
+	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
+
+	// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
+	// keep-alive probe. If the response is not received in this time, the connection is closed.
+	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
+
 	// TLS holds the client secure credentials, if any.
 	TLS *tls.Config
 
-	// Username is a username for authentication.
+	// Username is a user name for authentication.
 	Username string `json:"username"`
 
 	// Password is a password for authentication.
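How the two new Config fields are meant to be used, as a minimal sketch; the endpoint and durations are illustrative, not defaults:

package configsketch

import (
	"time"

	"github.com/coreos/etcd/clientv3"
)

// newKeepAliveClient builds a client that pings the server every 30s and gives
// up on a connection whose ping gets no response within 5s.
func newKeepAliveClient() (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"},
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    30 * time.Second,
		DialKeepAliveTimeout: 5 * time.Second,
	})
}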
@@ -28,7 +28,7 @@
 // Make sure to close the client after using it. If the client is not closed, the
 // connection will have leaky goroutines.
 //
-// To specify client request timeout, pass context.WithTimeout to APIs:
+// To specify a client request timeout, wrap the context with context.WithTimeout:
 //
 //	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 //	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
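Expanding the doc snippet above into a complete helper, as a minimal sketch; putWithTimeout is illustrative:

package docsketch

import (
	"time"

	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

// putWithTimeout bounds a single request; the timeout applies only to this
// call, while the client itself stays open for further use.
func putWithTimeout(cli *clientv3.Client, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	_, err := cli.Put(ctx, "sample_key", "sample_value")
	return err
}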
|
@ -1,113 +0,0 @@
|
|||||||
// Copyright 2016 The etcd Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package clientv3_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleAuth() {
|
|
||||||
cli, err := clientv3.New(clientv3.Config{
|
|
||||||
Endpoints: endpoints,
|
|
||||||
DialTimeout: dialTimeout,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer cli.Close()
|
|
||||||
|
|
||||||
if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = cli.RoleGrantPermission(
|
|
||||||
context.TODO(),
|
|
||||||
"r", // role name
|
|
||||||
"foo", // key
|
|
||||||
"zoo", // range end
|
|
||||||
clientv3.PermissionType(clientv3.PermReadWrite),
|
|
||||||
); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = cli.AuthEnable(context.TODO()); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cliAuth, err := clientv3.New(clientv3.Config{
|
|
||||||
Endpoints: endpoints,
|
|
||||||
DialTimeout: dialTimeout,
|
|
||||||
Username: "u",
|
|
||||||
Password: "123",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer cliAuth.Close()
|
|
||||||
|
|
||||||
if _, err = cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = cliAuth.Txn(context.TODO()).
|
|
||||||
If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
|
|
||||||
Then(clientv3.OpPut("zoo1", "XYZ")).
|
|
||||||
Else(clientv3.OpPut("zoo1", "ABC")).
|
|
||||||
Commit()
|
|
||||||
fmt.Println(err)
|
|
||||||
|
|
||||||
// now check the permission with the root account
|
|
||||||
rootCli, err := clientv3.New(clientv3.Config{
|
|
||||||
Endpoints: endpoints,
|
|
||||||
DialTimeout: dialTimeout,
|
|
||||||
Username: "root",
|
|
||||||
Password: "123",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer rootCli.Close()
|
|
||||||
|
|
||||||
resp, err := rootCli.RoleGet(context.TODO(), "r")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
|
|
||||||
|
|
||||||
if _, err = rootCli.AuthDisable(context.TODO()); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
// Output: etcdserver: permission denied
|
|
||||||
// user u permission: key "foo", range end "zoo"
|
|
||||||
}
|
|
@ -19,6 +19,7 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -20,6 +20,7 @@ import (
|
|||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -236,8 +237,11 @@ func ExampleKV_txn() {
|
|||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||||
_, err = kvc.Txn(ctx).
|
_, err = kvc.Txn(ctx).
|
||||||
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical
|
// txn value comparisons are lexical
|
||||||
Then(clientv3.OpPut("key", "XYZ")). // this runs, since 'xyz' > 'abc'
|
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")).
|
||||||
|
// the "Then" runs, since "xyz" > "abc"
|
||||||
|
Then(clientv3.OpPut("key", "XYZ")).
|
||||||
|
// the "Else" does not run
|
||||||
Else(clientv3.OpPut("key", "ABC")).
|
Else(clientv3.OpPut("key", "ABC")).
|
||||||
Commit()
|
Commit()
|
||||||
cancel()
|
cancel()
|
||||||
|
@ -19,6 +19,7 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -18,9 +18,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
func ExampleMaintenance_status() {
|
func ExampleMaintenance_status() {
|
||||||
@ -34,20 +33,15 @@ func ExampleMaintenance_status() {
|
|||||||
}
|
}
|
||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
// resp, err := cli.Status(context.Background(), ep)
|
resp, err := cli.Status(context.Background(), ep)
|
||||||
//
|
|
||||||
// or
|
|
||||||
//
|
|
||||||
mapi := clientv3.NewMaintenance(cli)
|
|
||||||
resp, err := mapi.Status(context.Background(), ep)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader)
|
fmt.Printf("endpoint: %s / Leader: %v\n", ep, resp.Header.MemberId == resp.Leader)
|
||||||
}
|
}
|
||||||
// endpoint: localhost:2379 / IsLeader: false
|
// endpoint: localhost:2379 / Leader: false
|
||||||
// endpoint: localhost:22379 / IsLeader: false
|
// endpoint: localhost:22379 / Leader: false
|
||||||
// endpoint: localhost:32379 / IsLeader: true
|
// endpoint: localhost:32379 / Leader: true
|
||||||
}
|
}
|
||||||
|
|
||||||
func ExampleMaintenance_defragment() {
|
func ExampleMaintenance_defragment() {
|
||||||
|
@ -43,10 +43,10 @@ func ExampleClient_metrics() {
|
|||||||
}
|
}
|
||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
// get a key so it shows up in the metrics as a range rpc
|
// get a key so it shows up in the metrics as a range RPC
|
||||||
cli.Get(context.TODO(), "test_key")
|
cli.Get(context.TODO(), "test_key")
|
||||||
|
|
||||||
// listen for all prometheus metrics
|
// listen for all Prometheus metrics
|
||||||
ln, err := net.Listen("tcp", ":0")
|
ln, err := net.Listen("tcp", ":0")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
@ -61,7 +61,7 @@ func ExampleClient_metrics() {
|
|||||||
<-donec
|
<-donec
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// make an http request to fetch all prometheus metrics
|
// make an http request to fetch all Prometheus metrics
|
||||||
url := "http://" + ln.Addr().String() + "/metrics"
|
url := "http://" + ln.Addr().String() + "/metrics"
|
||||||
resp, err := http.Get(url)
|
resp, err := http.Get(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -80,5 +80,6 @@ func ExampleClient_metrics() {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
|
// Output:
|
||||||
|
// grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
|
||||||
}
|
}
|
||||||
|
@ -16,12 +16,14 @@ package clientv3_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
"github.com/coreos/etcd/pkg/transport"
|
"github.com/coreos/etcd/pkg/transport"
|
||||||
"github.com/coreos/pkg/capnslog"
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -31,8 +33,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func Example() {
|
func Example() {
|
||||||
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
|
clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
|
||||||
clientv3.SetLogger(plog)
|
|
||||||
|
|
||||||
cli, err := clientv3.New(clientv3.Config{
|
cli, err := clientv3.New(clientv3.Config{
|
||||||
Endpoints: endpoints,
|
Endpoints: endpoints,
|
||||||
|
@ -19,6 +19,7 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
clientv3/health_balancer.go (new file, 627 lines)
@@ -0,0 +1,627 @@
|
// Copyright 2017 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
minHealthRetryDuration = 3 * time.Second
|
||||||
|
unknownService = "unknown service grpc.health.v1.Health"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||||
|
// any active connection to endpoints at the time.
|
||||||
|
// This error is returned only when opts.BlockingWait is true.
|
||||||
|
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
|
||||||
|
|
||||||
|
type healthCheckFunc func(ep string) (bool, error)
|
||||||
|
|
||||||
|
type notifyMsg int
|
||||||
|
|
||||||
|
const (
|
||||||
|
notifyReset notifyMsg = iota
|
||||||
|
notifyNext
|
||||||
|
)
|
||||||
|
|
||||||
|
// healthBalancer does the bare minimum to expose multiple eps
|
||||||
|
// to the grpc reconnection code path
|
||||||
|
type healthBalancer struct {
|
||||||
|
// addrs are the client's endpoint addresses for grpc
|
||||||
|
addrs []grpc.Address
|
||||||
|
|
||||||
|
// eps holds the raw endpoints from the client
|
||||||
|
eps []string
|
||||||
|
|
||||||
|
// notifyCh notifies grpc of the set of addresses for connecting
|
||||||
|
notifyCh chan []grpc.Address
|
||||||
|
|
||||||
|
// readyc closes once the first connection is up
|
||||||
|
readyc chan struct{}
|
||||||
|
readyOnce sync.Once
|
||||||
|
|
||||||
|
// healthCheck checks an endpoint's health.
|
||||||
|
healthCheck healthCheckFunc
|
||||||
|
healthCheckTimeout time.Duration
|
||||||
|
|
||||||
|
unhealthyMu sync.RWMutex
|
||||||
|
unhealthyHostPorts map[string]time.Time
|
||||||
|
|
||||||
|
// mu protects all fields below.
|
||||||
|
mu sync.RWMutex
|
||||||
|
|
||||||
|
// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
|
||||||
|
upc chan struct{}
|
||||||
|
|
||||||
|
// downc closes when grpc calls down() on pinAddr
|
||||||
|
downc chan struct{}
|
||||||
|
|
||||||
|
// stopc is closed to signal updateNotifyLoop should stop.
|
||||||
|
stopc chan struct{}
|
||||||
|
stopOnce sync.Once
|
||||||
|
wg sync.WaitGroup
|
||||||
|
|
||||||
|
// donec closes when all goroutines are exited
|
||||||
|
donec chan struct{}
|
||||||
|
|
||||||
|
// updateAddrsC notifies updateNotifyLoop to update addrs.
|
||||||
|
updateAddrsC chan notifyMsg
|
||||||
|
|
||||||
|
// grpc issues TLS cert checks using the string passed into dial so
|
||||||
|
// that string must be the host. To recover the full scheme://host URL,
|
||||||
|
// have a map from hosts to the original endpoint.
|
||||||
|
hostPort2ep map[string]string
|
||||||
|
|
||||||
|
// pinAddr is the currently pinned address; set to the empty string on
|
||||||
|
// initialization and shutdown.
|
||||||
|
pinAddr string
|
||||||
|
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
|
||||||
|
notifyCh := make(chan []grpc.Address)
|
||||||
|
addrs := eps2addrs(eps)
|
||||||
|
hb := &healthBalancer{
|
||||||
|
addrs: addrs,
|
||||||
|
eps: eps,
|
||||||
|
notifyCh: notifyCh,
|
||||||
|
readyc: make(chan struct{}),
|
||||||
|
healthCheck: hc,
|
||||||
|
unhealthyHostPorts: make(map[string]time.Time),
|
||||||
|
upc: make(chan struct{}),
|
||||||
|
stopc: make(chan struct{}),
|
||||||
|
downc: make(chan struct{}),
|
||||||
|
donec: make(chan struct{}),
|
||||||
|
updateAddrsC: make(chan notifyMsg),
|
||||||
|
hostPort2ep: getHostPort2ep(eps),
|
||||||
|
}
|
||||||
|
if timeout < minHealthRetryDuration {
|
||||||
|
timeout = minHealthRetryDuration
|
||||||
|
}
|
||||||
|
hb.healthCheckTimeout = timeout
|
||||||
|
|
||||||
|
close(hb.downc)
|
||||||
|
go hb.updateNotifyLoop()
|
||||||
|
hb.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer hb.wg.Done()
|
||||||
|
hb.updateUnhealthy()
|
||||||
|
}()
|
||||||
|
return hb
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
||||||
|
|
||||||
|
func (b *healthBalancer) ConnectNotify() <-chan struct{} {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
return b.upc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
|
||||||
|
|
||||||
|
func (b *healthBalancer) endpoint(hostPort string) string {
|
||||||
|
b.mu.RLock()
|
||||||
|
defer b.mu.RUnlock()
|
||||||
|
return b.hostPort2ep[hostPort]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) pinned() string {
|
||||||
|
b.mu.RLock()
|
||||||
|
defer b.mu.RUnlock()
|
||||||
|
return b.pinAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) hostPortError(hostPort string, err error) {
|
||||||
|
if b.endpoint(hostPort) == "" {
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
b.unhealthyMu.Lock()
|
||||||
|
b.unhealthyHostPorts[hostPort] = time.Now()
|
||||||
|
b.unhealthyMu.Unlock()
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
|
||||||
|
if b.endpoint(hostPort) == "" {
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
b.unhealthyMu.Lock()
|
||||||
|
delete(b.unhealthyHostPorts, hostPort)
|
||||||
|
b.unhealthyMu.Unlock()
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) countUnhealthy() (count int) {
|
||||||
|
b.unhealthyMu.RLock()
|
||||||
|
count = len(b.unhealthyHostPorts)
|
||||||
|
b.unhealthyMu.RUnlock()
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
|
||||||
|
b.unhealthyMu.RLock()
|
||||||
|
_, unhealthy = b.unhealthyHostPorts[hostPort]
|
||||||
|
b.unhealthyMu.RUnlock()
|
||||||
|
return unhealthy
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) cleanupUnhealthy() {
|
||||||
|
b.unhealthyMu.Lock()
|
||||||
|
for k, v := range b.unhealthyHostPorts {
|
||||||
|
if time.Since(v) > b.healthCheckTimeout {
|
||||||
|
delete(b.unhealthyHostPorts, k)
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.unhealthyMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
|
||||||
|
unhealthyCnt := b.countUnhealthy()
|
||||||
|
|
||||||
|
b.mu.RLock()
|
||||||
|
defer b.mu.RUnlock()
|
||||||
|
|
||||||
|
hbAddrs := b.addrs
|
||||||
|
if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
|
||||||
|
liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
|
||||||
|
for k := range b.hostPort2ep {
|
||||||
|
liveHostPorts[k] = struct{}{}
|
||||||
|
}
|
||||||
|
return hbAddrs, liveHostPorts
|
||||||
|
}
|
||||||
|
|
||||||
|
addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
|
||||||
|
liveHostPorts := make(map[string]struct{}, len(addrs))
|
||||||
|
for _, addr := range b.addrs {
|
||||||
|
if !b.isUnhealthy(addr.Addr) {
|
||||||
|
addrs = append(addrs, addr)
|
||||||
|
liveHostPorts[addr.Addr] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return addrs, liveHostPorts
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) updateUnhealthy() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-time.After(b.healthCheckTimeout):
|
||||||
|
b.cleanupUnhealthy()
|
||||||
|
pinned := b.pinned()
|
||||||
|
if pinned == "" || b.isUnhealthy(pinned) {
|
||||||
|
select {
|
||||||
|
case b.updateAddrsC <- notifyNext:
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) updateAddrs(eps ...string) {
|
||||||
|
np := getHostPort2ep(eps)
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
|
match := len(np) == len(b.hostPort2ep)
|
||||||
|
if match {
|
||||||
|
for k, v := range np {
|
||||||
|
if b.hostPort2ep[k] != v {
|
||||||
|
match = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if match {
|
||||||
|
// same endpoints, so no need to update address
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
b.hostPort2ep = np
|
||||||
|
b.addrs, b.eps = eps2addrs(eps), eps
|
||||||
|
|
||||||
|
b.unhealthyMu.Lock()
|
||||||
|
b.unhealthyHostPorts = make(map[string]time.Time)
|
||||||
|
b.unhealthyMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) next() {
|
||||||
|
b.mu.RLock()
|
||||||
|
downc := b.downc
|
||||||
|
b.mu.RUnlock()
|
||||||
|
select {
|
||||||
|
case b.updateAddrsC <- notifyNext:
|
||||||
|
case <-b.stopc:
|
||||||
|
}
|
||||||
|
// wait until disconnect so new RPCs are not issued on old connection
|
||||||
|
select {
|
||||||
|
case <-downc:
|
||||||
|
case <-b.stopc:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) updateNotifyLoop() {
|
||||||
|
defer close(b.donec)
|
||||||
|
|
||||||
|
for {
|
||||||
|
b.mu.RLock()
|
||||||
|
upc, downc, addr := b.upc, b.downc, b.pinAddr
|
||||||
|
b.mu.RUnlock()
|
||||||
|
// downc or upc should be closed
|
||||||
|
select {
|
||||||
|
case <-downc:
|
||||||
|
downc = nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-upc:
|
||||||
|
upc = nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case downc == nil && upc == nil:
|
||||||
|
// stale
|
||||||
|
select {
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
case downc == nil:
|
||||||
|
b.notifyAddrs(notifyReset)
|
||||||
|
select {
|
||||||
|
case <-upc:
|
||||||
|
case msg := <-b.updateAddrsC:
|
||||||
|
b.notifyAddrs(msg)
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case upc == nil:
|
||||||
|
select {
|
||||||
|
// close connections that are not the pinned address
|
||||||
|
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
|
||||||
|
case <-downc:
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-downc:
|
||||||
|
b.notifyAddrs(notifyReset)
|
||||||
|
case msg := <-b.updateAddrsC:
|
||||||
|
b.notifyAddrs(msg)
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
|
||||||
|
if msg == notifyNext {
|
||||||
|
select {
|
||||||
|
case b.notifyCh <- []grpc.Address{}:
|
||||||
|
case <-b.stopc:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.mu.RLock()
|
||||||
|
pinAddr := b.pinAddr
|
||||||
|
downc := b.downc
|
||||||
|
b.mu.RUnlock()
|
||||||
|
addrs, hostPorts := b.liveAddrs()
|
||||||
|
|
||||||
|
var waitDown bool
|
||||||
|
if pinAddr != "" {
|
||||||
|
_, ok := hostPorts[pinAddr]
|
||||||
|
waitDown = !ok
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case b.notifyCh <- addrs:
|
||||||
|
if waitDown {
|
||||||
|
select {
|
||||||
|
case <-downc:
|
||||||
|
case <-b.stopc:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case <-b.stopc:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
|
||||||
|
if !b.mayPin(addr) {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
|
// gRPC might call Up after it called Close. We add this check
|
||||||
|
// to "fix" it up at application layer. Otherwise, will panic
|
||||||
|
// if b.upc is already closed.
|
||||||
|
if b.closed {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// gRPC might call Up on a stale address.
|
||||||
|
// Prevent updating pinAddr with a stale address.
|
||||||
|
if !hasAddr(b.addrs, addr.Addr) {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.pinAddr != "" {
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
|
||||||
|
}
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// notify waiting Get()s and pin first connected address
|
||||||
|
close(b.upc)
|
||||||
|
b.downc = make(chan struct{})
|
||||||
|
b.pinAddr = addr.Addr
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: pin %q", addr.Addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// notify client that a connection is up
|
||||||
|
b.readyOnce.Do(func() { close(b.readyc) })
|
||||||
|
|
||||||
|
return func(err error) {
|
||||||
|
// If connected to a black hole endpoint or a killed server, the gRPC ping
|
||||||
|
// timeout will induce a network I/O error, and retrying until success;
|
||||||
|
// finding healthy endpoint on retry could take several timeouts and redials.
|
||||||
|
// To avoid wasting retries, gray-list unhealthy endpoints.
|
||||||
|
b.hostPortError(addr.Addr, err)
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
b.upc = make(chan struct{})
|
||||||
|
close(b.downc)
|
||||||
|
b.pinAddr = ""
|
||||||
|
b.mu.Unlock()
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
|
||||||
|
if b.endpoint(addr.Addr) == "" { // stale host:port
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
b.unhealthyMu.RLock()
|
||||||
|
unhealthyCnt := len(b.unhealthyHostPorts)
|
||||||
|
failedTime, bad := b.unhealthyHostPorts[addr.Addr]
|
||||||
|
b.unhealthyMu.RUnlock()
|
||||||
|
|
||||||
|
b.mu.RLock()
|
||||||
|
skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
|
||||||
|
b.mu.RUnlock()
|
||||||
|
if skip || !bad {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// prevent isolated member's endpoint from being infinitely retried, as follows:
|
||||||
|
// 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm
|
||||||
|
// 2. balancer 'Up' unpins with grpc: failed with network I/O error
|
||||||
|
// 3. grpc-healthcheck still SERVING, thus retry to pin
|
||||||
|
// instead, return before grpc-healthcheck if failed within healthcheck timeout
|
||||||
|
if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
|
||||||
|
if logger.V(4) {
|
||||||
|
logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok, _ := b.healthCheck(addr.Addr); ok {
|
||||||
|
b.removeUnhealthy(addr.Addr, "health check success")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
b.hostPortError(addr.Addr, errors.New("health check failed"))
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||||
|
var (
|
||||||
|
addr string
|
||||||
|
closed bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||||
|
// an address it has notified via Notify immediately instead of blocking.
|
||||||
|
if !opts.BlockingWait {
|
||||||
|
b.mu.RLock()
|
||||||
|
closed = b.closed
|
||||||
|
addr = b.pinAddr
|
||||||
|
b.mu.RUnlock()
|
||||||
|
if closed {
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
if addr == "" {
|
||||||
|
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||||
|
}
|
||||||
|
return grpc.Address{Addr: addr}, func() {}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
b.mu.RLock()
|
||||||
|
ch := b.upc
|
||||||
|
b.mu.RUnlock()
|
||||||
|
select {
|
||||||
|
case <-ch:
|
||||||
|
case <-b.donec:
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
case <-ctx.Done():
|
||||||
|
return grpc.Address{Addr: ""}, nil, ctx.Err()
|
||||||
|
}
|
||||||
|
b.mu.RLock()
|
||||||
|
closed = b.closed
|
||||||
|
addr = b.pinAddr
|
||||||
|
b.mu.RUnlock()
|
||||||
|
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
|
||||||
|
if closed {
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
if addr != "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return grpc.Address{Addr: addr}, func() {}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
|
||||||
|
|
||||||
|
func (b *healthBalancer) Close() error {
|
||||||
|
b.mu.Lock()
|
||||||
|
// In case gRPC calls close twice. TODO: remove the checking
|
||||||
|
// when we are sure that gRPC wont call close twice.
|
||||||
|
if b.closed {
|
||||||
|
b.mu.Unlock()
|
||||||
|
<-b.donec
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b.closed = true
|
||||||
|
b.stopOnce.Do(func() { close(b.stopc) })
|
||||||
|
b.pinAddr = ""
|
||||||
|
|
||||||
|
// In the case of following scenario:
|
||||||
|
// 1. upc is not closed; no pinned address
|
||||||
|
// 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
|
||||||
|
// 3. client.conn.Close() calls balancer.Close(); closed = true
|
||||||
|
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
|
||||||
|
// we must close upc so Get() exits from blocking on upc
|
||||||
|
select {
|
||||||
|
case <-b.upc:
|
||||||
|
default:
|
||||||
|
// terminate all waiting Get()s
|
||||||
|
close(b.upc)
|
||||||
|
}
|
||||||
|
|
||||||
|
b.mu.Unlock()
|
||||||
|
b.wg.Wait()
|
||||||
|
|
||||||
|
// wait for updateNotifyLoop to finish
|
||||||
|
<-b.donec
|
||||||
|
close(b.notifyCh)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func grpcHealthCheck(client *Client, ep string) (bool, error) {
|
||||||
|
conn, err := client.dial(ep)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
cli := healthpb.NewHealthClient(conn)
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
|
||||||
|
cancel()
|
||||||
|
if err != nil {
|
||||||
|
if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
|
||||||
|
if s.Message() == unknownService { // etcd < v3.3.0
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
|
||||||
|
for _, addr := range addrs {
|
||||||
|
if targetAddr == addr.Addr {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHost(ep string) string {
|
||||||
|
url, uerr := url.Parse(ep)
|
||||||
|
if uerr != nil || !strings.Contains(ep, "://") {
|
||||||
|
return ep
|
||||||
|
}
|
||||||
|
return url.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
func eps2addrs(eps []string) []grpc.Address {
|
||||||
|
addrs := make([]grpc.Address, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
addrs[i].Addr = getHost(eps[i])
|
||||||
|
}
|
||||||
|
return addrs
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHostPort2ep(eps []string) map[string]string {
|
||||||
|
hm := make(map[string]string, len(eps))
|
||||||
|
for i := range eps {
|
||||||
|
_, host, _ := parseEndpoint(eps[i])
|
||||||
|
hm[host] = eps[i]
|
||||||
|
}
|
||||||
|
return hm
|
||||||
|
}
clientv3/integration/black_hole_test.go (new file, 211 lines)
@@ -0,0 +1,211 @@
|
// Copyright 2017 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build !cluster_proxy
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
"github.com/coreos/etcd/integration"
|
||||||
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestBalancerUnderBlackholeKeepAliveWatch tests when watch discovers it cannot talk to
|
||||||
|
// blackholed endpoint, client balancer switches to healthy one.
|
||||||
|
// TODO: test server-to-client keepalive ping
|
||||||
|
func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
|
||||||
|
Size: 2,
|
||||||
|
GRPCKeepAliveMinTime: 1 * time.Millisecond, // avoid too_many_pings
|
||||||
|
})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
|
||||||
|
|
||||||
|
ccfg := clientv3.Config{
|
||||||
|
Endpoints: []string{eps[0]},
|
||||||
|
DialTimeout: 1 * time.Second,
|
||||||
|
DialKeepAliveTime: 1 * time.Second,
|
||||||
|
DialKeepAliveTimeout: 500 * time.Millisecond,
|
||||||
|
}
|
||||||
|
|
||||||
|
// gRPC internal implementation related.
|
||||||
|
pingInterval := ccfg.DialKeepAliveTime + ccfg.DialKeepAliveTimeout
|
||||||
|
// 3s for slow machine to process watch and reset connections
|
||||||
|
// TODO: only send healthy endpoint to gRPC so gRPC wont waste time to
|
||||||
|
// dial for unhealthy endpoint.
|
||||||
|
// then we can reduce 3s to 1s.
|
||||||
|
timeout := pingInterval + 3*time.Second
|
||||||
|
|
||||||
|
cli, err := clientv3.New(ccfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer cli.Close()
|
||||||
|
|
||||||
|
wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify())
|
||||||
|
if _, ok := <-wch; !ok {
|
||||||
|
t.Fatalf("watch failed on creation")
|
||||||
|
}
|
||||||
|
|
||||||
|
// endpoint can switch to eps[1] when it detects the failure of eps[0]
|
||||||
|
cli.SetEndpoints(eps...)
|
||||||
|
|
||||||
|
clus.Members[0].Blackhole()
|
||||||
|
|
||||||
|
if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-wch:
|
||||||
|
case <-time.After(timeout):
|
||||||
|
t.Error("took too long to receive watch events")
|
||||||
|
}
|
||||||
|
|
||||||
|
clus.Members[0].Unblackhole()
|
||||||
|
|
||||||
|
// waiting for moving eps[0] out of unhealthy, so that it can be re-pined.
|
||||||
|
time.Sleep(ccfg.DialTimeout)
|
||||||
|
|
||||||
|
clus.Members[1].Blackhole()
|
||||||
|
|
||||||
|
// make sure client[0] can connect to eps[0] after remove the blackhole.
|
||||||
|
if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-wch:
|
||||||
|
case <-time.After(timeout):
|
||||||
|
t.Error("took too long to receive watch events")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) {
|
||||||
|
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
|
||||||
|
_, err := cli.Put(ctx, "foo", "bar")
|
||||||
|
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
|
||||||
|
return errExpected
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBalancerUnderBlackholeNoKeepAliveDelete(t *testing.T) {
|
||||||
|
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
|
||||||
|
_, err := cli.Delete(ctx, "foo")
|
||||||
|
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
|
||||||
|
return errExpected
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) {
|
||||||
|
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
|
||||||
|
_, err := cli.Txn(ctx).
|
||||||
|
If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
|
||||||
|
Then(clientv3.OpPut("foo", "bar")).
|
||||||
|
Else(clientv3.OpPut("foo", "baz")).Commit()
|
||||||
|
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
|
||||||
|
return errExpected
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBalancerUnderBlackholeNoKeepAliveLinearizableGet(t *testing.T) {
|
||||||
|
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
|
||||||
|
_, err := cli.Get(ctx, "a")
|
||||||
|
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
|
||||||
|
return errExpected
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
|
||||||
|
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
|
||||||
|
_, err := cli.Get(ctx, "a", clientv3.WithSerializable())
|
||||||
|
if err == context.DeadlineExceeded {
|
||||||
|
return errExpected
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint
|
||||||
|
// fails due to context timeout, but succeeds on next try, with endpoint switch.
|
||||||
|
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
|
||||||
|
Size: 2,
|
||||||
|
SkipCreatingClient: true,
|
||||||
|
})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
|
||||||
|
|
||||||
|
ccfg := clientv3.Config{
|
||||||
|
Endpoints: []string{eps[0]},
|
||||||
|
DialTimeout: 1 * time.Second,
|
||||||
|
}
|
||||||
|
cli, err := clientv3.New(ccfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer cli.Close()
|
||||||
|
|
||||||
|
// wait for eps[0] to be pinned
|
||||||
|
mustWaitPinReady(t, cli)
|
||||||
|
|
||||||
|
// add all eps to list, so that when the original pined one fails
|
||||||
|
// the client can switch to other available eps
|
||||||
|
cli.SetEndpoints(eps...)
|
||||||
|
|
||||||
|
// blackhole eps[0]
|
||||||
|
clus.Members[0].Blackhole()
|
||||||
|
|
||||||
|
// fail first due to blackhole, retry should succeed
|
||||||
|
// TODO: first operation can succeed
|
||||||
|
// when gRPC supports better retry on non-delivered request
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
err = op(cli, ctx)
|
||||||
|
cancel()
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
if err != errExpected {
|
||||||
|
t.Errorf("#%d: expected %v, got %v", i, errExpected, err)
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
t.Errorf("#%d: failed with error %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
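The keepalive knobs exercised above map directly onto clientv3.Config. A rough usage sketch follows; the endpoint addresses are placeholders and the timings are only illustrative.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// With keepalive probes enabled, a blackholed member is detected by the
	// client and the balancer can move to the other endpoint.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"127.0.0.1:2379", "127.0.0.1:22379"}, // placeholder addresses
		DialTimeout:          2 * time.Second,
		DialKeepAliveTime:    1 * time.Second,
		DialKeepAliveTimeout: 500 * time.Millisecond,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err = cli.Put(ctx, "foo", "bar")
	cancel()
	if err != nil {
		// The first attempt may time out while the balancer is still switching;
		// a retry is expected to land on a healthy endpoint.
		log.Println("first attempt failed:", err)
	}
}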
@ -21,6 +21,7 @@ import (
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/pkg/testutil"
 	"github.com/coreos/etcd/pkg/types"

 	"golang.org/x/net/context"
 )

@ -16,6 +16,7 @@ package integration

 import (
 	"math/rand"
+	"strings"
 	"testing"
 	"time"

@ -26,7 +27,6 @@ import (
 	"github.com/coreos/etcd/pkg/transport"

 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 )

 var (

@ -48,21 +48,21 @@ var (
 // TestDialTLSExpired tests client with expired certs fails to dial.
 func TestDialTLSExpired(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	tls, err := testTLSInfoExpired.ClientConfig()
 	if err != nil {
 		t.Fatal(err)
 	}
-	// expect remote errors 'tls: bad certificate'
+	// expect remote errors "tls: bad certificate"
 	_, err = clientv3.New(clientv3.Config{
 		Endpoints:   []string{clus.Members[0].GRPCAddr()},
 		DialTimeout: 3 * time.Second,
 		TLS:         tls,
 	})
-	if err != grpc.ErrClientConnTimeout {
-		t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
+	if err != context.DeadlineExceeded {
+		t.Fatalf("expected %v, got %v", context.DeadlineExceeded, err)
 	}
 }

@ -70,19 +70,20 @@ func TestDialTLSExpired(t *testing.T) {
 // when TLS endpoints (https, unixs) are given but no tls config.
 func TestDialTLSNoConfig(t *testing.T) {
 	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
 	defer clus.Terminate(t)
-	// expect 'signed by unknown authority'
+	// expect "signed by unknown authority"
 	_, err := clientv3.New(clientv3.Config{
 		Endpoints:   []string{clus.Members[0].GRPCAddr()},
 		DialTimeout: time.Second,
 	})
-	if err != grpc.ErrClientConnTimeout {
-		t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
+	if err != context.DeadlineExceeded {
+		t.Fatalf("expected %v, got %v", context.DeadlineExceeded, err)
 	}
 }

-// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
+// TestDialSetEndpointsBeforeFail ensures SetEndpoints can replace unavailable
+// endpoints with available ones.
 func TestDialSetEndpointsBeforeFail(t *testing.T) {
 	testDialSetEndpoints(t, true)
 }

@ -94,7 +95,7 @@ func TestDialSetEndpointsAfterFail(t *testing.T) {
 // testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
 func testDialSetEndpoints(t *testing.T, setBefore bool) {
 	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	// get endpoint list

@ -139,7 +140,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
 	eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

 	cli := clus.Client(0)
-	clus.Members[0].InjectPartition(t, clus.Members[1:])
+	clus.Members[0].InjectPartition(t, clus.Members[1:]...)

 	cli.SetEndpoints(eps...)
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)

@ -152,7 +153,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
 func TestRejectOldCluster(t *testing.T) {
 	defer testutil.AfterTest(t)
 	// 2 endpoints to test multi-endpoint Status
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	cfg := clientv3.Config{

@ -189,3 +190,17 @@ func TestDialForeignEndpoint(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+// TestSetEndpointAndPut checks that a Put following a SetEndpoints
+// to a working endpoint will always succeed.
+func TestSetEndpointAndPut(t *testing.T) {
+	defer testutil.AfterTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
+	defer clus.Terminate(t)
+
+	clus.Client(1).SetEndpoints(clus.Members[0].GRPCAddr())
+	_, err := clus.Client(1).Put(context.TODO(), "foo", "bar")
+	if err != nil && !strings.Contains(err.Error(), "closing") {
+		t.Fatal(err)
+	}
+}
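TestSetEndpointAndPut above relies on SetEndpoints to repoint an existing client at runtime. As a rough illustration of the same call from application code (the addresses are assumptions, not values from this change):

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379"}, // assumed initial endpoint
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Replace the endpoint list in place; subsequent requests can be
	// balanced over the new set without recreating the client.
	cli.SetEndpoints("10.0.0.2:2379", "10.0.0.3:2379")

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
		log.Fatal(err)
	}
}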
@ -16,7 +16,6 @@

 import (
 	"bytes"
-	"math/rand"
 	"os"
 	"reflect"
 	"strings"

@ -28,6 +27,7 @@ import (
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/mvcc/mvccpb"
 	"github.com/coreos/etcd/pkg/testutil"

 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )

@ -441,8 +441,8 @@ func TestKVGetErrConnClosed(t *testing.T) {
 	go func() {
 		defer close(donec)
 		_, err := cli.Get(context.TODO(), "foo")
-		if err != nil && err != grpc.ErrClientConnClosing {
-			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
+		if err != nil && err != context.Canceled && err != grpc.ErrClientConnClosing {
+			t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
 		}
 	}()

@ -472,8 +472,9 @@ func TestKVNewAfterClose(t *testing.T) {

 	donec := make(chan struct{})
 	go func() {
-		if _, err := cli.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
-			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
+		_, err := cli.Get(context.TODO(), "foo")
+		if err != context.Canceled && err != grpc.ErrClientConnClosing {
+			t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
 		}
 		close(donec)
 	}()

@ -790,7 +791,7 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {
 	// this Get fails and triggers an asynchronous connection retry
 	_, err := cli.Get(ctx, "abc")
 	cancel()
-	if !strings.Contains(err.Error(), "context deadline") {
+	if err != nil && err != context.DeadlineExceeded {
 		t.Fatal(err)
 	}
 }

@ -812,86 +813,51 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
 	// grpc finds out the original connection is down due to the member shutdown.
 	_, err := cli.Get(ctx, "abc")
 	cancel()
-	if !strings.Contains(err.Error(), "context deadline") {
+	if err != nil && err != context.DeadlineExceeded {
 		t.Fatal(err)
 	}

 	// this Put fails and triggers an asynchronous connection retry
 	_, err = cli.Put(ctx, "abc", "123")
 	cancel()
-	if !strings.Contains(err.Error(), "context deadline") {
+	if err != nil && err != context.DeadlineExceeded {
 		t.Fatal(err)
 	}
 }

-// TestKVGetOneEndpointDown ensures a client can connect and get if one endpoint is down
-func TestKVPutOneEndpointDown(t *testing.T) {
-	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
-	defer clus.Terminate(t)
-
-	// get endpoint list
-	eps := make([]string, 3)
-	for i := range eps {
-		eps[i] = clus.Members[i].GRPCAddr()
-	}
-
-	// make a dead node
-	clus.Members[rand.Intn(len(eps))].Stop(t)
-
-	// try to connect with dead node in the endpoint list
-	cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
-	cli, err := clientv3.New(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer cli.Close()
-	ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
-	if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
-		t.Fatal(err)
-	}
-	cancel()
-}
-
-// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other
-// endpoints are down, then it will reconnect.
-func TestKVGetResetLoneEndpoint(t *testing.T) {
-	defer testutil.AfterTest(t)
-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
-	defer clus.Terminate(t)
-
-	// get endpoint list
-	eps := make([]string, 2)
-	for i := range eps {
-		eps[i] = clus.Members[i].GRPCAddr()
-	}
-
-	cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond}
-	cli, err := clientv3.New(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer cli.Close()
-
-	// disconnect everything
-	clus.Members[0].Stop(t)
-	clus.Members[1].Stop(t)
-
-	// have Get try to reconnect
-	donec := make(chan struct{})
-	go func() {
-		ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
-		if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
-			t.Fatal(err)
-		}
-		cancel()
-		close(donec)
-	}()
-	time.Sleep(500 * time.Millisecond)
-	clus.Members[0].Restart(t)
-	select {
-	case <-time.After(10 * time.Second):
-		t.Fatalf("timed out waiting for Get")
-	case <-donec:
-	}
-}
+// TestKVPutAtMostOnce ensures that a Put will only occur at most once
+// in the presence of network errors.
+func TestKVPutAtMostOnce(t *testing.T) {
+	defer testutil.AfterTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	defer clus.Terminate(t)
+
+	if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 10; i++ {
+		clus.Members[0].DropConnections()
+		donec := make(chan struct{})
+		go func() {
+			defer close(donec)
+			for i := 0; i < 10; i++ {
+				clus.Members[0].DropConnections()
+				time.Sleep(5 * time.Millisecond)
+			}
+		}()
+		_, err := clus.Client(0).Put(context.TODO(), "k", "v")
+		<-donec
+		if err != nil {
+			break
+		}
+	}
+
+	resp, err := clus.Client(0).Get(context.TODO(), "k")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Kvs[0].Version > 11 {
+		t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
+	}
+}
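TestKVPutAtMostOnce checks the key's version counter to prove a retried Put was not applied twice. An application that needs the same guarantee can guard the write with a transaction on the key's version. The sketch below is an illustration, not part of this change; it assumes the caller knows the expected version (0 for a key that has never been written).

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// putOnce applies the Put only if the key is still at the expected version,
// so a blind retry after a network error cannot double-apply it.
func putOnce(cli *clientv3.Client, key, val string, expectedVersion int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), "=", expectedVersion)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		return fmt.Errorf("key %q already moved past version %d", key, expectedVersion)
	}
	return nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println(putOnce(cli, "k", "v", 0)) // version 0 means the key has not been written yet
}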
@ -26,6 +26,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/pkg/testutil"

 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )

@ -233,7 +234,7 @@ type leaseCh struct {
 	ch <-chan *clientv3.LeaseKeepAliveResponse
 }

-// TestLeaseKeepAliveNotFound ensures a revoked lease won't stop other keep alives
+// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases.
 func TestLeaseKeepAliveNotFound(t *testing.T) {
 	defer testutil.AfterTest(t)

@ -286,8 +287,10 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
 	go func() {
 		defer close(donec)
 		_, err := cli.Grant(context.TODO(), 5)
-		if err != nil && err != grpc.ErrClientConnClosing {
-			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
+		if err != nil && err != grpc.ErrClientConnClosing && err != context.Canceled {
+			// grpc.ErrClientConnClosing if grpc-go balancer calls 'Get' after client.Close.
+			// context.Canceled if grpc-go balancer calls 'Get' with an inflight client.Close.
+			t.Fatalf("expected %v or %v, got %v", grpc.ErrClientConnClosing, context.Canceled, err)
 		}
 	}()

@ -316,8 +319,8 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {

 	donec := make(chan struct{})
 	go func() {
-		if _, err := cli.Grant(context.TODO(), 5); err != grpc.ErrClientConnClosing {
-			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
+		if _, err := cli.Grant(context.TODO(), 5); err != context.Canceled && err != grpc.ErrClientConnClosing {
+			t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
 		}
 		close(donec)
 	}()

@ -348,8 +351,8 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {

 	donec := make(chan struct{})
 	go func() {
-		if _, err := cli.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing {
-			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
+		if _, err := cli.Revoke(context.TODO(), leaseID); err != context.Canceled && err != grpc.ErrClientConnClosing {
+			t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
 		}
 		close(donec)
 	}()

@ -360,7 +363,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
 	}
 }

-// TestLeaseKeepAliveCloseAfterDisconnectExpire ensures the keep alive channel is closed
+// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
 // following a disconnection, lease revoke, then reconnect.
 func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
 	defer testutil.AfterTest(t)

@ -395,7 +398,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {

 	clus.Members[0].Restart(t)

-	// some keep-alives may still be buffered; drain until close
+	// some responses may still be buffered; drain until close
 	timer := time.After(time.Duration(kresp.TTL) * time.Second)
 	for kresp != nil {
 		select {

@ -482,7 +485,8 @@ func TestLeaseTimeToLive(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
 	defer clus.Terminate(t)

-	lapi := clus.RandClient()
+	c := clus.RandClient()
+	lapi := c

 	resp, err := lapi.Grant(context.Background(), 10)
 	if err != nil {

@ -497,6 +501,11 @@ func TestLeaseTimeToLive(t *testing.T) {
 		}
 	}

+	// linearized read to ensure Puts propagated to server backing lapi
+	if _, err := c.Get(context.TODO(), "abc"); err != nil {
+		t.Fatal(err)
+	}
+
 	lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
 	if lerr != nil {
 		t.Fatal(lerr)

@ -545,8 +554,7 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
 	}

 	lresp, err := cli.TimeToLive(context.Background(), resp.ID)
-	// TimeToLive() doesn't return LeaseNotFound error
-	// but return a response with TTL to be -1
+	// TimeToLive() should return a response with TTL=-1.
 	if err != nil {
 		t.Fatalf("expected err to be nil")
 	}

@ -636,8 +644,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
 	}
 }

-// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster
-// before, during, and after quorum loss to confirm Grant/Keepalive tolerates
+// TestV3LeaseFailureOverlap issues Grant and KeepAlive requests to a cluster
+// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates
 // transient cluster failure.
 func TestV3LeaseFailureOverlap(t *testing.T) {
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
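The lease tests above drive Grant, KeepAlive and TimeToLive. A hedged sketch of the typical client-side pattern follows; the endpoint address and key names are placeholders, not values from this change.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a 10-second lease and attach a key to it.
	lease, err := cli.Grant(context.TODO(), 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = cli.Put(context.TODO(), "svc/instance", "addr", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}

	// Keep the lease alive; the key disappears once keep-alives stop
	// (client shutdown, partition, or explicit revoke).
	kch, err := cli.KeepAlive(context.TODO(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	for range kch {
		// drain responses; the channel closes when the lease expires or is revoked
	}
	log.Println("lease keep-alive channel closed")
}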
@ -14,8 +14,16 @@

 package integration

-import "github.com/coreos/pkg/capnslog"
+import (
+	"io/ioutil"
+
+	"github.com/coreos/etcd/clientv3"
+
+	"github.com/coreos/pkg/capnslog"
+	"google.golang.org/grpc/grpclog"
+)

 func init() {
-	capnslog.SetGlobalLogLevel(capnslog.INFO)
+	capnslog.SetGlobalLogLevel(capnslog.CRITICAL)
+	clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
 }
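The init() change above can be copied into any test-only package that wants quiet etcd clients; a minimal sketch of the same two calls, shown here only for readability:

package integration

import (
	"io/ioutil"

	"github.com/coreos/etcd/clientv3"

	"github.com/coreos/pkg/capnslog"
	"google.golang.org/grpc/grpclog"
)

func init() {
	// Silence capnslog output except for critical errors and discard
	// all gRPC client logging during tests.
	capnslog.SetGlobalLogLevel(capnslog.CRITICAL)
	clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}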
@ -44,7 +44,7 @@ func TestV3ClientMetrics(t *testing.T) {
 		err error
 	)

-	// listen for all prometheus metrics
+	// listen for all Prometheus metrics
 	donec := make(chan struct{})
 	go func() {
 		defer close(donec)

@ -65,7 +65,7 @@ func TestV3ClientMetrics(t *testing.T) {

 	url := "unix://" + addr + "/metrics"

-	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true})
 	defer clus.Terminate(t)

 	cfg := clientv3.Config{

@ -25,6 +25,7 @@ import (
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/mvcc/mvccpb"
 	"github.com/coreos/etcd/pkg/testutil"

 	"golang.org/x/net/context"
 )

@ -15,7 +15,6 @@
 package integration

 import (
-	"context"
 	"reflect"
 	"testing"

@ -24,6 +23,8 @@ import (
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/mvcc/mvccpb"
 	"github.com/coreos/etcd/pkg/testutil"

+	"golang.org/x/net/context"
 )

 func TestNamespacePutGet(t *testing.T) {
260  clientv3/integration/network_partition_test.go  Normal file
@ -0,0 +1,260 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !cluster_proxy

package integration

import (
	"errors"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"

	"golang.org/x/net/context"
)

var errExpected = errors.New("expected error")

// TestBalancerUnderNetworkPartitionPut tests when one member becomes isolated,
// the first Put request fails, and the following retry succeeds with the client
// balancer switching to others.
func TestBalancerUnderNetworkPartitionPut(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Put(ctx, "a", "b")
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	}, time.Second)
}

func TestBalancerUnderNetworkPartitionDelete(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Delete(ctx, "a")
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	}, time.Second)
}

func TestBalancerUnderNetworkPartitionTxn(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Txn(ctx).
			If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
			Then(clientv3.OpPut("foo", "bar")).
			Else(clientv3.OpPut("foo", "baz")).Commit()
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	}, time.Second)
}

// TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout tests
// when one member becomes isolated, the first quorum Get request succeeds
// by switching endpoints within the timeout (long enough to cover endpoint switch).
func TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a")
		return err
	}, 7*time.Second)
}

// TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout tests
// when one member becomes isolated, the first quorum Get request fails,
// and the following retry succeeds with the client balancer switching to others.
func TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a")
		if err == context.DeadlineExceeded {
			return errExpected
		}
		return err
	}, time.Second)
}

func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) {
	testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a", clientv3.WithSerializable())
		return err
	}, time.Second)
}

func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:                 3,
		GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
		SkipCreatingClient:   true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	// expect pin eps[0]
	ccfg := clientv3.Config{
		Endpoints:   []string{eps[0]},
		DialTimeout: 3 * time.Second,
	}
	cli, err := clientv3.New(ccfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// wait for eps[0] to be pinned
	mustWaitPinReady(t, cli)

	// add other endpoints for later endpoint switch
	cli.SetEndpoints(eps...)
	clus.Members[0].InjectPartition(t, clus.Members[1:]...)

	for i := 0; i < 2; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		err = op(cli, ctx)
		cancel()
		if err == nil {
			break
		}
		if err != errExpected {
			t.Errorf("#%d: expected %v, got %v", i, errExpected, err)
		}
		// give enough time for endpoint switch
		// TODO: remove random sleep by syncing directly with balancer
		if i == 0 {
			time.Sleep(5 * time.Second)
		}
	}
	if err != nil {
		t.Errorf("balancer did not switch in time (%v)", err)
	}
}

// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures the balancer
// switches endpoints when the leader fails and a linearizable get request returns
// "etcdserver: request timed out".
func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)
	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	lead := clus.WaitLeader(t)

	timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{eps[(lead+1)%2]},
		DialTimeout: 1 * time.Second,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// wait for non-leader to be pinned
	mustWaitPinReady(t, cli)

	// add all eps to list, so that when the original pinned one fails
	// the client can switch to other available eps
	cli.SetEndpoints(eps[lead], eps[(lead+1)%2])

	// isolate leader
	clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3])

	// expects balancer endpoint switch while ongoing leader election
	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
	_, err = cli.Get(ctx, "a")
	cancel()
	if err != nil {
		t.Fatal(err)
	}
}

func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) {
	testBalancerUnderNetworkPartitionWatch(t, true)
}

func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
	testBalancerUnderNetworkPartitionWatch(t, false)
}

// testBalancerUnderNetworkPartitionWatch ensures a watch stream
// to a partitioned node is closed when the context requires a leader.
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	target := clus.WaitLeader(t)
	if !isolateLeader {
		target = (target + 1) % 3
	}

	// pin eps[target]
	watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}})
	if err != nil {
		t.Fatal(err)
	}
	defer watchCli.Close()

	// wait for eps[target] to be pinned
	mustWaitPinReady(t, watchCli)

	// add all eps to list, so that when the original pinned one fails
	// the client can switch to other available eps
	watchCli.SetEndpoints(eps...)

	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
	select {
	case <-wch:
	case <-time.After(3 * time.Second):
		t.Fatal("took too long to create watch")
	}

	// isolate eps[target]
	clus.Members[target].InjectPartition(t,
		clus.Members[(target+1)%3],
		clus.Members[(target+2)%3],
	)

	select {
	case ev := <-wch:
		if len(ev.Events) != 0 {
			t.Fatal("expected no event")
		}
		if err = ev.Err(); err != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
		}
	case <-time.After(3 * time.Second): // enough time to detect leader lost
		t.Fatal("took too long to detect leader lost")
	}
}
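The watch tests above rely on clientv3.WithRequireLeader to make a partitioned member fail the stream instead of hanging. A rough application-side sketch of that pattern (endpoint and key are placeholders):

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithRequireLeader makes the server cancel the stream when it loses
	// its leader, instead of letting the watch hang on a partitioned member.
	ctx := clientv3.WithRequireLeader(context.Background())
	for resp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
		if resp.Err() == rpctypes.ErrNoLeader {
			// recreate the watch on another endpoint, or back off and retry
			log.Println("watch canceled: no leader")
			return
		}
		for _, ev := range resp.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}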
@ -20,6 +20,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/pkg/testutil"

 	"golang.org/x/net/context"
 )
352  clientv3/integration/server_shutdown_test.go  Normal file
@ -0,0 +1,352 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"bytes"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"

	"golang.org/x/net/context"
)

// TestBalancerUnderServerShutdownWatch expects that the watch client
// switches its endpoints when the member of the pinned endpoint fails.
func TestBalancerUnderServerShutdownWatch(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	lead := clus.WaitLeader(t)

	// pin eps[lead]
	watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[lead]}})
	if err != nil {
		t.Fatal(err)
	}
	defer watchCli.Close()

	// wait for eps[lead] to be pinned
	mustWaitPinReady(t, watchCli)

	// add all eps to list, so that when the original pinned one fails
	// the client can switch to other available eps
	watchCli.SetEndpoints(eps...)

	key, val := "foo", "bar"
	wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
	select {
	case <-wch:
	case <-time.After(3 * time.Second):
		t.Fatal("took too long to create watch")
	}

	donec := make(chan struct{})
	go func() {
		defer close(donec)

		// switch to others when eps[lead] is shut down
		select {
		case ev := <-wch:
			if werr := ev.Err(); werr != nil {
				t.Fatal(werr)
			}
			if len(ev.Events) != 1 {
				t.Fatalf("expected one event, got %+v", ev)
			}
			if !bytes.Equal(ev.Events[0].Kv.Value, []byte(val)) {
				t.Fatalf("expected %q, got %+v", val, ev.Events[0].Kv)
			}
		case <-time.After(7 * time.Second):
			t.Fatal("took too long to receive events")
		}
	}()

	// shut down eps[lead]
	clus.Members[lead].Terminate(t)

	// writes to eps[lead+1]
	putCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
	if err != nil {
		t.Fatal(err)
	}
	defer putCli.Close()
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		_, err = putCli.Put(ctx, key, val)
		cancel()
		if err == nil {
			break
		}
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail {
			continue
		}
		t.Fatal(err)
	}

	select {
	case <-donec:
	case <-time.After(5 * time.Second): // enough time for balancer switch
		t.Fatal("took too long to receive events")
	}
}

func TestBalancerUnderServerShutdownPut(t *testing.T) {
	testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Put(ctx, "foo", "bar")
		return err
	})
}

func TestBalancerUnderServerShutdownDelete(t *testing.T) {
	testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Delete(ctx, "foo")
		return err
	})
}

func TestBalancerUnderServerShutdownTxn(t *testing.T) {
	testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Txn(ctx).
			If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
			Then(clientv3.OpPut("foo", "bar")).
			Else(clientv3.OpPut("foo", "baz")).Commit()
		return err
	})
}

// testBalancerUnderServerShutdownMutable expects that when the member of
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent put/delete/txn requests succeed with new endpoints.
func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	// pin eps[0]
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}})
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// wait for eps[0] to be pinned
	mustWaitPinReady(t, cli)

	// add all eps to list, so that when the original pinned one fails
	// the client can switch to other available eps
	cli.SetEndpoints(eps...)

	// shut down eps[0]
	clus.Members[0].Terminate(t)

	// switched to others when eps[0] was explicitly shut down
	// and following request should succeed
	// TODO: remove this (expose client connection state?)
	time.Sleep(time.Second)

	cctx, ccancel := context.WithTimeout(context.Background(), time.Second)
	err = op(cli, cctx)
	ccancel()
	if err != nil {
		t.Fatal(err)
	}
}

func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) {
	testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "foo")
		return err
	}, 7*time.Second) // give enough time for leader election, balancer switch
}

func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
	testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
		return err
	}, 2*time.Second)
}

// testBalancerUnderServerShutdownImmutable expects that when the member of
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent range requests succeed with new endpoints.
func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               3,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	// pin eps[0]
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}})
	if err != nil {
		t.Errorf("failed to create client: %v", err)
	}
	defer cli.Close()

	// wait for eps[0] to be pinned
	mustWaitPinReady(t, cli)

	// add all eps to list, so that when the original pinned one fails
	// the client can switch to other available eps
	cli.SetEndpoints(eps...)

	// shut down eps[0]
	clus.Members[0].Terminate(t)

	// switched to others when eps[0] was explicitly shut down
	// and following request should succeed
	cctx, ccancel := context.WithTimeout(context.Background(), timeout)
	err = op(cli, cctx)
	ccancel()
	if err != nil {
		t.Errorf("failed to finish range request in time %v (timeout %v)", err, timeout)
	}
}

func TestBalancerUnderServerStopInflightLinearizableGetOnRestart(t *testing.T) {
	tt := []pinTestOpt{
		{pinLeader: true, stopPinFirst: true},
		{pinLeader: true, stopPinFirst: false},
		{pinLeader: false, stopPinFirst: true},
		{pinLeader: false, stopPinFirst: false},
	}
	for i := range tt {
		testBalancerUnderServerStopInflightRangeOnRestart(t, true, tt[i])
	}
}

func TestBalancerUnderServerStopInflightSerializableGetOnRestart(t *testing.T) {
	tt := []pinTestOpt{
		{pinLeader: true, stopPinFirst: true},
		{pinLeader: true, stopPinFirst: false},
		{pinLeader: false, stopPinFirst: true},
		{pinLeader: false, stopPinFirst: false},
	}
	for i := range tt {
		testBalancerUnderServerStopInflightRangeOnRestart(t, false, tt[i])
	}
}

type pinTestOpt struct {
	pinLeader    bool
	stopPinFirst bool
}

// testBalancerUnderServerStopInflightRangeOnRestart expects that an
// inflight range request reconnects on server restart.
func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) {
	defer testutil.AfterTest(t)

	cfg := &integration.ClusterConfig{
		Size:               2,
		SkipCreatingClient: true,
	}
	if linearizable {
		cfg.Size = 3
	}

	clus := integration.NewClusterV3(t, cfg)
	defer clus.Terminate(t)
	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
	if linearizable {
		eps = append(eps, clus.Members[2].GRPCAddr())
	}

	lead := clus.WaitLeader(t)

	target := lead
	if !opt.pinLeader {
		target = (target + 1) % 2
	}

	// pin eps[target]
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}})
	if err != nil {
		t.Errorf("failed to create client: %v", err)
	}
	defer cli.Close()

	// wait for eps[target] to be pinned
	mustWaitPinReady(t, cli)

	// add all eps to list, so that when the original pinned one fails
	// the client can switch to other available eps
	cli.SetEndpoints(eps...)

	if opt.stopPinFirst {
		clus.Members[target].Stop(t)
		// give some time for balancer switch before stopping the other
		time.Sleep(time.Second)
		clus.Members[(target+1)%2].Stop(t)
	} else {
		clus.Members[(target+1)%2].Stop(t)
		// balancer cannot pin other member since it's already stopped
		clus.Members[target].Stop(t)
	}

	// 3-second is the minimum interval between endpoint being marked
	// as unhealthy and being removed from unhealthy, so possibly
	// takes >5-second to unpin and repin an endpoint
	// TODO: decrease timeout when balancer switch rewrite
	clientTimeout := 7 * time.Second

	var gops []clientv3.OpOption
	if !linearizable {
		gops = append(gops, clientv3.WithSerializable())
	}

	donec, readyc := make(chan struct{}), make(chan struct{}, 1)
	go func() {
		defer close(donec)
		ctx, cancel := context.WithTimeout(context.TODO(), clientTimeout)
		readyc <- struct{}{}
		_, err := cli.Get(ctx, "abc", gops...)
		cancel()
		if err != nil {
			t.Fatal(err)
		}
	}()

	<-readyc
	clus.Members[target].Restart(t)

	select {
	case <-time.After(clientTimeout + 3*time.Second):
		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
	case <-donec:
	}
}
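The put loop in TestBalancerUnderServerShutdownWatch retries only on errors that indicate a timeout or leader failover. A standalone sketch of that pattern follows; the endpoints and the attempt bound are assumptions. Note that, unlike TestKVPutAtMostOnce above, this loop accepts that a timed-out Put may still have been applied.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

// putWithRetry retries a Put while the error looks transient
// (request timeout or leader failover), up to maxAttempts.
func putWithRetry(cli *clientv3.Client, key, val string, maxAttempts int) error {
	var err error
	for i := 0; i < maxAttempts; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		_, err = cli.Put(ctx, key, val)
		cancel()
		if err == nil {
			return nil
		}
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail {
			continue // transient; let the balancer switch endpoints
		}
		return err // permanent error
	}
	return err
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379", "127.0.0.1:22379"}}) // placeholders
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println(putWithRetry(cli, "foo", "bar", 5))
}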

@@ -24,6 +24,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/pkg/testutil"
+
 	"golang.org/x/net/context"
 )

@@ -100,6 +101,8 @@ func TestTxnWriteFail(t *testing.T) {
 }

 func TestTxnReadRetry(t *testing.T) {
+	t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request")
+
 	defer testutil.AfterTest(t)

 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})

@@ -129,7 +132,6 @@ func TestTxnReadRetry(t *testing.T) {
 		t.Fatalf("waited too long")
 	}
 }

 func TestTxnSuccess(t *testing.T) {
 	defer testutil.AfterTest(t)

@@ -21,6 +21,7 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/pkg/testutil"
+
 	"golang.org/x/net/context"
 )

@@ -62,7 +63,7 @@ func TestUserErrorAuth(t *testing.T) {
 	authapi := clus.RandClient()
 	authSetupRoot(t, authapi.Auth)

-	// un-authenticated client
+	// unauthenticated client
 	if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserNotFound {
 		t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
 	}

clientv3/integration/util.go (new file, 35 lines)
@@ -0,0 +1,35 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

// mustWaitPinReady waits up to 3-second until connection is up (pin endpoint).
// Fatal on time-out.
func mustWaitPinReady(t *testing.T, cli *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	_, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		t.Fatal(err)
	}
}

@@ -28,8 +28,10 @@ import (
 	"github.com/coreos/etcd/integration"
 	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
 	"github.com/coreos/etcd/pkg/testutil"
+
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
 )

 type watcherTest func(*testing.T, *watchctx)

@@ -51,8 +53,8 @@ func runWatchTest(t *testing.T, f watcherTest) {
 	wclientMember := rand.Intn(3)
 	w := clus.Client(wclientMember).Watcher
-	// select a different client from wclient so puts succeed if
-	// a test knocks out the watcher client
+	// select a different client for KV operations so puts succeed if
+	// a test knocks out the watcher client.
 	kvMember := rand.Intn(3)
 	for kvMember == wclientMember {
 		kvMember = rand.Intn(3)

@@ -309,7 +311,7 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
 	select {
 	case <-time.After(time.Second):
 		t.Fatalf("took too long to cancel")
-	case v, ok := <-wctx.ch:
+	case _, ok := <-wctx.ch:
 		if !ok {
 			// closed before getting put; OK
 			break

@@ -318,8 +320,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
 		select {
 		case <-time.After(time.Second):
 			t.Fatalf("took too long to close")
-		case v, ok = <-wctx.ch:
-			if ok {
+		case v, ok2 := <-wctx.ch:
+			if ok2 {
 				t.Fatalf("expected watcher channel to close, got %v", v)
 			}
 		}

@@ -800,7 +802,8 @@ func TestWatchWithFilter(t *testing.T) {
 	}
 }

-// TestWatchWithCreatedNotification checks that createdNotification works.
+// TestWatchWithCreatedNotification checks that WithCreatedNotify returns a
+// Created watch response.
 func TestWatchWithCreatedNotification(t *testing.T) {
 	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer cluster.Terminate(t)

@@ -837,8 +840,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
 	cluster.Members[0].DropConnections()

-	// try to receive from watch channel again
-	// ensure it doesn't post another createNotify
+	// check watch channel doesn't post another watch response.
 	select {
 	case wresp := <-wch:
 		t.Fatalf("got unexpected watch response: %+v\n", wresp)

@@ -856,10 +858,26 @@ func TestWatchCancelOnServer(t *testing.T) {
 	client := cluster.RandClient()
 	numWatches := 10

+	// The grpc proxy starts watches to detect leadership after the proxy server
+	// returns as started; to avoid racing on the proxy's internal watches, wait
+	// until require leader watches get create responses to ensure the leadership
+	// watches have started.
+	for {
+		ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.TODO()))
+		ww := client.Watch(ctx, "a", clientv3.WithCreatedNotify())
+		wresp := <-ww
+		cancel()
+		if wresp.Err() == nil {
+			break
+		}
+	}
+
 	cancels := make([]context.CancelFunc, numWatches)
 	for i := 0; i < numWatches; i++ {
-		// use WithTimeout to force separate streams in client
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		// force separate streams in client
+		md := metadata.Pairs("some-key", fmt.Sprintf("%d", i))
+		mctx := metadata.NewOutgoingContext(context.Background(), md)
+		ctx, cancel := context.WithCancel(mctx)
 		cancels[i] = cancel
 		w := client.Watch(ctx, fmt.Sprintf("%d", i), clientv3.WithCreatedNotify())
 		<-w

@@ -885,7 +903,7 @@ func TestWatchCancelOnServer(t *testing.T) {
 		t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr)
 	}

-	if maxWatchV-minWatchV != numWatches {
+	if maxWatchV-minWatchV < numWatches {
 		t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV)
 	}
 }

@@ -916,12 +934,12 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
 	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
 	defer clus.Terminate(t)

-	// each unique context "%v" has a unique grpc stream
 	n := 100
 	ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5)
 	for i := range ctxs {
-		// make "%v" unique
-		ctxs[i] = context.WithValue(context.TODO(), "key", i)
+		// make unique stream
+		md := metadata.Pairs("some-key", fmt.Sprintf("%d", i))
+		ctxs[i] = metadata.NewOutgoingContext(context.Background(), md)
 		// limits the maximum number of outstanding watchers per stream
 		ctxc[i] = make(chan struct{}, 2)
 	}

@@ -946,7 +964,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
 			t.Fatalf("unexpected closed channel %p", wch)
 		}
 		// may take a second or two to reestablish a watcher because of
-		// grpc backoff policies for disconnects
+		// grpc back off policies for disconnects
 	case <-time.After(5 * time.Second):
 		t.Errorf("timed out waiting for watch on %p", wch)
 	}

@@ -970,7 +988,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
 	}
 }

-// TestWatchCanelAndCloseClient ensures that canceling a watcher then immediately
+// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
 // closing the client does not return a client closing error.
 func TestWatchCancelAndCloseClient(t *testing.T) {
 	defer testutil.AfterTest(t)

@@ -16,8 +16,8 @@ package clientv3
 import (
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 )

 type (

@@ -66,11 +66,26 @@ type OpResponse struct {
 	put *PutResponse
 	get *GetResponse
 	del *DeleteResponse
+	txn *TxnResponse
 }

 func (op OpResponse) Put() *PutResponse    { return op.put }
 func (op OpResponse) Get() *GetResponse    { return op.get }
 func (op OpResponse) Del() *DeleteResponse { return op.del }
+func (op OpResponse) Txn() *TxnResponse    { return op.txn }
+
+func (resp *PutResponse) OpResponse() OpResponse {
+	return OpResponse{put: resp}
+}
+func (resp *GetResponse) OpResponse() OpResponse {
+	return OpResponse{get: resp}
+}
+func (resp *DeleteResponse) OpResponse() OpResponse {
+	return OpResponse{del: resp}
+}
+func (resp *TxnResponse) OpResponse() OpResponse {
+	return OpResponse{txn: resp}
+}

 type kv struct {
 	remote pb.KVClient

@@ -115,29 +130,11 @@ func (kv *kv) Txn(ctx context.Context) Txn {
 }

 func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
-	for {
-		resp, err := kv.do(ctx, op)
-		if err == nil {
-			return resp, nil
-		}
-
-		if isHaltErr(ctx, err) {
-			return resp, toErr(ctx, err)
-		}
-		// do not retry on modifications
-		if op.isWrite() {
-			return resp, toErr(ctx, err)
-		}
-	}
-}
-
-func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
 	var err error
 	switch op.t {
-	// TODO: handle other ops
 	case tRange:
 		var resp *pb.RangeResponse
-		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
+		resp, err = kv.remote.Range(ctx, op.toRangeRequest())
 		if err == nil {
 			return OpResponse{get: (*GetResponse)(resp)}, nil
 		}

@@ -155,8 +152,14 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
 		if err == nil {
 			return OpResponse{del: (*DeleteResponse)(resp)}, nil
 		}
+	case tTxn:
+		var resp *pb.TxnResponse
+		resp, err = kv.remote.Txn(ctx, op.toTxnRequest())
+		if err == nil {
+			return OpResponse{txn: (*TxnResponse)(resp)}, nil
+		}
 	default:
 		panic("Unknown op")
 	}
-	return OpResponse{}, err
+	return OpResponse{}, toErr(ctx, err)
 }
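
Note: with the tTxn case above, a whole transaction can now flow through kv.Do as a single Op. The following is a minimal sketch (not part of the diff) of how a caller could exercise that path, assuming an already-running etcd on the illustrative endpoint "localhost:2379"; OpTxn and OpResponse.Txn are the additions shown above, the rest is existing clientv3 API.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"

	"golang.org/x/net/context"
)

// doTxn issues a txn through the generic Do path instead of the Txn builder,
// using the OpTxn constructor and the OpResponse.Txn accessor.
func doTxn(cli *clientv3.Client) {
	cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version("k"), "=", 0)}
	thenOps := []clientv3.Op{clientv3.OpPut("k", "v")}
	elseOps := []clientv3.Op{clientv3.OpGet("k")}

	resp, err := cli.Do(context.TODO(), clientv3.OpTxn(cmps, thenOps, elseOps))
	if err != nil {
		log.Fatal(err)
	}
	// Succeeded reports whether the compare (version of "k" == 0) held.
	fmt.Println(resp.Txn().Succeeded)
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	doTxn(cli)
}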

@@ -20,8 +20,8 @@ import (
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 )

@@ -30,7 +30,7 @@ type (
 	LeaseID int64
 )

-// LeaseGrantResponse is used to convert the protobuf grant response.
+// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
 type LeaseGrantResponse struct {
 	*pb.ResponseHeader
 	ID LeaseID

@@ -38,14 +38,14 @@ type LeaseGrantResponse struct {
 	Error string
 }

-// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
+// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
 type LeaseKeepAliveResponse struct {
 	*pb.ResponseHeader
 	ID  LeaseID
 	TTL int64
 }

-// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
+// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
 type LeaseTimeToLiveResponse struct {
 	*pb.ResponseHeader
 	ID LeaseID `json:"id"`

@@ -60,6 +60,12 @@ type LeaseTimeToLiveResponse struct {
 	Keys [][]byte `json:"keys"`
 }

+// LeaseStatus represents a lease status.
+type LeaseStatus struct {
+	ID LeaseID `json:"id"`
+	// TODO: TTL int64
+}
+
 const (
 	// defaultTTL is the assumed lease TTL used for the first keepalive
 	// deadline before the actual TTL is known to the client.

@@ -101,7 +107,7 @@ type Lease interface {
 	// KeepAlive keeps the given lease alive forever.
 	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

-	// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
+	// KeepAliveOnce renews the lease once. In most of the cases, KeepAlive
 	// should be used instead of KeepAliveOnce.
 	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

@@ -167,56 +173,43 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Durati
 }

 func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
-	for {
-		r := &pb.LeaseGrantRequest{TTL: ttl}
-		resp, err := l.remote.LeaseGrant(ctx, r)
-		if err == nil {
-			gresp := &LeaseGrantResponse{
-				ResponseHeader: resp.GetHeader(),
-				ID:             LeaseID(resp.ID),
-				TTL:            resp.TTL,
-				Error:          resp.Error,
-			}
-			return gresp, nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
+	r := &pb.LeaseGrantRequest{TTL: ttl}
+	resp, err := l.remote.LeaseGrant(ctx, r)
+	if err == nil {
+		gresp := &LeaseGrantResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			Error:          resp.Error,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
 }

 func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
-	for {
-		r := &pb.LeaseRevokeRequest{ID: int64(id)}
-		resp, err := l.remote.LeaseRevoke(ctx, r)
-
-		if err == nil {
-			return (*LeaseRevokeResponse)(resp), nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
+	r := &pb.LeaseRevokeRequest{ID: int64(id)}
+	resp, err := l.remote.LeaseRevoke(ctx, r)
+	if err == nil {
+		return (*LeaseRevokeResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
 }

 func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
-	for {
-		r := toLeaseTimeToLiveRequest(id, opts...)
-		resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
-		if err == nil {
-			gresp := &LeaseTimeToLiveResponse{
-				ResponseHeader: resp.GetHeader(),
-				ID:             LeaseID(resp.ID),
-				TTL:            resp.TTL,
-				GrantedTTL:     resp.GrantedTTL,
-				Keys:           resp.Keys,
-			}
-			return gresp, nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
+	r := toLeaseTimeToLiveRequest(id, opts...)
+	resp, err := l.remote.LeaseTimeToLive(ctx, r)
+	if err == nil {
+		gresp := &LeaseTimeToLiveResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			GrantedTTL:     resp.GrantedTTL,
+			Keys:           resp.Keys,
+		}
+		return gresp, nil
+	}
+	return nil, toErr(ctx, err)
 }

 func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {

@@ -314,7 +307,7 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
 	}
 }

-// closeRequireLeader scans all keep alives for ctxs that have require leader
+// closeRequireLeader scans keepAlives for ctxs that have require leader
 // and closes the associated channels.
 func (l *lessor) closeRequireLeader() {
 	l.mu.Lock()

@@ -323,7 +316,7 @@ func (l *lessor) closeRequireLeader() {
 		reqIdxs := 0
 		// find all required leader channels, close, mark as nil
 		for i, ctx := range ka.ctxs {
-			md, ok := metadata.FromContext(ctx)
+			md, ok := metadata.FromOutgoingContext(ctx)
 			if !ok {
 				continue
 			}

@@ -357,7 +350,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
 	cctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
+	stream, err := l.remote.LeaseKeepAlive(cctx)
 	if err != nil {
 		return nil, toErr(ctx, err)
 	}

@@ -386,7 +379,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
 		close(l.donec)
 		l.loopErr = gerr
 		for _, ka := range l.keepAlives {
-			ka.Close()
+			ka.close()
 		}
 		l.keepAlives = make(map[LeaseID]*keepAlive)
 		l.mu.Unlock()

@@ -401,7 +394,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
 		} else {
 			for {
 				resp, err := stream.Recv()
-
 				if err != nil {
 					if canceledByCaller(l.stopCtx, err) {
 						return err

@@ -426,10 +418,10 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
 	}
 }

-// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
+// resetRecv opens a new lease stream and starts sending keep alive requests.
 func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
 	sctx, cancel := context.WithCancel(l.stopCtx)
-	stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
+	stream, err := l.remote.LeaseKeepAlive(sctx)
 	if err != nil {
 		cancel()
 		return nil, err

@@ -467,7 +459,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
 	if karesp.TTL <= 0 {
 		// lease expired; close all keep alive channels
 		delete(l.keepAlives, karesp.ID)
-		ka.Close()
+		ka.close()
 		return
 	}

@@ -497,7 +489,7 @@ func (l *lessor) deadlineLoop() {
 		for id, ka := range l.keepAlives {
 			if ka.deadline.Before(now) {
 				// waited too long for response; lease may be expired
-				ka.Close()
+				ka.close()
 				delete(l.keepAlives, id)
 			}
 		}

@@ -505,7 +497,7 @@ func (l *lessor) deadlineLoop() {
 	}
 }

-// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
 func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
 	for {
 		var tosend []LeaseID

@@ -539,7 +531,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
 	}
 }

-func (ka *keepAlive) Close() {
+func (ka *keepAlive) close() {
 	close(ka.donec)
 	for _, ch := range ka.chs {
 		close(ch)

@@ -16,36 +16,35 @@ package clientv3

 import (
 	"io/ioutil"
-	"log"
 	"sync"

 	"google.golang.org/grpc/grpclog"
 )

 // Logger is the logger used by client library.
-// It implements grpclog.Logger interface.
-type Logger grpclog.Logger
+// It implements grpclog.LoggerV2 interface.
+type Logger grpclog.LoggerV2

 var (
 	logger settableLogger
 )

 type settableLogger struct {
-	l  grpclog.Logger
+	l  grpclog.LoggerV2
 	mu sync.RWMutex
 }

 func init() {
 	// disable client side logs by default
 	logger.mu.Lock()
-	logger.l = log.New(ioutil.Discard, "", 0)
+	logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)

 	// logger has to override the grpclog at initialization so that
 	// any changes to the grpclog go through logger with locking
 	// instead of through SetLogger
 	//
 	// now updates only happen through settableLogger.set
-	grpclog.SetLogger(&logger)
+	grpclog.SetLoggerV2(&logger)
 	logger.mu.Unlock()
 }

@@ -62,6 +61,7 @@ func GetLogger() Logger {
 func (s *settableLogger) set(l Logger) {
 	s.mu.Lock()
 	logger.l = l
+	grpclog.SetLoggerV2(&logger)
 	s.mu.Unlock()
 }

@@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger {
 	return l
 }

-// implement the grpclog.Logger interface
+// implement the grpclog.LoggerV2 interface

+func (s *settableLogger) Info(args ...interface{})                 { s.get().Info(args...) }
+func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
+func (s *settableLogger) Infoln(args ...interface{})               { s.get().Infoln(args...) }
+func (s *settableLogger) Warning(args ...interface{})              { s.get().Warning(args...) }
+func (s *settableLogger) Warningf(format string, args ...interface{}) {
+	s.get().Warningf(format, args...)
+}
+func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
+func (s *settableLogger) Error(args ...interface{})     { s.get().Error(args...) }
+func (s *settableLogger) Errorf(format string, args ...interface{}) {
+	s.get().Errorf(format, args...)
+}
+func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
 func (s *settableLogger) Fatal(args ...interface{})                 { s.get().Fatal(args...) }
 func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
 func (s *settableLogger) Fatalln(args ...interface{})               { s.get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{})                 { s.get().Print(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
-func (s *settableLogger) Println(args ...interface{})               { s.get().Println(args...) }
+func (s *settableLogger) Print(args ...interface{})                 { s.get().Info(args...) }
+func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
+func (s *settableLogger) Println(args ...interface{})               { s.get().Infoln(args...) }
+func (s *settableLogger) V(l int) bool                              { return s.get().V(l) }
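
Note: since clientv3.Logger is now an alias for grpclog.LoggerV2, callers that previously passed a grpclog.Logger need a LoggerV2 instead. A minimal sketch (not part of the diff), assuming clientv3.SetLogger keeps its existing signature:

package main

import (
	"os"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route client-side info/warning/error logs to stderr; any grpclog.LoggerV2
	// implementation can be plugged in the same way.
	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}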
@@ -32,17 +32,23 @@ func init() { auth.BcryptCost = bcrypt.MinCost }

 // TestMain sets up an etcd cluster if running the examples.
 func TestMain(m *testing.M) {
-	useCluster := true // default to running all tests
+	useCluster, hasRunArg := false, false // default to running only Test*
 	for _, arg := range os.Args {
 		if strings.HasPrefix(arg, "-test.run=") {
 			exp := strings.Split(arg, "=")[1]
 			match, err := regexp.MatchString(exp, "Example")
 			useCluster = (err == nil && match) || strings.Contains(exp, "Example")
+			hasRunArg = true
 			break
 		}
 	}
+	if !hasRunArg {
+		// force only running Test* if no args given to avoid leak false
+		// positives from having a long-running cluster for the examples.
+		os.Args = append(os.Args, "-test.run=Test")
+	}

-	v := 0
+	var v int
 	if useCluster {
 		cfg := integration.ClusterConfig{Size: 3}
 		clus := integration.NewClusterV3(nil, &cfg)

@@ -20,7 +20,6 @@ import (
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 )

 type (

@@ -37,7 +36,7 @@ type Maintenance interface {
 	// AlarmDisarm disarms a given alarm.
 	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)

-	// Defragment defragments storage backend of the etcd member with given endpoint.
+	// Defragment releases wasted space from internal fragmentation on a given etcd member.
 	// Defragment is only needed when deleting a large number of keys and want to reclaim
 	// the resources.
 	// Defragment is an expensive operation. User should avoid defragmenting multiple members

@@ -49,7 +48,7 @@ type Maintenance interface {
 	// Status gets the status of the endpoint.
 	Status(ctx context.Context, endpoint string) (*StatusResponse, error)

-	// Snapshot provides a reader for a snapshot of a backend.
+	// Snapshot provides a reader for a point-in-time snapshot of etcd.
 	Snapshot(ctx context.Context) (io.ReadCloser, error)
 }

@@ -66,9 +65,9 @@ func NewMaintenance(c *Client) Maintenance {
 				return nil, nil, err
 			}
 			cancel := func() { conn.Close() }
-			return pb.NewMaintenanceClient(conn), cancel, nil
+			return RetryMaintenanceClient(c, conn), cancel, nil
 		},
-		remote: pb.NewMaintenanceClient(c.conn),
+		remote: RetryMaintenanceClient(c, c.conn),
 	}
 }

@@ -87,15 +86,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
 		MemberID: 0,                 // all
 		Alarm:    pb.AlarmType_NONE, // all
 	}
-	for {
-		resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
-		if err == nil {
-			return (*AlarmResponse)(resp), nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
+	resp, err := m.remote.Alarm(ctx, req)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, toErr(ctx, err)
 }

 func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {

@@ -121,7 +116,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
 		return &ret, nil
 	}

-	resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
+	resp, err := m.remote.Alarm(ctx, req)
 	if err == nil {
 		return (*AlarmResponse)(resp), nil
 	}

@@ -134,7 +129,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
 		return nil, toErr(ctx, err)
 	}
 	defer cancel()
-	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
+	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
 	if err != nil {
 		return nil, toErr(ctx, err)
 	}

@@ -147,7 +142,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
 		return nil, toErr(ctx, err)
 	}
 	defer cancel()
-	resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
+	resp, err := remote.Status(ctx, &pb.StatusRequest{})
 	if err != nil {
 		return nil, toErr(ctx, err)
 	}

@@ -155,7 +150,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
 }

 func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
-	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
+	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{})
 	if err != nil {
 		return nil, toErr(ctx, err)
 	}

@@ -17,6 +17,7 @@ package mirror

 import (
 	"github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )

@@ -15,11 +15,11 @@
 package namespace

 import (
-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
+	"golang.org/x/net/context"
 )

 type kvPrefix struct {

@@ -17,9 +17,9 @@ package namespace
 import (
 	"bytes"

-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/clientv3"
+
+	"golang.org/x/net/context"
 )

 type leasePrefix struct {

@@ -17,9 +17,9 @@ package namespace
 import (
 	"sync"

-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/clientv3"
+
+	"golang.org/x/net/context"
 )

 type watcherPrefix struct {

@@ -19,11 +19,12 @@ import (
 	"fmt"

 	etcd "github.com/coreos/etcd/clientv3"
-	"golang.org/x/net/context"
-
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/status"
+
+	"golang.org/x/net/context"
 )

 var ErrWatcherClosed = fmt.Errorf("naming: watch closed")

@@ -39,13 +40,13 @@ func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Upd
 	case naming.Add:
 		var v []byte
 		if v, err = json.Marshal(nm); err != nil {
-			return grpc.Errorf(codes.InvalidArgument, err.Error())
+			return status.Error(codes.InvalidArgument, err.Error())
 		}
 		_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
 	case naming.Delete:
 		_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
 	default:
-		return grpc.Errorf(codes.InvalidArgument, "naming: bad naming op")
+		return status.Error(codes.InvalidArgument, "naming: bad naming op")
 	}
 	return err
 }

@@ -80,7 +81,7 @@ func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
 	// process new events on target/*
 	wr, ok := <-gw.wch
 	if !ok {
-		gw.err = grpc.Errorf(codes.Unavailable, "%s", ErrWatcherClosed)
+		gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
 		return nil, gw.err
 	}
 	if gw.err = wr.Err(); gw.err != nil {

@@ -19,12 +19,12 @@ import (
 	"reflect"
 	"testing"

-	"golang.org/x/net/context"
-	"google.golang.org/grpc/naming"
-
 	etcd "github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/integration"
 	"github.com/coreos/etcd/pkg/testutil"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/naming"
 )

 func TestGRPCResolver(t *testing.T) {

@@ -66,10 +66,13 @@ func TestGRPCResolver(t *testing.T) {
 	delOp := naming.Update{Op: naming.Delete, Addr: "127.0.0.1"}
 	err = r.Update(context.TODO(), "foo", delOp)
+	if err != nil {
+		t.Fatalf("failed to udpate %v", err)
+	}

 	us, err = w.Next()
 	if err != nil {
-		t.Fatal("failed to get udpate", err)
+		t.Fatalf("failed to get udpate %v", err)
 	}

 	wu = &naming.Update{

@@ -83,7 +86,7 @@ func TestGRPCResolver(t *testing.T) {
 	}
 }

-// TestGRPCResolverMultiInit ensures the resolver will initialize
+// TestGRPCResolverMulti ensures the resolver will initialize
 // correctly with multiple hosts and correctly receive multiple
 // updates in a single revision.
 func TestGRPCResolverMulti(t *testing.T) {
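
Note: status.Error produces the same gRPC status-carrying error that the deprecated grpc.Errorf helper did. A small illustration (not part of the diff) of how such an error round-trips:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.Unavailable, "naming: watch closed")

	// status.FromError recovers the code and message on the receiving side.
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message()) // Unavailable naming: watch closed
	}
}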

@@ -23,6 +23,7 @@ const (
 	tRange opType = iota + 1
 	tPut
 	tDeleteRange
+	tTxn
 )

 var (

@@ -67,9 +68,17 @@ type Op struct {
 	// for put
 	val     []byte
 	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
 }

-// accesors / mutators
+// accessors / mutators
+
+func (op Op) IsTxn() bool              { return op.t == tTxn }
+func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }

 // KeyBytes returns the byte slice holding the Op's key.
 func (op Op) KeyBytes() []byte { return op.key }

@@ -80,6 +89,39 @@ func (op *Op) WithKeyBytes(key []byte) { op.key = key }
 // RangeBytes returns the byte slice holding with the Op's range end, if any.
 func (op Op) RangeBytes() []byte { return op.end }

+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable == true }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly == true }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
 // WithRangeBytes sets the byte slice for the Op's range end.
 func (op *Op) WithRangeBytes(end []byte) { op.end = end }

@@ -113,6 +155,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest {
 	return r
 }

+func (op Op) toTxnRequest() *pb.TxnRequest {
+	thenOps := make([]*pb.RequestOp, len(op.thenOps))
+	for i, tOp := range op.thenOps {
+		thenOps[i] = tOp.toRequestOp()
+	}
+	elseOps := make([]*pb.RequestOp, len(op.elseOps))
+	for i, eOp := range op.elseOps {
+		elseOps[i] = eOp.toRequestOp()
+	}
+	cmps := make([]*pb.Compare, len(op.cmps))
+	for i := range op.cmps {
+		cmps[i] = (*pb.Compare)(&op.cmps[i])
+	}
+	return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
+}
+
 func (op Op) toRequestOp() *pb.RequestOp {
 	switch op.t {
 	case tRange:

@@ -129,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp {
 }

 func (op Op) isWrite() bool {
+	if op.t == tTxn {
+		for _, tOp := range op.thenOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		for _, tOp := range op.elseOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		return false
+	}
 	return op.t != tRange
 }

@@ -194,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op {
 	return ret
 }

+func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
+	return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
+}
+
 func opWatch(key string, opts ...OpOption) Op {
 	ret := Op{t: tRange, key: []byte(key)}
 	ret.applyOpts(opts)

@@ -247,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
 		if target == SortByKey && order == SortAscend {
 			// If order != SortNone, server fetches the entire key-space,
 			// and then applies the sort and limit, if provided.
-			// Since current mvcc.Range implementation returns results
-			// sorted by keys in lexicographically ascending order,
-			// client should ignore SortOrder if the target is SortByKey.
+			// Since by default the server returns results sorted by keys
+			// in lexicographically ascending order, the client should ignore
+			// SortOrder if the target is SortByKey.
 			order = SortNone
 		}
 		op.sort = &SortOption{target, order}

@@ -390,7 +465,7 @@ func WithPrevKV() OpOption {
 }

 // WithIgnoreValue updates the key using its current value.
-// Empty value should be passed when ignore_value is set.
+// This option can not be combined with non-empty values.
 // Returns an error if the key does not exist.
 func WithIgnoreValue() OpOption {
 	return func(op *Op) {

@@ -399,7 +474,7 @@ func WithIgnoreValue() OpOption {
 }

 // WithIgnoreLease updates the key using its current lease.
-// Empty lease should be passed when ignore_lease is set.
+// This option can not be combined with WithLease.
 // Returns an error if the key does not exist.
 func WithIgnoreLease() OpOption {
 	return func(op *Op) {

@@ -424,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) {
 	}
 }

-// WithAttachedKeys requests lease timetolive API to return
-// attached keys of given lease ID.
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
 func WithAttachedKeys() LeaseOption {
 	return func(op *LeaseOp) { op.attachedKeys = true }
 }
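
Note: the new Op accessors make it possible to inspect an operation without reaching into unexported fields, which interceptors and proxies need. A short illustrative sketch (not part of the diff; the describe helper and keys are examples only):

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// describe classifies an Op using the accessors added in this change.
func describe(op clientv3.Op) string {
	switch {
	case op.IsPut():
		return "put"
	case op.IsGet():
		return "get"
	case op.IsDelete():
		return "delete"
	case op.IsTxn():
		cmps, thenOps, elseOps := op.Txn()
		return fmt.Sprintf("txn(%d cmps, %d then, %d else)", len(cmps), len(thenOps), len(elseOps))
	}
	return "unknown"
}

func main() {
	fmt.Println(describe(clientv3.OpPut("k", "v"))) // put
	fmt.Println(describe(clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpGet("k")}, nil))) // txn(0 cmps, 1 then, 0 else)
}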

clientv3/ready_wait.go (new file, 30 lines)
@@ -0,0 +1,30 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import "golang.org/x/net/context"

// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
	select {
	case <-ready:
		return nil
	case <-rpcCtx.Done():
		return rpcCtx.Err()
	case <-clientCtx.Done():
		return clientCtx.Err()
	}
}
@ -17,135 +17,183 @@ package clientv3
|
|||||||
import (
|
import (
|
||||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
type rpcFunc func(ctx context.Context) error
|
type rpcFunc func(ctx context.Context) error
|
||||||
type retryRpcFunc func(context.Context, rpcFunc) error
|
type retryRPCFunc func(context.Context, rpcFunc) error
|
||||||
|
type retryStopErrFunc func(error) bool
|
||||||
|
|
||||||
func (c *Client) newRetryWrapper() retryRpcFunc {
|
func isRepeatableStopError(err error) bool {
|
||||||
|
eErr := rpctypes.Error(err)
|
||||||
|
// always stop retry on etcd errors
|
||||||
|
+	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
+		return true
+	}
+	// only retry if unavailable
+	ev, _ := status.FromError(err)
+	return ev.Code() != codes.Unavailable
+}
+
+func isNonRepeatableStopError(err error) bool {
+	ev, _ := status.FromError(err)
+	if ev.Code() != codes.Unavailable {
+		return true
+	}
+	desc := rpctypes.ErrorDesc(err)
+	return desc != "there is no address available" && desc != "there is no connection available"
+}
+
+func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
 	return func(rpcCtx context.Context, f rpcFunc) error {
 		for {
+			if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
+				return err
+			}
+			pinned := c.balancer.pinned()
 			err := f(rpcCtx)
 			if err == nil {
 				return nil
 			}
+			if logger.V(4) {
-			eErr := rpctypes.Error(err)
+				logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
-			// always stop retry on etcd errors
-			if _, ok := eErr.(rpctypes.EtcdError); ok {
-				return err
 			}

-			// only retry if unavailable
+			if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
-			if grpc.Code(err) != codes.Unavailable {
+				// mark this before endpoint switch is triggered
-				return err
+				c.balancer.hostPortError(pinned, err)
+				c.balancer.next()
+				if logger.V(4) {
+					logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
+				}
 			}

-			select {
+			if isStop(err) {
-			case <-c.balancer.ConnectNotify():
+				return err
-			case <-rpcCtx.Done():
-				return rpcCtx.Err()
-			case <-c.ctx.Done():
-				return c.ctx.Err()
 			}
 		}
 	}
 }

-func (c *Client) newAuthRetryWrapper() retryRpcFunc {
+func (c *Client) newAuthRetryWrapper() retryRPCFunc {
 	return func(rpcCtx context.Context, f rpcFunc) error {
 		for {
+			pinned := c.balancer.pinned()
 			err := f(rpcCtx)
 			if err == nil {
 				return nil
 			}
+			if logger.V(4) {
+				logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
+			}
 			// always stop retry on etcd errors other than invalid auth token
 			if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
 				gterr := c.getToken(rpcCtx)
 				if gterr != nil {
+					if logger.V(4) {
+						logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
+					}
 					return err // return the original error for simplicity
 				}
 				continue
 			}

 			return err
 		}
 	}
 }
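The split above separates requests that are safe to resend (repeatable) from those that are not, and both stop retrying on errors the server actually answered with. A minimal standalone sketch of that classification using only gRPC status codes; the helper name retryable is illustrative, not part of the etcd API:

	// Sketch: resend only when the failure looks like a transient transport
	// problem (codes.Unavailable); any other code means the server answered.
	package main

	import (
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	// retryable reports whether an RPC error is worth resending.
	func retryable(err error) bool {
		s, ok := status.FromError(err)
		return ok && s.Code() == codes.Unavailable
	}

	func main() {
		fmt.Println(retryable(status.Error(codes.Unavailable, "endpoint down"))) // true
		fmt.Println(retryable(status.Error(codes.InvalidArgument, "bad key")))   // false
	}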
-// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
+// RetryKVClient implements a KVClient.
 func RetryKVClient(c *Client) pb.KVClient {
-	retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
+	repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
-	return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
+	nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
+	conn := pb.NewKVClient(c.conn)
+	retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry}
+	retryAuthWrapper := c.newAuthRetryWrapper()
+	return &retryKVClient{
+		&nonRepeatableKVClient{retryBasic, retryAuthWrapper},
+		retryAuthWrapper}
 }

 type retryKVClient struct {
-	*retryWriteKVClient
+	*nonRepeatableKVClient
+	repeatableRetry retryRPCFunc
 }

 func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
-	err = rkv.retryf(ctx, func(rctx context.Context) error {
+	err = rkv.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
+		resp, err = rkv.kc.Range(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-type retryWriteKVClient struct {
+type nonRepeatableKVClient struct {
-	pb.KVClient
+	kc pb.KVClient
-	retryf retryRpcFunc
+	nonRepeatableRetry retryRPCFunc
 }

-func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
-	err = rkv.retryf(ctx, func(rctx context.Context) error {
+	err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rkv.KVClient.Put(rctx, in, opts...)
+		resp, err = rkv.kc.Put(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
-	err = rkv.retryf(ctx, func(rctx context.Context) error {
+	err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
+		resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
-	err = rkv.retryf(ctx, func(rctx context.Context) error {
+	// TODO: repeatableRetry if read-only txn
-		resp, err = rkv.KVClient.Txn(rctx, in, opts...)
+	err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rkv.kc.Txn(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
-	err = rkv.retryf(ctx, func(rctx context.Context) error {
+	err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rkv.KVClient.Compact(rctx, in, opts...)
+		resp, err = rkv.kc.Compact(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }
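RetryKVClient above layers decorators around the raw stub: read-only calls (Range) use the repeatable retry, mutating calls (Put, Txn, …) use the non-repeatable one, and the whole stack is wrapped once more for auth-token refresh. A rough standalone sketch of that decorator layering; the interface and struct names here are illustrative, not the package's types:

	package main

	import (
		"errors"
		"fmt"
	)

	// getter stands in for a generated gRPC stub method.
	type getter interface{ Get(key string) (string, error) }

	// flaky fails twice before succeeding, like a briefly unavailable endpoint.
	type flaky struct{ calls int }

	func (f *flaky) Get(key string) (string, error) {
		f.calls++
		if f.calls < 3 {
			return "", errors.New("unavailable")
		}
		return "value-of-" + key, nil
	}

	// withRetry decorates a getter with a bounded resend loop, the same shape
	// as the repeatable-retry wrapper above.
	type withRetry struct {
		next getter
		max  int
	}

	func (w withRetry) Get(key string) (string, error) {
		var lastErr error
		for i := 0; i < w.max; i++ {
			v, err := w.next.Get(key)
			if err == nil {
				return v, nil
			}
			lastErr = err
		}
		return "", lastErr
	}

	func main() {
		c := withRetry{next: &flaky{}, max: 5} // callers see retries transparently
		fmt.Println(c.Get("foo"))
	}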
 type retryLeaseClient struct {
-	pb.LeaseClient
+	lc pb.LeaseClient
-	retryf retryRpcFunc
+	repeatableRetry retryRPCFunc
 }

-// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
+// RetryLeaseClient implements a LeaseClient.
 func RetryLeaseClient(c *Client) pb.LeaseClient {
-	retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
+	retry := &retryLeaseClient{
-	return &retryLeaseClient{retry, c.retryAuthWrapper}
+		pb.NewLeaseClient(c.conn),
+		c.newRetryWrapper(isRepeatableStopError),
+	}
+	return &retryLeaseClient{retry, c.newAuthRetryWrapper()}
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+	err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
+		return err
+	})
+	return resp, err
 }

 func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
-	err = rlc.retryf(ctx, func(rctx context.Context) error {
+	err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
+		resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
 		return err
 	})
 	return resp, err
@@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
 }

 func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
-	err = rlc.retryf(ctx, func(rctx context.Context) error {
+	err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
+		resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+	err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
+		stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
+		return err
+	})
+	return stream, err
+}
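LeaseKeepAlive (and Snapshot later on) return streams, so the wrapper can only retry the call that opens the stream; once the stream is handed back, failures on it surface to the caller. A small sketch of retrying just the open step; the open callback and stream type here are placeholders:

	package main

	import (
		"errors"
		"fmt"
	)

	// stream is a placeholder for a client-side gRPC stream.
	type stream struct{ id int }

	// openWithRetry retries only the act of opening a stream; it does not
	// replay messages already exchanged on a broken stream.
	func openWithRetry(open func() (*stream, error), attempts int) (*stream, error) {
		var lastErr error
		for i := 0; i < attempts; i++ {
			s, err := open()
			if err == nil {
				return s, nil
			}
			lastErr = err
		}
		return nil, lastErr
	}

	func main() {
		n := 0
		s, err := openWithRetry(func() (*stream, error) {
			n++
			if n < 2 {
				return nil, errors.New("unavailable")
			}
			return &stream{id: n}, nil
		}, 3)
		fmt.Println(s, err)
	}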
 type retryClusterClient struct {
-	pb.ClusterClient
+	*nonRepeatableClusterClient
-	retryf retryRpcFunc
+	repeatableRetry retryRPCFunc
 }

-// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
+// RetryClusterClient implements a ClusterClient.
 func RetryClusterClient(c *Client) pb.ClusterClient {
-	return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
+	repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
+	nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
+	cc := pb.NewClusterClient(c.conn)
+	return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry}
 }

-func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
-	err = rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
+		resp, err = rcc.cc.MemberList(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+type nonRepeatableClusterClient struct {
-	err = rcc.retryf(ctx, func(rctx context.Context) error {
+	cc pb.ClusterClient
-		resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
+	nonRepeatableRetry retryRPCFunc
+}
+
+func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+	err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
-	err = rcc.retryf(ctx, func(rctx context.Context) error {
+	err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
+		resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+	err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+// RetryMaintenanceClient implements a Maintenance.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
+	nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
+	mc := pb.NewMaintenanceClient(conn)
+	return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry}
+}
+
+type retryMaintenanceClient struct {
+	*nonRepeatableMaintenanceClient
+	repeatableRetry retryRPCFunc
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Alarm(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Status(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Hash(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		stream, err = rmc.mc.Snapshot(rctx, in, opts...)
+		return err
+	})
+	return stream, err
+}
+
+type nonRepeatableMaintenanceClient struct {
+	mc                 pb.MaintenanceClient
+	nonRepeatableRetry retryRPCFunc
+}
+
+func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Defragment(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }
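Unlike the other constructors, RetryMaintenanceClient takes an explicit *grpc.ClientConn, since maintenance RPCs such as Defragment are typically aimed at one specific member rather than whatever connection the balancer has pinned. A hedged sketch of dialing a dedicated connection for that purpose; the endpoint address is a placeholder:

	package main

	import (
		"fmt"
		"log"

		"google.golang.org/grpc"
	)

	func main() {
		// Placeholder endpoint: the one member you want to target directly,
		// not the load-balanced client connection shared by KV traffic.
		conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
		fmt.Println("dialed a per-endpoint maintenance connection")
	}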
 type retryAuthClient struct {
-	pb.AuthClient
+	*nonRepeatableAuthClient
-	retryf retryRpcFunc
+	repeatableRetry retryRPCFunc
 }

-// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
+// RetryAuthClient implements a AuthClient.
 func RetryAuthClient(c *Client) pb.AuthClient {
-	return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
+	repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
+	nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
+	ac := pb.NewAuthClient(c.conn)
+	return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry}
 }

-func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
+func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
+		resp, err = rac.ac.UserList(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
+		resp, err = rac.ac.UserGet(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
+func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
+		resp, err = rac.ac.RoleGet(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
+func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
+		resp, err = rac.ac.RoleList(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
+type nonRepeatableAuthClient struct {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	ac pb.AuthClient
-		resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
+	nonRepeatableRetry retryRPCFunc
+}
+
+func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.AuthEnable(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
+func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
+		resp, err = rac.ac.AuthDisable(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
+func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
+		resp, err = rac.ac.UserAdd(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
+func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
+		resp, err = rac.ac.UserDelete(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
+func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
+		resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
+func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
+		resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
 		return err
 	})
 	return resp, err
 }

-func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
-	err = rac.retryf(ctx, func(rctx context.Context) error {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
-		resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
+		resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleAdd(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleDelete(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+	err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.Authenticate(rctx, in, opts...)
 		return err
 	})
 	return resp, err
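The auth retry wrapper shown earlier handles one more case on top of this split: when a call fails with an invalid auth token, it fetches a fresh token and reissues the call, and gives up on everything else. A standalone sketch of that refresh-then-retry pattern; the error value and token store here are illustrative:

	package main

	import (
		"errors"
		"fmt"
	)

	var errInvalidToken = errors.New("invalid auth token")

	type session struct{ token string }

	// call retries only when the failure is an expired/invalid token,
	// re-authenticating once per failure before reissuing the operation.
	func (s *session) call(op func(token string) error, refresh func() string) error {
		for {
			err := op(s.token)
			if err != errInvalidToken {
				return err // success, or an error a new token will not fix
			}
			s.token = refresh()
		}
	}

	func main() {
		s := &session{token: "expired"}
		err := s.call(func(tok string) error {
			if tok != "fresh" {
				return errInvalidToken
			}
			return nil
		}, func() string { return "fresh" })
		fmt.Println(err)
	}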
@@ -18,13 +18,13 @@ import (
 	"sync"

 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

 	"golang.org/x/net/context"
-	"google.golang.org/grpc"
 )

 // Txn is the interface that wraps mini-transactions.
 //
-//	Tx.If(
+//	Txn(context.TODO()).If(
 //	 Compare(Value(k1), ">", v1),
 //	 Compare(Version(k1), "=", 2)
 //	).Then(
@@ -135,30 +135,14 @@ func (txn *txn) Else(ops ...Op) Txn {
 func (txn *txn) Commit() (*TxnResponse, error) {
 	txn.mu.Lock()
 	defer txn.mu.Unlock()
-	for {
-		resp, err := txn.commit()
-		if err == nil {
-			return resp, err
-		}
-		if isHaltErr(txn.ctx, err) {
-			return nil, toErr(txn.ctx, err)
-		}
-		if txn.isWrite {
-			return nil, toErr(txn.ctx, err)
-		}
-	}
-}
-
-func (txn *txn) commit() (*TxnResponse, error) {
 	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}

-	var opts []grpc.CallOption
+	var resp *pb.TxnResponse
-	if !txn.isWrite {
+	var err error
-		opts = []grpc.CallOption{grpc.FailFast(false)}
+	resp, err = txn.kv.remote.Txn(txn.ctx, r)
-	}
-	resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
 	if err != nil {
-		return nil, err
+		return nil, toErr(txn.ctx, err)
 	}
 	return (*TxnResponse)(resp), nil
 }
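The Commit change above drops the client-side retry loop and the FailFast toggling: the TxnRequest is sent once and any retry decision now lives in the retry wrappers shown earlier. A sketch of the resulting call shape against the public clientv3 API; the endpoint is a placeholder and a reachable etcd member is assumed:

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/coreos/etcd/clientv3"
		"golang.org/x/net/context"
	)

	func main() {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
			DialTimeout: 5 * time.Second,
		})
		if err != nil {
			log.Fatal(err)
		}
		defer cli.Close()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// Commit issues the transaction once; transient failures are handled
		// by the retrying KV client underneath, not by Commit itself.
		resp, err := cli.Txn(ctx).
			If(clientv3.Compare(clientv3.Version("k"), "=", 0)).
			Then(clientv3.OpPut("k", "v")).
			Else(clientv3.OpGet("k")).
			Commit()
		fmt.Println(resp, err)
	}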
@@ -22,9 +22,12 @@ import (
 	v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"

 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
 )

 const (
@@ -40,10 +43,9 @@ type WatchChan <-chan WatchResponse

 type Watcher interface {
 	// Watch watches on a key or prefix. The watched events will be returned
-	// through the returned channel.
+	// through the returned channel. If revisions waiting to be sent over the
-	// If the watch is slow or the required rev is compacted, the watch request
+	// watch are compacted, then the watch will be canceled by the server, the
-	// might be canceled from the server-side and the chan will be closed.
+	// client will post a compacted error watch response, and the channel will close.
-	// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
 	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan

 	// Close closes the watcher and cancels all watch requests.
@@ -90,7 +92,7 @@ func (wr *WatchResponse) Err() error {
 		return v3rpc.ErrCompacted
 	case wr.Canceled:
 		if len(wr.cancelReason) != 0 {
-			return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason))
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
 		}
 		return v3rpc.ErrFutureRev
 	}
@@ -135,7 +137,7 @@ type watchGrpcStream struct {
 	respc chan *pb.WatchResponse
 	// donec closes to broadcast shutdown
 	donec chan struct{}
-	// errc transmits errors from grpc Recv to the watch stream reconn logic
+	// errc transmits errors from grpc Recv to the watch stream reconnect logic
 	errc chan error
 	// closingc gets the watcherStream of closing watchers
 	closingc chan *watcherStream
@@ -214,16 +216,15 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
 		owner:      w,
 		remote:     w.remote,
 		ctx:        ctx,
-		ctxKey:     fmt.Sprintf("%v", inctx),
+		ctxKey:     streamKeyFromCtx(inctx),
 		cancel:     cancel,
 		substreams: make(map[int64]*watcherStream),
+		respc:      make(chan *pb.WatchResponse),
-		respc:    make(chan *pb.WatchResponse),
+		reqc:       make(chan *watchRequest),
-		reqc:     make(chan *watchRequest),
+		donec:      make(chan struct{}),
-		donec:    make(chan struct{}),
+		errc:       make(chan error, 1),
-		errc:     make(chan error, 1),
+		closingc:   make(chan *watcherStream),
-		closingc: make(chan *watcherStream),
+		resumec:    make(chan struct{}),
-		resumec:  make(chan struct{}),
 	}
 	go wgs.run()
 	return wgs
@@ -254,7 +255,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
 	}

 	ok := false
-	ctxKey := fmt.Sprintf("%v", ctx)
+	ctxKey := streamKeyFromCtx(ctx)

 	// find or allocate appropriate grpc watch stream
 	w.mu.Lock()
@@ -317,14 +318,14 @@ func (w *watcher) Close() (err error) {
 	w.streams = nil
 	w.mu.Unlock()
 	for _, wgs := range streams {
-		if werr := wgs.Close(); werr != nil {
+		if werr := wgs.close(); werr != nil {
 			err = werr
 		}
 	}
 	return err
 }

-func (w *watchGrpcStream) Close() (err error) {
+func (w *watchGrpcStream) close() (err error) {
 	w.cancel()
 	<-w.donec
 	select {
@@ -435,7 +436,7 @@ func (w *watchGrpcStream) run() {
 				initReq: *wreq,
 				id:      -1,
 				outc:    outc,
-				// unbufffered so resumes won't cause repeat events
+				// unbuffered so resumes won't cause repeat events
 				recvc: make(chan *WatchResponse),
 			}

@@ -487,7 +488,7 @@ func (w *watchGrpcStream) run() {
 				req := &pb.WatchRequest{RequestUnion: cr}
 				wc.Send(req)
 			}
-		// watch client failed to recv; spawn another if possible
+		// watch client failed on Recv; spawn another if possible
 		case err := <-w.errc:
 			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
 				closeErr = err
@@ -749,7 +750,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
 	return donec
 }

-// joinSubstream waits for all substream goroutines to complete
+// joinSubstreams waits for all substream goroutines to complete.
 func (w *watchGrpcStream) joinSubstreams() {
 	for _, ws := range w.substreams {
 		<-ws.donec
@@ -761,7 +762,9 @@ func (w *watchGrpcStream) joinSubstreams() {
 	}
 }

-// openWatchClient retries opening a watchclient until retryConnection fails
+// openWatchClient retries opening a watch client until success or halt.
+// manually retry in case "ws==nil && err==nil"
+// TODO: remove FailFast=false
 func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
 	for {
 		select {
@@ -782,7 +785,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
 		return ws, nil
 }

-// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
 func (wr *watchRequest) toPB() *pb.WatchRequest {
 	req := &pb.WatchCreateRequest{
 		StartRevision: wr.rev,
@@ -795,3 +798,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
 	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
 	return &pb.WatchRequest{RequestUnion: cr}
 }

+func streamKeyFromCtx(ctx context.Context) string {
+	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+		return fmt.Sprintf("%+v", md)
+	}
+	return ""
+}
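streamKeyFromCtx above derives the grouping key for watch streams from the outgoing gRPC metadata instead of fmt.Sprintf over the whole context, so two contexts carrying the same metadata share one underlying stream. A small sketch of reading metadata the same way; the metadata keys and values are illustrative:

	package main

	import (
		"fmt"

		"golang.org/x/net/context"
		"google.golang.org/grpc/metadata"
	)

	// streamKey mirrors the idea: only outgoing metadata matters, so contexts
	// that differ only in deadlines or values map to the same key.
	func streamKey(ctx context.Context) string {
		if md, ok := metadata.FromOutgoingContext(ctx); ok {
			return fmt.Sprintf("%+v", md)
		}
		return ""
	}

	func main() {
		a := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("token", "abc"))
		b := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("token", "abc"))
		fmt.Println(streamKey(a) == streamKey(b)) // true: both share one watch stream
	}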
@@ -33,7 +33,11 @@ type yamlConfig struct {
 	InsecureSkipTLSVerify bool   `json:"insecure-skip-tls-verify"`
 	Certfile              string `json:"cert-file"`
 	Keyfile               string `json:"key-file"`
-	CAfile                string `json:"ca-file"`
+	TrustedCAfile         string `json:"trusted-ca-file"`
+
+	// CAfile is being deprecated. Use 'TrustedCAfile' instead.
+	// TODO: deprecate this in v4
+	CAfile string `json:"ca-file"`
 }

 // NewConfig creates a new clientv3.Config from a yaml file.
@@ -66,8 +70,11 @@ func NewConfig(fpath string) (*clientv3.Config, error) {
 		}
 	}

-	if yc.CAfile != "" {
+	if yc.CAfile != "" && yc.TrustedCAfile == "" {
-		cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
+		yc.TrustedCAfile = yc.CAfile
+	}
+	if yc.TrustedCAfile != "" {
+		cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile})
 		if err != nil {
 			return nil, err
 		}
@@ -50,7 +50,7 @@ func TestConfigFromFile(t *testing.T) {
 			&yamlConfig{
 				Keyfile:               privateKeyPath,
 				Certfile:              certPath,
-				CAfile:                caPath,
+				TrustedCAfile:         caPath,
 				InsecureSkipTLSVerify: true,
 			},
 			false,
@@ -64,9 +64,9 @@ func TestConfigFromFile(t *testing.T) {
 		},
 		{
 			&yamlConfig{
 				Keyfile:       privateKeyPath,
 				Certfile:      certPath,
-				CAfile:        "bad",
+				TrustedCAfile: "bad",
 			},
 			true,
 		},
@@ -113,7 +113,7 @@ func TestConfigFromFile(t *testing.T) {
 		if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 {
 			t.Errorf("#%d: failed to load in cert", i)
 		}
-		if tt.ym.CAfile != "" && cfg.TLS.RootCAs == nil {
+		if tt.ym.TrustedCAfile != "" && cfg.TLS.RootCAs == nil {
 			t.Errorf("#%d: failed to load in ca cert", i)
 		}
 		if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify {
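The config change keeps ca-file working while steering users to trusted-ca-file: when only the old key is set, its value is copied into the new field before the cert pool is built. A standalone sketch of that precedence rule; the struct here is illustrative, not the package's type:

	package main

	import "fmt"

	type tlsFiles struct {
		CAfile        string // deprecated ca-file key
		TrustedCAfile string // preferred trusted-ca-file key
	}

	// normalize applies the same precedence as the loader above: an explicit
	// trusted-ca-file always wins; ca-file is only a fallback.
	func normalize(c tlsFiles) tlsFiles {
		if c.CAfile != "" && c.TrustedCAfile == "" {
			c.TrustedCAfile = c.CAfile
		}
		return c
	}

	func main() {
		fmt.Println(normalize(tlsFiles{CAfile: "old-ca.pem"}))
		fmt.Println(normalize(tlsFiles{CAfile: "old-ca.pem", TrustedCAfile: "new-ca.pem"}))
	}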
@@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
cmd/vendor/github.com/coreos/bbolt/bolt_arm.go (new file, generated, vendored, 28 lines)
@@ -0,0 +1,28 @@
+package bolt
+
+import "unsafe"
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned bool
+
+func init() {
+	// Simple check to see whether this arch handles unaligned load/stores
+	// correctly.
+
+	// ARM9 and older devices require load/stores to be from/to aligned
+	// addresses. If not, the lower 2 bits are cleared and that address is
+	// read in a jumbled up order.
+
+	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
+
+	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
+	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
+
+	brokenUnaligned = val != 0x11222211
+}
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
cmd/vendor/github.com/coreos/bbolt/bolt_mips64x.go (new file, generated, vendored, 12 lines)
@@ -0,0 +1,12 @@
+// +build mips64 mips64le
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x8000000000 // 512GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -1,7 +1,12 @@
+// +build mips mipsle
+
 package bolt

 // maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
+const maxMapSize = 0x40000000 // 1GB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
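The new bolt_arm.go probes at init time whether the CPU tolerates unaligned 32-bit loads by reading a uint32 from an intentionally misaligned offset. A standalone sketch of that probe, safe to run on any architecture even though the answer is only interesting on 32-bit ARM:

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		// Lay out six bytes so that reading a uint32 starting at offset 2
		// yields 0x11222211 only if unaligned loads behave correctly.
		raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
		val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

		brokenUnaligned := val != 0x11222211
		fmt.Println("unaligned loads broken:", brokenUnaligned)
	}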
@@ -13,29 +13,32 @@ import (
 // flock acquires an advisory lock on a file descriptor.
 func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := db.file.Fd()
+	flag := syscall.LOCK_NB
+	if exclusive {
+		flag |= syscall.LOCK_EX
+	} else {
+		flag |= syscall.LOCK_SH
+	}
 	for {
-		// If we're beyond our timeout then return an error.
+		// Attempt to obtain an exclusive lock.
-		// This can only occur after we've attempted a flock once.
+		err := syscall.Flock(int(fd), flag)
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		flag := syscall.LOCK_SH
-		if exclusive {
-			flag = syscall.LOCK_EX
-		}
-
-		// Otherwise attempt to obtain an exclusive lock.
-		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
 		if err == nil {
 			return nil
 		} else if err != syscall.EWOULDBLOCK {
 			return err
 		}

+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }

@@ -13,34 +13,33 @@ import (
 // flock acquires an advisory lock on a file descriptor.
 func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := db.file.Fd()
+	var lockType int16
+	if exclusive {
+		lockType = syscall.F_WRLCK
+	} else {
+		lockType = syscall.F_RDLCK
+	}
 	for {
-		// If we're beyond our timeout then return an error.
+		// Attempt to obtain an exclusive lock.
-		// This can only occur after we've attempted a flock once.
+		lock := syscall.Flock_t{Type: lockType}
-		if t.IsZero() {
+		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		var lock syscall.Flock_t
-		lock.Start = 0
-		lock.Len = 0
-		lock.Pid = 0
-		lock.Whence = 0
-		lock.Pid = 0
-		if exclusive {
-			lock.Type = syscall.F_WRLCK
-		} else {
-			lock.Type = syscall.F_RDLCK
-		}
-		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
 		if err == nil {
 			return nil
 		} else if err != syscall.EAGAIN {
 			return err
 		}

+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }

@@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 	db.lockfile = f

 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := f.Fd()
+	var flag uint32 = flagLockFailImmediately
+	if exclusive {
+		flag |= flagLockExclusive
+	}
 	for {
-		// If we're beyond our timeout then return an error.
+		// Attempt to obtain an exclusive lock.
-		// This can only occur after we've attempted a flock once.
+		err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-
-		var flag uint32 = flagLockFailImmediately
-		if exclusive {
-			flag |= flagLockExclusive
-		}
-
-		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
 		if err == nil {
 			return nil
 		} else if err != errLockViolation {
 			return err
 		}

+		// If we timed oumercit then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }

@@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 func funlock(db *DB) error {
 	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
 	db.lockfile.Close()
-	os.Remove(db.path+lockExt)
+	os.Remove(db.path + lockExt)
 	return err
 }
@@ -14,13 +14,6 @@ const (
 	MaxValueSize = (1 << 31) - 2
 )

-const (
-	maxUint = ^uint(0)
-	minUint = 0
-	maxInt  = int(^uint(0) >> 1)
-	minInt  = -maxInt - 1
-)
-
 const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

 const (
@@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
 func (b *Bucket) openBucket(value []byte) *Bucket {
 	var child = newBucket(b.tx)

+	// If unaligned load/stores are broken on this arch and value is
+	// unaligned simply clone to an aligned byte array.
+	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
+
+	if unaligned {
+		value = cloneBytes(value)
+	}
+
 	// If this is a writable transaction then we need to copy the bucket entry.
 	// Read-only transactions can point directly at the mmap entry.
-	if b.tx.writable {
+	if b.tx.writable && !unaligned {
 		child.bucket = &bucket{}
 		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
 	} else {
@@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
 		if bytes.Equal(key, k) {
 			if (flags & bucketLeafFlag) != 0 {
 				return nil, ErrBucketExists
-			} else {
-				return nil, ErrIncompatibleValue
 			}
+			return nil, ErrIncompatibleValue
 		}

 	// Create empty, inline bucket.
@@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error {

 	// Move cursor to correct position.
 	c := b.Cursor()
-	_, _, flags := c.seek(key)
+	k, _, flags := c.seek(key)
+
+	// Return nil if the key doesn't exist.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+
 	// Return an error if there is already existing bucket value.
 	if (flags & bucketLeafFlag) != 0 {
@@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error {
 	return nil
 }

+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence = v
+	return nil
+}
+
 // NextSequence returns an autoincrementing integer for the bucket.
 func (b *Bucket) NextSequence() (uint64, error) {
 	if b.tx.db == nil {
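Sequence and SetSequence give callers direct control over the per-bucket counter that NextSequence increments. A hedged usage sketch against the bbolt API; the database path and bucket name are placeholders:

	package main

	import (
		"fmt"
		"log"

		bolt "github.com/coreos/bbolt"
	)

	func main() {
		db, err := bolt.Open("/tmp/seq.db", 0600, nil) // placeholder path
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		err = db.Update(func(tx *bolt.Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("ids"))
			if err != nil {
				return err
			}
			if err := b.SetSequence(100); err != nil { // jump the counter forward
				return err
			}
			next, err := b.NextSequence() // 101
			fmt.Println("sequence now:", b.Sequence(), "next issued:", next)
			return err
		})
		if err != nil {
			log.Fatal(err)
		}
	}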
cmd/vendor/github.com/boltdb/bolt/db.go → cmd/vendor/github.com/coreos/bbolt/db.go (generated, vendored, 207 changes)
@@ -7,8 +7,7 @@ import (
 	"log"
 	"os"
 	"runtime"
-	"runtime/debug"
+	"sort"
-	"strings"
 	"sync"
 	"time"
 	"unsafe"
@@ -23,6 +22,8 @@ const version = 2
 // Represents a marker value to indicate that a file is a Bolt DB.
 const magic uint32 = 0xED0CDAED

+const pgidNoFreelist pgid = 0xffffffffffffffff
+
 // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
 // syncing changes to a file. This is required as some operating systems,
 // such as OpenBSD, do not have a unified buffer cache (UBC) and writes
@@ -39,6 +40,9 @@ const (
 // default page size for db is set to the OS page size.
 var defaultPageSize = os.Getpagesize()

+// The time elapsed between consecutive file locking attempts.
+const flockRetryTimeout = 50 * time.Millisecond
+
 // DB represents a collection of buckets persisted to a file on disk.
 // All data access is performed through transactions which can be obtained through the DB.
 // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@@ -61,6 +65,11 @@ type DB struct {
 	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
 	NoSync bool

+	// When true, skips syncing freelist to disk. This improves the database
+	// write performance under normal operation, but requires a full database
+	// re-sync during recovery.
+	NoFreelistSync bool
+
 	// When true, skips the truncate call when growing the database.
 	// Setting this to true is only safe on non-ext3/ext4 systems.
 	// Skipping truncation avoids preallocation of hard drive space and
@@ -107,9 +116,11 @@ type DB struct {
 	opened bool
 	rwtx   *Tx
 	txs    []*Tx
-	freelist *freelist
 	stats  Stats

+	freelist     *freelist
+	freelistLoad sync.Once
+
 	pagePool sync.Pool

 	batchMu sync.Mutex
@@ -148,14 +159,17 @@ func (db *DB) String() string {
 // If the file does not exist then it will be created automatically.
 // Passing in nil options will cause Bolt to open the database with the default options.
 func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
-	var db = &DB{opened: true}
+	db := &DB{
+		opened: true,
+	}
 	// Set default options if no options are provided.
 	if options == nil {
 		options = DefaultOptions
 	}
+	db.NoSync = options.NoSync
 	db.NoGrowSync = options.NoGrowSync
 	db.MmapFlags = options.MmapFlags
+	db.NoFreelistSync = options.NoFreelistSync

 	// Set default values for later DB operations.
 	db.MaxBatchSize = DefaultMaxBatchSize
@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
|||||||
// The database file is locked using the shared lock (more than one process may
|
// The database file is locked using the shared lock (more than one process may
|
||||||
// hold a lock at the same time) otherwise (options.ReadOnly is set).
|
// hold a lock at the same time) otherwise (options.ReadOnly is set).
|
||||||
if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
|
if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
|
||||||
|
db.lockfile = nil // make 'unused' happy. TODO: rework locks
|
||||||
_ = db.close()
|
_ = db.close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
|||||||
// Default values for test hooks
|
// Default values for test hooks
|
||||||
db.ops.writeAt = db.file.WriteAt
|
db.ops.writeAt = db.file.WriteAt
|
||||||
|
|
||||||
|
if db.pageSize = options.PageSize; db.pageSize == 0 {
|
||||||
|
// Set the default page size to the OS page size.
|
||||||
|
db.pageSize = defaultPageSize
|
||||||
|
}
|
||||||
|
|
||||||
// Initialize the database if it doesn't exist.
|
// Initialize the database if it doesn't exist.
|
||||||
if info, err := db.file.Stat(); err != nil {
|
if info, err := db.file.Stat(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
|||||||
} else {
|
} else {
|
||||||
// Read the first meta page to determine the page size.
|
// Read the first meta page to determine the page size.
|
||||||
var buf [0x1000]byte
|
var buf [0x1000]byte
|
||||||
if _, err := db.file.ReadAt(buf[:], 0); err == nil {
|
// If we can't read the page size, but can read a page, assume
|
||||||
m := db.pageInBuffer(buf[:], 0).meta()
|
// it's the same as the OS or one given -- since that's how the
|
||||||
if err := m.validate(); err != nil {
|
// page size was chosen in the first place.
|
||||||
// If we can't read the page size, we can assume it's the same
|
//
|
||||||
// as the OS -- since that's how the page size was chosen in the
|
// If the first page is invalid and this OS uses a different
|
||||||
// first place.
|
// page size than what the database was created with then we
|
||||||
//
|
// are out of luck and cannot access the database.
|
||||||
// If the first page is invalid and this OS uses a different
|
//
|
||||||
// page size than what the database was created with then we
|
// TODO: scan for next page
|
||||||
// are out of luck and cannot access the database.
|
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
|
||||||
db.pageSize = os.Getpagesize()
|
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
|
||||||
} else {
|
|
||||||
db.pageSize = int(m.pageSize)
|
db.pageSize = int(m.pageSize)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
return nil, ErrInvalid
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read in the freelist.
|
if db.readOnly {
|
||||||
db.freelist = newFreelist()
|
return db, nil
|
||||||
db.freelist.read(db.page(db.meta().freelist))
|
}
|
||||||
|
|
||||||
|
db.loadFreelist()
|
||||||
|
|
||||||
|
// Flush freelist when transitioning from no sync to sync so
|
||||||
|
// NoFreelistSync unaware boltdb can open the db later.
|
||||||
|
if !db.NoFreelistSync && !db.hasSyncedFreelist() {
|
||||||
|
tx, err := db.Begin(true)
|
||||||
|
if tx != nil {
|
||||||
|
err = tx.Commit()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
_ = db.close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Mark the database as opened and return.
|
// Mark the database as opened and return.
|
||||||
return db, nil
|
return db, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// loadFreelist reads the freelist if it is synced, or reconstructs it
|
||||||
|
// by scanning the DB if it is not synced. It assumes there are no
|
||||||
|
// concurrent accesses being made to the freelist.
|
||||||
|
func (db *DB) loadFreelist() {
|
||||||
|
db.freelistLoad.Do(func() {
|
||||||
|
db.freelist = newFreelist()
|
||||||
|
if !db.hasSyncedFreelist() {
|
||||||
|
// Reconstruct free list by scanning the DB.
|
||||||
|
db.freelist.readIDs(db.freepages())
|
||||||
|
} else {
|
||||||
|
// Read free list from freelist page.
|
||||||
|
db.freelist.read(db.page(db.meta().freelist))
|
||||||
|
}
|
||||||
|
db.stats.FreePageN = len(db.freelist.ids)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (db *DB) hasSyncedFreelist() bool {
|
||||||
|
return db.meta().freelist != pgidNoFreelist
|
||||||
|
}
|
||||||
|
|
||||||
// mmap opens the underlying memory-mapped file and initializes the meta references.
|
// mmap opens the underlying memory-mapped file and initializes the meta references.
|
||||||
// minsz is the minimum size that the new mmap can be.
|
// minsz is the minimum size that the new mmap can be.
|
||||||
func (db *DB) mmap(minsz int) error {
|
func (db *DB) mmap(minsz int) error {
|
||||||
@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) {
|
|||||||
|
|
||||||
// init creates a new database file and initializes its meta pages.
|
// init creates a new database file and initializes its meta pages.
|
||||||
func (db *DB) init() error {
|
func (db *DB) init() error {
|
||||||
// Set the page size to the OS page size.
|
|
||||||
db.pageSize = os.Getpagesize()
|
|
||||||
|
|
||||||
// Create two meta pages on a buffer.
|
// Create two meta pages on a buffer.
|
||||||
buf := make([]byte, db.pageSize*4)
|
buf := make([]byte, db.pageSize*4)
|
||||||
for i := 0; i < 2; i++ {
|
for i := 0; i < 2; i++ {
|
||||||
@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) {
|
|||||||
t := &Tx{writable: true}
|
t := &Tx{writable: true}
|
||||||
t.init(db)
|
t.init(db)
|
||||||
db.rwtx = t
|
db.rwtx = t
|
||||||
|
db.freePages()
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Free any pages associated with closed read-only transactions.
|
// freePages releases any pages associated with closed read-only transactions.
|
||||||
var minid txid = 0xFFFFFFFFFFFFFFFF
|
func (db *DB) freePages() {
|
||||||
for _, t := range db.txs {
|
// Free all pending pages prior to earliest open transaction.
|
||||||
if t.meta.txid < minid {
|
sort.Sort(txsById(db.txs))
|
||||||
minid = t.meta.txid
|
minid := txid(0xFFFFFFFFFFFFFFFF)
|
||||||
}
|
if len(db.txs) > 0 {
|
||||||
|
minid = db.txs[0].meta.txid
|
||||||
}
|
}
|
||||||
if minid > 0 {
|
if minid > 0 {
|
||||||
db.freelist.release(minid - 1)
|
db.freelist.release(minid - 1)
|
||||||
}
|
}
|
||||||
|
// Release unused txid extents.
|
||||||
return t, nil
|
for _, t := range db.txs {
|
||||||
|
db.freelist.releaseRange(minid, t.meta.txid-1)
|
||||||
|
minid = t.meta.txid + 1
|
||||||
|
}
|
||||||
|
db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
|
||||||
|
// Any page both allocated and freed in an extent is safe to release.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type txsById []*Tx
|
||||||
|
|
||||||
|
func (t txsById) Len() int { return len(t) }
|
||||||
|
func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
|
||||||
|
func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
|
||||||
|
|
||||||
// removeTx removes a transaction from the database.
|
// removeTx removes a transaction from the database.
|
||||||
func (db *DB) removeTx(tx *Tx) {
|
func (db *DB) removeTx(tx *Tx) {
|
||||||
// Release the read lock on the mmap.
|
// Release the read lock on the mmap.
|
||||||
@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) {
|
|||||||
// Remove the transaction.
|
// Remove the transaction.
|
||||||
for i, t := range db.txs {
|
for i, t := range db.txs {
|
||||||
if t == tx {
|
if t == tx {
|
||||||
db.txs = append(db.txs[:i], db.txs[i+1:]...)
|
last := len(db.txs) - 1
|
||||||
|
db.txs[i] = db.txs[last]
|
||||||
|
db.txs[last] = nil
|
||||||
|
db.txs = db.txs[:last]
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := t.Rollback(); err != nil {
|
return t.Rollback()
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Batch calls fn as part of a batch. It behaves similar to Update,
|
// Batch calls fn as part of a batch. It behaves similar to Update,
|
||||||
@ -823,7 +891,7 @@ func (db *DB) meta() *meta {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// allocate returns a contiguous block of memory starting at a given page.
|
// allocate returns a contiguous block of memory starting at a given page.
|
||||||
func (db *DB) allocate(count int) (*page, error) {
|
func (db *DB) allocate(txid txid, count int) (*page, error) {
|
||||||
// Allocate a temporary buffer for the page.
|
// Allocate a temporary buffer for the page.
|
||||||
var buf []byte
|
var buf []byte
|
||||||
if count == 1 {
|
if count == 1 {
|
||||||
@ -835,7 +903,7 @@ func (db *DB) allocate(count int) (*page, error) {
|
|||||||
p.overflow = uint32(count - 1)
|
p.overflow = uint32(count - 1)
|
||||||
|
|
||||||
// Use pages from the freelist if they are available.
|
// Use pages from the freelist if they are available.
|
||||||
if p.id = db.freelist.allocate(count); p.id != 0 {
|
if p.id = db.freelist.allocate(txid, count); p.id != 0 {
|
||||||
return p, nil
|
return p, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -890,6 +958,38 @@ func (db *DB) IsReadOnly() bool {
|
|||||||
return db.readOnly
|
return db.readOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (db *DB) freepages() []pgid {
|
||||||
|
tx, err := db.beginTx()
|
||||||
|
defer func() {
|
||||||
|
err = tx.Rollback()
|
||||||
|
if err != nil {
|
||||||
|
panic("freepages: failed to rollback tx")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
panic("freepages: failed to open read only tx")
|
||||||
|
}
|
||||||
|
|
||||||
|
reachable := make(map[pgid]*page)
|
||||||
|
nofreed := make(map[pgid]bool)
|
||||||
|
ech := make(chan error)
|
||||||
|
go func() {
|
||||||
|
for e := range ech {
|
||||||
|
panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
tx.checkBucket(&tx.root, reachable, nofreed, ech)
|
||||||
|
close(ech)
|
||||||
|
|
||||||
|
var fids []pgid
|
||||||
|
for i := pgid(2); i < db.meta().pgid; i++ {
|
||||||
|
if _, ok := reachable[i]; !ok {
|
||||||
|
fids = append(fids, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fids
|
||||||
|
}
|
||||||
|
|
||||||
// Options represents the options that can be set when opening a database.
|
// Options represents the options that can be set when opening a database.
|
||||||
type Options struct {
|
type Options struct {
|
||||||
// Timeout is the amount of time to wait to obtain a file lock.
|
// Timeout is the amount of time to wait to obtain a file lock.
|
||||||
@ -900,6 +1000,10 @@ type Options struct {
|
|||||||
// Sets the DB.NoGrowSync flag before memory mapping the file.
|
// Sets the DB.NoGrowSync flag before memory mapping the file.
|
||||||
NoGrowSync bool
|
NoGrowSync bool
|
||||||
|
|
||||||
|
// Do not sync freelist to disk. This improves the database write performance
|
||||||
|
// under normal operation, but requires a full database re-sync during recovery.
|
||||||
|
NoFreelistSync bool
|
||||||
|
|
||||||
// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
|
// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
|
||||||
// grab a shared lock (UNIX).
|
// grab a shared lock (UNIX).
|
||||||
ReadOnly bool
|
ReadOnly bool
|
||||||
@ -916,6 +1020,14 @@ type Options struct {
|
|||||||
// If initialMmapSize is smaller than the previous database size,
|
// If initialMmapSize is smaller than the previous database size,
|
||||||
// it takes no effect.
|
// it takes no effect.
|
||||||
InitialMmapSize int
|
InitialMmapSize int
|
||||||
|
|
||||||
|
// PageSize overrides the default OS page size.
|
||||||
|
PageSize int
|
||||||
|
|
||||||
|
// NoSync sets the initial value of DB.NoSync. Normally this can just be
|
||||||
|
// set directly on the DB itself when returned from Open(), but this option
|
||||||
|
// is useful in APIs which expose Options but not the underlying DB.
|
||||||
|
NoSync bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultOptions represent the options used if nil options are passed into Open().
|
// DefaultOptions represent the options used if nil options are passed into Open().
|
||||||
@ -952,15 +1064,11 @@ func (s *Stats) Sub(other *Stats) Stats {
|
|||||||
diff.PendingPageN = s.PendingPageN
|
diff.PendingPageN = s.PendingPageN
|
||||||
diff.FreeAlloc = s.FreeAlloc
|
diff.FreeAlloc = s.FreeAlloc
|
||||||
diff.FreelistInuse = s.FreelistInuse
|
diff.FreelistInuse = s.FreelistInuse
|
||||||
diff.TxN = other.TxN - s.TxN
|
diff.TxN = s.TxN - other.TxN
|
||||||
diff.TxStats = s.TxStats.Sub(&other.TxStats)
|
diff.TxStats = s.TxStats.Sub(&other.TxStats)
|
||||||
return diff
|
return diff
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Stats) add(other *Stats) {
|
|
||||||
s.TxStats.add(&other.TxStats)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Info struct {
|
type Info struct {
|
||||||
Data uintptr
|
Data uintptr
|
||||||
PageSize int
|
PageSize int
|
||||||
@ -999,7 +1107,8 @@ func (m *meta) copy(dest *meta) {
|
|||||||
func (m *meta) write(p *page) {
|
func (m *meta) write(p *page) {
|
||||||
if m.root.root >= m.pgid {
|
if m.root.root >= m.pgid {
|
||||||
panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
|
panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
|
||||||
} else if m.freelist >= m.pgid {
|
} else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
|
||||||
|
// TODO: reject pgidNoFreeList if !NoFreelistSync
|
||||||
panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
|
panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1026,11 +1135,3 @@ func _assert(condition bool, msg string, v ...interface{}) {
|
|||||||
panic(fmt.Sprintf("assertion failed: "+msg, v...))
|
panic(fmt.Sprintf("assertion failed: "+msg, v...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
|
|
||||||
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
|
|
||||||
|
|
||||||
func printstack() {
|
|
||||||
stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
|
|
||||||
fmt.Fprintln(os.Stderr, stack)
|
|
||||||
}
|
|
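The db.go changes above surface three user-visible knobs: Options.NoFreelistSync, Options.NoSync, and Options.PageSize. A minimal sketch of opening a database with them, assuming the vendored package is imported under its upstream path github.com/coreos/bbolt (package bolt); the option fields come straight from the diff above, everything else (file name, bucket name) is illustrative:

package main

import (
    "log"
    "time"

    bolt "github.com/coreos/bbolt"
)

func main() {
    db, err := bolt.Open("app.db", 0600, &bolt.Options{
        Timeout:        time.Second, // give up on the file lock after 1s
        NoFreelistSync: true,        // skip persisting the freelist page on commit
        NoSync:         false,       // keep fsync on commit (the safe default)
        PageSize:       4096,        // override the OS page size if desired
    })
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // With NoFreelistSync set, the freelist is rebuilt by scanning the file
    // on the next Open, as loadFreelist in the diff above shows.
    err = db.Update(func(tx *bolt.Tx) error {
        _, err := tx.CreateBucketIfNotExists([]byte("settings"))
        return err
    })
    if err != nil {
        log.Fatal(err)
    }
}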
0  cmd/vendor/github.com/boltdb/bolt/doc.go → cmd/vendor/github.com/coreos/bbolt/doc.go  (generated) (vendored)

cmd/vendor/github.com/boltdb/bolt/freelist.go → cmd/vendor/github.com/coreos/bbolt/freelist.go  (generated) (vendored)
@@ -6,25 +6,40 @@ import (
     "unsafe"
 )
 
+// txPending holds a list of pgids and corresponding allocation txns
+// that are pending to be freed.
+type txPending struct {
+    ids              []pgid
+    alloctx          []txid // txids allocating the ids
+    lastReleaseBegin txid   // beginning txid of last matching releaseRange
+}
+
 // freelist represents a list of all pages that are available for allocation.
 // It also tracks pages that have been freed but are still in use by open transactions.
 type freelist struct {
     ids     []pgid // all free and available free page ids.
-    pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
-    cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+    allocs  map[pgid]txid       // mapping of txid that allocated a pgid.
+    pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
+    cache   map[pgid]bool       // fast lookup of all free and pending page ids.
 }
 
 // newFreelist returns an empty, initialized freelist.
 func newFreelist() *freelist {
     return &freelist{
-        pending: make(map[txid][]pgid),
+        allocs:  make(map[pgid]txid),
+        pending: make(map[txid]*txPending),
         cache:   make(map[pgid]bool),
     }
 }
 
 // size returns the size of the page after serialization.
 func (f *freelist) size() int {
-    return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
+    n := f.count()
+    if n >= 0xFFFF {
+        // The first element will be used to store the count. See freelist.write.
+        n++
+    }
+    return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
 }
 
 // count returns count of pages on the freelist
@@ -40,27 +55,26 @@ func (f *freelist) free_count() int {
 // pending_count returns count of pending pages
 func (f *freelist) pending_count() int {
     var count int
-    for _, list := range f.pending {
-        count += len(list)
+    for _, txp := range f.pending {
+        count += len(txp.ids)
     }
     return count
 }
 
-// all returns a list of all free ids and all pending ids in one sorted list.
-func (f *freelist) all() []pgid {
-    m := make(pgids, 0)
-
-    for _, list := range f.pending {
-        m = append(m, list...)
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+    m := make(pgids, 0, f.pending_count())
+    for _, txp := range f.pending {
+        m = append(m, txp.ids...)
     }
 
     sort.Sort(m)
-    return pgids(f.ids).merge(m)
+    mergepgids(dst, f.ids, m)
 }
 
 // allocate returns the starting page id of a contiguous list of pages of a given size.
 // If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) allocate(n int) pgid {
+func (f *freelist) allocate(txid txid, n int) pgid {
     if len(f.ids) == 0 {
         return 0
     }
@@ -93,7 +107,7 @@ func (f *freelist) allocate(txid txid, n int) pgid {
         for i := pgid(0); i < pgid(n); i++ {
             delete(f.cache, initial+i)
         }
+        f.allocs[initial] = txid
         return initial
     }
 
@@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) {
     }
 
     // Free page and all its overflow pages.
-    var ids = f.pending[txid]
+    txp := f.pending[txid]
+    if txp == nil {
+        txp = &txPending{}
+        f.pending[txid] = txp
+    }
+    allocTxid, ok := f.allocs[p.id]
+    if ok {
+        delete(f.allocs, p.id)
+    } else if (p.flags & freelistPageFlag) != 0 {
+        // Freelist is always allocated by prior tx.
+        allocTxid = txid - 1
+    }
+
     for id := p.id; id <= p.id+pgid(p.overflow); id++ {
         // Verify that page is not already free.
         if f.cache[id] {
             panic(fmt.Sprintf("page %d already freed", id))
         }
 
         // Add to the freelist and cache.
-        ids = append(ids, id)
+        txp.ids = append(txp.ids, id)
+        txp.alloctx = append(txp.alloctx, allocTxid)
         f.cache[id] = true
     }
-    f.pending[txid] = ids
 }
 
 // release moves all page ids for a transaction id (or older) to the freelist.
 func (f *freelist) release(txid txid) {
     m := make(pgids, 0)
-    for tid, ids := range f.pending {
+    for tid, txp := range f.pending {
         if tid <= txid {
             // Move transaction's pending pages to the available freelist.
             // Don't remove from the cache since the page is still free.
-            m = append(m, ids...)
+            m = append(m, txp.ids...)
             delete(f.pending, tid)
         }
     }
     sort.Sort(m)
     f.ids = pgids(f.ids).merge(m)
 }
 
+// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
+func (f *freelist) releaseRange(begin, end txid) {
+    if begin > end {
+        return
+    }
+    var m pgids
+    for tid, txp := range f.pending {
+        if tid < begin || tid > end {
+            continue
+        }
+        // Don't recompute freed pages if ranges haven't updated.
+        if txp.lastReleaseBegin == begin {
+            continue
+        }
+        for i := 0; i < len(txp.ids); i++ {
+            if atx := txp.alloctx[i]; atx < begin || atx > end {
+                continue
+            }
+            m = append(m, txp.ids[i])
+            txp.ids[i] = txp.ids[len(txp.ids)-1]
+            txp.ids = txp.ids[:len(txp.ids)-1]
+            txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
+            txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
+            i--
+        }
+        txp.lastReleaseBegin = begin
+        if len(txp.ids) == 0 {
+            delete(f.pending, tid)
+        }
+    }
+    sort.Sort(m)
+    f.ids = pgids(f.ids).merge(m)
+}
+
@@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) {
 // rollback removes the pages from a given pending tx.
 func (f *freelist) rollback(txid txid) {
     // Remove page ids from cache.
-    for _, id := range f.pending[txid] {
-        delete(f.cache, id)
+    txp := f.pending[txid]
+    if txp == nil {
+        return
     }
-
-    // Remove pages from pending list.
+    var m pgids
+    for i, pgid := range txp.ids {
+        delete(f.cache, pgid)
+        tx := txp.alloctx[i]
+        if tx == 0 {
+            continue
+        }
+        if tx != txid {
+            // Pending free aborted; restore page back to alloc list.
+            f.allocs[pgid] = tx
+        } else {
+            // Freed page was allocated by this txn; OK to throw away.
+            m = append(m, pgid)
+        }
+    }
+    // Remove pages from pending list and mark as free if allocated by txid.
     delete(f.pending, txid)
+    sort.Sort(m)
+    f.ids = pgids(f.ids).merge(m)
 }
 
 // freed returns whether a given page is in the free list.
@@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool {
 
 // read initializes the freelist from a freelist page.
 func (f *freelist) read(p *page) {
+    if (p.flags & freelistPageFlag) == 0 {
+        panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
+    }
     // If the page.count is at the max uint16 value (64k) then it's considered
     // an overflow and the size of the freelist is stored as the first element.
     idx, count := 0, int(p.count)
@@ -169,7 +248,7 @@ func (f *freelist) read(p *page) {
     if count == 0 {
         f.ids = nil
     } else {
-        ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
+        ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
         f.ids = make([]pgid, len(ids))
         copy(f.ids, ids)
 
@@ -181,27 +260,33 @@ func (f *freelist) read(p *page) {
     f.reindex()
 }
 
+// read initializes the freelist from a given list of ids.
+func (f *freelist) readIDs(ids []pgid) {
+    f.ids = ids
+    f.reindex()
+}
+
 // write writes the page ids onto a freelist page. All free and pending ids are
 // saved to disk since in the event of a program crash, all pending ids will
 // become free.
 func (f *freelist) write(p *page) error {
     // Combine the old free pgids and pgids waiting on an open transaction.
-    ids := f.all()
 
     // Update the header flag.
     p.flags |= freelistPageFlag
 
     // The page.count can only hold up to 64k elements so if we overflow that
     // number then we handle it by putting the size in the first element.
-    if len(ids) == 0 {
-        p.count = uint16(len(ids))
-    } else if len(ids) < 0xFFFF {
-        p.count = uint16(len(ids))
-        copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
+    lenids := f.count()
+    if lenids == 0 {
+        p.count = uint16(lenids)
+    } else if lenids < 0xFFFF {
+        p.count = uint16(lenids)
+        f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
     } else {
         p.count = 0xFFFF
-        ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
-        copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
+        ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+        f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
     }
 
     return nil
@@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) {
 
     // Build a cache of only pending pages.
     pcache := make(map[pgid]bool)
-    for _, pendingIDs := range f.pending {
-        for _, pendingID := range pendingIDs {
+    for _, txp := range f.pending {
+        for _, pendingID := range txp.ids {
             pcache[pendingID] = true
         }
     }
@@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) {
 
 // reindex rebuilds the free cache based on available and pending free lists.
 func (f *freelist) reindex() {
-    f.cache = make(map[pgid]bool)
+    f.cache = make(map[pgid]bool, len(f.ids))
     for _, id := range f.ids {
         f.cache[id] = true
     }
-    for _, pendingIDs := range f.pending {
-        for _, pendingID := range pendingIDs {
+    for _, txp := range f.pending {
+        for _, pendingID := range txp.ids {
             f.cache[pendingID] = true
         }
     }
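Both releaseRange above and removeTx in the db.go diff drop an element from a slice by swapping in the last element and shrinking, instead of append(s[:i], s[i+1:]...). A standalone sketch of that idiom with a hypothetical helper over plain uint64 ids (not bbolt code); note that, as in releaseRange, the caller must re-examine index i after the swap:

package main

import "fmt"

// removeAt deletes s[i] in O(1) by swapping in the last element.
// Order is not preserved, which is fine for set-like data such as pending page ids.
func removeAt(s []uint64, i int) []uint64 {
    last := len(s) - 1
    s[i] = s[last]
    s[last] = 0 // clear the vacated slot
    return s[:last]
}

func main() {
    ids := []uint64{10, 11, 12, 13}
    ids = removeAt(ids, 1)
    fmt.Println(ids) // [10 13 12]
}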
cmd/vendor/github.com/boltdb/bolt/node.go → cmd/vendor/github.com/coreos/bbolt/node.go  (generated) (vendored)

@@ -365,7 +365,7 @@ func (n *node) spill() error {
         }
 
         // Allocate contiguous space for the node.
-        p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
+        p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
         if err != nil {
             return err
         }
31  cmd/vendor/github.com/boltdb/bolt/page.go → cmd/vendor/github.com/coreos/bbolt/page.go  (generated) (vendored)
@@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
     // Return the opposite slice if one is nil.
     if len(a) == 0 {
         return b
-    } else if len(b) == 0 {
+    }
+    if len(b) == 0 {
         return a
     }
+    merged := make(pgids, len(a)+len(b))
+    mergepgids(merged, a, b)
+    return merged
+}
 
-    // Create a list to hold all elements from both lists.
-    merged := make(pgids, 0, len(a)+len(b))
+// mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func mergepgids(dst, a, b pgids) {
+    if len(dst) < len(a)+len(b) {
+        panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+    }
+    // Copy in the opposite slice if one is nil.
+    if len(a) == 0 {
+        copy(dst, b)
+        return
+    }
+    if len(b) == 0 {
+        copy(dst, a)
+        return
+    }
+
+    // Merged will hold all elements from both lists.
+    merged := dst[:0]
 
     // Assign lead to the slice with a lower starting value, follow to the higher value.
     lead, follow := a, b
@@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
     }
 
     // Append what's left in follow.
-    merged = append(merged, follow...)
-
-    return merged
+    _ = append(merged, follow...)
 }
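mergepgids writes the sorted union of two already-sorted id lists into a caller-supplied destination, so callers like copyall can size one buffer up front instead of allocating a fresh slice per merge. A simplified, self-contained sketch of the same two-pointer merge over plain uint64 slices (illustrative only, not the vendored code):

package main

import "fmt"

// mergeInto copies the sorted merge of a and b into dst.
// dst must have room for len(a)+len(b) elements.
func mergeInto(dst, a, b []uint64) {
    if len(dst) < len(a)+len(b) {
        panic("mergeInto: dst too small")
    }
    i, j, k := 0, 0, 0
    for i < len(a) && j < len(b) {
        if a[i] <= b[j] {
            dst[k] = a[i]
            i++
        } else {
            dst[k] = b[j]
            j++
        }
        k++
    }
    // One of the inputs is exhausted; copy whatever remains of the other.
    k += copy(dst[k:], a[i:])
    copy(dst[k:], b[j:])
}

func main() {
    a := []uint64{2, 5, 9}
    b := []uint64{1, 5, 7, 12}
    dst := make([]uint64, len(a)+len(b))
    mergeInto(dst, a, b)
    fmt.Println(dst) // [1 2 5 5 7 9 12]
}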
81  cmd/vendor/github.com/boltdb/bolt/tx.go → cmd/vendor/github.com/coreos/bbolt/tx.go  (generated) (vendored)
@@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error {
 // the error is returned to the caller.
 func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
     return tx.root.ForEach(func(k, v []byte) error {
-        if err := fn(k, tx.root.Bucket(k)); err != nil {
-            return err
-        }
-        return nil
+        return fn(k, tx.root.Bucket(k))
     })
 }
 
@@ -169,28 +166,18 @@ func (tx *Tx) Commit() error {
     // Free the old root bucket.
     tx.meta.root.root = tx.root.root
 
-    opgid := tx.meta.pgid
-
-    // Free the freelist and allocate new pages for it. This will overestimate
-    // the size of the freelist but not underestimate the size (which would be bad).
-    tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
-    p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
-    if err != nil {
-        tx.rollback()
-        return err
-    }
-    if err := tx.db.freelist.write(p); err != nil {
-        tx.rollback()
-        return err
-    }
-    tx.meta.freelist = p.id
-
-    // If the high water mark has moved up then attempt to grow the database.
-    if tx.meta.pgid > opgid {
-        if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
-            tx.rollback()
+    // Free the old freelist because commit writes out a fresh freelist.
+    if tx.meta.freelist != pgidNoFreelist {
+        tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
+    }
+
+    if !tx.db.NoFreelistSync {
+        err := tx.commitFreelist()
+        if err != nil {
             return err
         }
+    } else {
+        tx.meta.freelist = pgidNoFreelist
     }
 
     // Write dirty pages to disk.
@@ -235,6 +222,31 @@ func (tx *Tx) Commit() error {
     return nil
 }
 
+func (tx *Tx) commitFreelist() error {
+    // Allocate new pages for the new free list. This will overestimate
+    // the size of the freelist but not underestimate the size (which would be bad).
+    opgid := tx.meta.pgid
+    p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+    if err != nil {
+        tx.rollback()
+        return err
+    }
+    if err := tx.db.freelist.write(p); err != nil {
+        tx.rollback()
+        return err
+    }
+    tx.meta.freelist = p.id
+    // If the high water mark has moved up then attempt to grow the database.
+    if tx.meta.pgid > opgid {
+        if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+            tx.rollback()
+            return err
+        }
+    }
+
+    return nil
+}
+
 // Rollback closes the transaction and ignores all previous updates. Read-only
 // transactions must be rolled back and not committed.
 func (tx *Tx) Rollback() error {
@@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
     if err != nil {
         return 0, err
     }
-    defer func() { _ = f.Close() }()
+    defer func() {
+        if cerr := f.Close(); err == nil {
+            err = cerr
+        }
+    }()
 
     // Generate a meta page. We use the same page data for both meta pages.
     buf := make([]byte, tx.db.pageSize)
@@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
     }
 
     // Move past the meta pages in the file.
-    if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
+    if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
         return n, fmt.Errorf("seek: %s", err)
     }
 
@@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
         return n, err
     }
 
-    return n, f.Close()
+    return n, nil
 }
 
 // CopyFile copies the entire database to file at the given path.
@@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error {
 }
 
 func (tx *Tx) check(ch chan error) {
+    // Force loading free list if opened in ReadOnly mode.
+    tx.db.loadFreelist()
+
     // Check if any pages are double freed.
     freed := make(map[pgid]bool)
-    for _, id := range tx.db.freelist.all() {
+    all := make([]pgid, tx.db.freelist.count())
+    tx.db.freelist.copyall(all)
+    for _, id := range all {
         if freed[id] {
             ch <- fmt.Errorf("page %d: already freed", id)
         }
@@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) {
     reachable := make(map[pgid]*page)
     reachable[0] = tx.page(0) // meta0
    reachable[1] = tx.page(1) // meta1
-    for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
-        reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+    if tx.meta.freelist != pgidNoFreelist {
+        for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+            reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+        }
     }
 
     // Recursively check buckets.
@@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
 
 // allocate returns a contiguous block of memory starting at a given page.
 func (tx *Tx) allocate(count int) (*page, error) {
-    p, err := tx.db.allocate(count)
+    p, err := tx.db.allocate(tx.meta.txid, count)
     if err != nil {
         return nil, err
     }
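The WriteTo change above stops swallowing the error from the deferred Close, which matters for callers taking online backups. A hedged sketch of that use case with the public API (Tx.WriteTo inside db.View), assuming the same github.com/coreos/bbolt import as earlier; file names are illustrative:

package main

import (
    "log"
    "os"

    bolt "github.com/coreos/bbolt"
)

// backup streams a consistent snapshot of db into path using a read transaction.
func backup(db *bolt.DB, path string) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    if err := db.View(func(tx *bolt.Tx) error {
        _, err := tx.WriteTo(f) // writes the whole database, meta pages first
        return err
    }); err != nil {
        f.Close()
        return err
    }
    // Surface the Close error too, mirroring the diff's point about WriteTo.
    return f.Close()
}

func main() {
    db, err := bolt.Open("app.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if err := backup(db, "app.db.bak"); err != nil {
        log.Fatal(err)
    }
}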
35  cmd/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go  (generated) (vendored)
@@ -118,11 +118,24 @@ func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
     }
 }
 
-// TODO: This might now work
 func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
-    out.WriteString(".TS\nallbox;\n")
+    out.WriteString("\n.TS\nallbox;\n")
+
+    max_delims := 0
+    lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n")
+    for _, w := range lines {
+        cur_delims := strings.Count(w, "\t")
+        if cur_delims > max_delims {
+            max_delims = cur_delims
+        }
+    }
+    out.Write([]byte(strings.Repeat("l ", max_delims+1) + "\n"))
+    out.Write([]byte(strings.Repeat("l ", max_delims+1) + ".\n"))
     out.Write(header)
+    if len(header) > 0 {
+        out.Write([]byte("\n"))
+    }
+
     out.Write(body)
     out.WriteString("\n.TE\n")
 }
@@ -132,24 +145,30 @@ func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
         out.WriteString("\n")
     }
     out.Write(text)
-    out.WriteString("\n")
 }
 
 func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
     if out.Len() > 0 {
-        out.WriteString(" ")
+        out.WriteString("\t")
     }
-    out.Write(text)
-    out.WriteString(" ")
+    if len(text) == 0 {
+        text = []byte{' '}
+    }
+    out.Write([]byte("\\fB\\fC" + string(text) + "\\fR"))
 }
 
-// TODO: This is probably broken
 func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
     if out.Len() > 0 {
         out.WriteString("\t")
     }
+    if len(text) > 30 {
+        text = append([]byte("T{\n"), text...)
+        text = append(text, []byte("\nT}")...)
+    }
+    if len(text) == 0 {
+        text = []byte{' '}
+    }
     out.Write(text)
-    out.WriteString("\t")
 }
 
 func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
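The roff.go changes above only affect how markdown tables are turned into man-page tbl markup; callers normally reach this renderer through the package's exported entry point. A small sketch, assuming this vendored go-md2man version exposes the usual md2man.Render(doc []byte) []byte helper; the markdown input is illustrative:

package main

import (
    "fmt"

    "github.com/cpuguy83/go-md2man/md2man"
)

func main() {
    src := []byte("# demo 1\n\n| Flag | Meaning |\n|------|---------|\n| -v   | verbose |\n")
    // Render converts markdown to roff; tables pass through the Table,
    // TableHeaderCell, and TableCell methods patched in the diff above.
    out := md2man.Render(src)
    fmt.Println(string(out))
}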
112  cmd/vendor/github.com/gogo/protobuf/proto/decode.go  (generated) (vendored)
@@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
 // int32, int64, uint32, uint64, bool, and enum
 // protocol buffer types.
 func DecodeVarint(buf []byte) (x uint64, n int) {
-    // x, n already 0
     for shift := uint(0); shift < 64; shift += 7 {
         if n >= len(buf) {
             return 0, 0
@@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
     return 0, 0
 }
 
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-    // x, err already 0
-
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
     i := p.index
     l := len(p.buf)
 
@@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
     return
 }
 
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+    i := p.index
+    buf := p.buf
+
+    if i >= len(buf) {
+        return 0, io.ErrUnexpectedEOF
+    } else if buf[i] < 0x80 {
+        p.index++
+        return uint64(buf[i]), nil
+    } else if len(buf)-i < 10 {
+        return p.decodeVarintSlow()
+    }
+
+    var b uint64
+    // we already checked the first byte
+    x = uint64(buf[i]) - 0x80
+    i++
+
+    b = uint64(buf[i])
+    i++
+    x += b << 7
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 7
+
+    b = uint64(buf[i])
+    i++
+    x += b << 14
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 14
+
+    b = uint64(buf[i])
+    i++
+    x += b << 21
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 21
+
+    b = uint64(buf[i])
+    i++
+    x += b << 28
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 28
+
+    b = uint64(buf[i])
+    i++
+    x += b << 35
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 35
+
+    b = uint64(buf[i])
+    i++
+    x += b << 42
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 42
+
+    b = uint64(buf[i])
+    i++
+    x += b << 49
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 49
+
+    b = uint64(buf[i])
+    i++
+    x += b << 56
+    if b&0x80 == 0 {
+        goto done
+    }
+    x -= 0x80 << 56
+
+    b = uint64(buf[i])
+    i++
+    x += b << 63
+    if b&0x80 == 0 {
+        goto done
+    }
+    // x -= 0x80 << 63 // Always zero.
+
+    return 0, errOverflow
+
+done:
+    p.index = i
+    return x, nil
+}
+
 // DecodeFixed64 reads a 64-bit integer from the Buffer.
 // This is the format for the
 // fixed64, sfixed64, and double protocol buffer types.
@@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
 // Buffer and places the decoded result in pb. If the struct
 // underlying pb does not match the data in the buffer, the results can be
 // unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
     // If the object can unmarshal itself, let it.
     if u, ok := pb.(Unmarshaler); ok {
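The new DecodeVarint above adds an unrolled fast path for buffers with at least 10 bytes remaining and falls back to decodeVarintSlow otherwise; the observable behaviour of the exported API is unchanged. A round-trip sketch with the exported Buffer API of github.com/gogo/protobuf/proto (values are illustrative):

package main

import (
    "fmt"
    "log"

    "github.com/gogo/protobuf/proto"
)

func main() {
    // Encode a few varints into one buffer.
    enc := proto.NewBuffer(nil)
    for _, v := range []uint64{1, 300, 1 << 40} {
        if err := enc.EncodeVarint(v); err != nil {
            log.Fatal(err)
        }
    }

    // Decode them back; one-byte values take the early return, larger ones
    // walk the unrolled 7-bit groups shown in the diff above.
    dec := proto.NewBuffer(enc.Bytes())
    for i := 0; i < 3; i++ {
        v, err := dec.DecodeVarint()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v)
    }
}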
Some files were not shown because too many files have changed in this diff.