Compare commits

...

116 Commits

Author SHA1 Message Date
fb5cd6f1c7 version: 3.2.14
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-11 11:19:37 -08:00
6999bbb47b mvcc: check null before set FillPercent not to panic
Since CreateBucketIfNotExists() can return nil when it returns an error,
FillPercent must only be accessed after a nil check, to avoid a panic.
2018-01-08 17:46:06 -08:00
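A minimal Go sketch of the guard this commit describes, assuming the coreos/bbolt API of the time; the bucket name and fill value are placeholders:

```go
package example

import (
	bolt "github.com/coreos/bbolt"
)

// ensureBucket only touches FillPercent after confirming the bucket is
// non-nil, since CreateBucketIfNotExists can return a nil bucket
// together with an error.
func ensureBucket(db *bolt.DB, name []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(name)
		if err != nil {
			return err
		}
		if b != nil {
			b.FillPercent = 0.9 // placeholder value; tune as needed
		}
		return nil
	})
}
```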
df4036ab73 etcdserver/api/v3rpc: debug user cancellation and log warning for rest
A context error with the cancel code typically indicates user cancellation and
should be logged at debug level. For other error codes we should log a warning.

Fixes #9085
2018-01-08 17:46:01 -08:00
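A rough sketch of that logging policy, using the grpc codes/status packages; the plain log calls are stand-ins, not the etcd logging API:

```go
package example

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// logStreamError logs Canceled errors (typically user cancellation) at
// debug level and everything else as a warning.
func logStreamError(err error) {
	if err == nil {
		return
	}
	if status.Code(err) == codes.Canceled {
		log.Printf("debug: stream canceled by client: %v", err)
		return
	}
	log.Printf("warning: stream error: %v", err)
}
```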
848590e99e version: bump up to 3.2.13+git
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 14:32:36 -08:00
95a726a27e version: bump up to 3.2.13
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 13:29:56 -08:00
288ef7d6fc embed: fix gRPC server panic on GracefulStop
Cherry-pick https://github.com/coreos/etcd/pull/8987.

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 13:29:40 -08:00
7b7722ed97 integration: test GracefulStop on secure embedded server
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 12:44:28 -08:00
8a358f832a clientv3/integration: fix TestKVLargeRequests with -tags cluster_proxy
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 11:12:44 -08:00
2a63909648 tools/functional-tester: remove duplicate grpclog set
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 11:12:38 -08:00
7fb1fafe0c etcdserver/api/v3rpc: set grpclog once
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>

Conflicts:
	etcdserver/api/v3rpc/grpc.go
2018-01-02 11:12:33 -08:00
7025d7c665 etcdserver,embed: discard gRPC info logs when debug is off
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>

Conflicts:
	embed/etcd.go
	etcdserver/api/v3rpc/grpc.go
	etcdserver/config.go
2018-01-02 11:12:26 -08:00
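A small sketch of the idea behind this commit, assuming the grpclog LoggerV2 API; the debug flag is illustrative:

```go
package example

import (
	"io/ioutil"
	"os"

	"google.golang.org/grpc/grpclog"
)

// configureGRPCLog discards gRPC info-level output unless debug logging
// is enabled; warnings and errors always go to stderr.
func configureGRPCLog(debug bool) {
	if debug {
		grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
		return
	}
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
```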
4ab213a4ec etcdserver/api/v3rpc: log stream error with debug level
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 11:11:38 -08:00
bb27a63e64 version: bump up to 3.2.12+git
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 11:11:25 -08:00
b19dae0065 version: bump up to 3.2.12
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:49:55 -08:00
c8915bdb04 integration: bump up wait leader timeout for slow CIs
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:47:58 -08:00
b6896aa951 clientv3/integration: fix TestKVPutError
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:47:49 -08:00
452ccd693d clientv3/integration: test large KV requests
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:47:30 -08:00
348b25f3dc clientv3: call other APIs with default gRPC call options
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:41:57 -08:00
c67e6d5f5e clientv3: call KV/Txn APIs with default gRPC call options
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:39:11 -08:00
e82f0557ac clientv3: configure gRPC message limits in Config
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 12:22:48 -08:00
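A hedged sketch of what the new Config fields allow, assuming the clientv3 field names MaxCallSendMsgSize and MaxCallRecvMsgSize; the sizes and timeout are placeholders:

```go
package example

import (
	"time"

	"github.com/coreos/etcd/clientv3"
)

// newLargeRequestClient raises the gRPC send/receive message limits for
// a single client via clientv3.Config.
func newLargeRequestClient(endpoints []string) (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:          endpoints,
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 10 * 1024 * 1024, // allow 10 MiB requests
		MaxCallRecvMsgSize: 32 * 1024 * 1024, // allow 32 MiB responses
	})
}
```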
4cebdd274c integration: remove typo in "TestV3LargeRequests"
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 10:10:45 -08:00
0363c4b1ef integration: test large request response back from server
Address https://github.com/coreos/etcd/issues/9043.
This won't fix it, but we also need test coverage for large responses
coming back from the server.

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 10:10:15 -08:00
5579dc200d test: bump up clientv3/integration timeout
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-20 10:09:47 -08:00
3fd6e7e1de vendor: pin grpc v1.7.5, grpc-gateway v1.3.0 (no code change)
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-19 13:04:45 -08:00
1fa227da71 integration: add "TestV3AuthWithLeaseRevokeWithRoot"
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-15 09:17:30 -08:00
47f6d32e3e Merge pull request #9013 from gyuho/automated-cherry-pick-of-#8999-origin-release-3.2
Automated cherry pick of #8999
2017-12-14 09:40:21 -08:00
0265457183 compactor: fix error message of Revision compactor
Reorder the parameters so that Noticef can output the error properly.
2017-12-14 08:39:22 -08:00
04ec94f8d1 semaphore: run upgrade tests against v3.2.11
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-07 14:36:30 -08:00
ed4d70888c semaphore.sh: do not fail on "Too many goroutines"
So that the "pkg/testutil" unit tests do not fail.

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-05 17:03:32 -08:00
b9aa507f66 hack: sync with master branch (needed for release)
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-05 11:11:58 -08:00
4ed57689cb gitignore: sync with master branch
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-05 11:09:26 -08:00
a2850218b2 scripts/build-docker: build both gcr.io and quay.io images
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-05 11:08:34 -08:00
fc25300cf0 version: bump up to v3.2.11+git
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-05 11:07:43 -08:00
1e1dbb2392 version: bump up to v3.2.11
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-04 14:24:14 -08:00
ff1f08c93f vendor: upgrade grpc/grpc-go to v1.7.4
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-04 14:23:43 -08:00
78fb932156 Merge pull request #8947 from gyuho/automated-cherry-pick-of-#8939-origin-release-3.2
Automated cherry pick of #8939
2017-12-04 09:35:30 -08:00
c142134a28 Documentation/op-guide: remove non-released flag in monitoring.md
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2017-12-04 09:27:45 -08:00
b44b91462e etcdmain: add more details to TLS HandshakeFailure
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-12-01 09:50:57 -08:00
5921b2c035 api/v3rpc: log grpc stream send/recv errors in server-side
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-30 11:21:48 -08:00
a19672befc version: bump up to v3.2.10+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-17 11:10:48 -08:00
694728c496 version: bump up to v3.2.10
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 13:20:19 -08:00
1557f8b534 Merge pull request #8813 from jpbetz/bbolt-3.2
vendor: Backport bbolt freelist corruption and fragmentation fixes to 3.2 branch
2017-11-16 13:15:04 -08:00
4b9bfa17ee test: Clean agent directories on disk before functional test runs, not after
This is primarily so CI tooling can capture the agent logs after the functional tester runs.
2017-11-16 12:44:30 -08:00
8de0c0419a vendor: Switch from boltdb v1.3.0 to coreos/bbolt v1.3.1-coreos.3 2017-11-16 12:43:17 -08:00
3039c639c0 Merge pull request #8867 from gyuho/clientv3-backport-to-release-3.2
clientv3: backport new balancer to release-3.2, upgrade gRPC to v1.7.3
2017-11-16 10:12:40 -08:00
91335d01bb proxy/grpcproxy: wait until register before Serve
It was failing with a fatal error:

grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:34:11 -08:00
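A minimal sketch of the ordering constraint behind this fix: every gRPC service must be registered before Serve is called. The KV service stands in for the proxy's real set of services, and kv is a placeholder implementation:

```go
package example

import (
	"net"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

// serveProxy registers the service before Serve; registering after
// Serve is what triggered the Fatalf quoted above.
func serveProxy(l net.Listener, kv pb.KVServer) error {
	srv := grpc.NewServer()
	pb.RegisterKVServer(srv, kv) // register first...
	return srv.Serve(l)          // ...then start accepting connections
}
```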
a8c84ffc93 clientv3: fix client balancer with gRPC v1.7
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
939337f450 *: add max requests bytes, keepalive to server, blackhole methods to integration
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
2a6d50470d *: use grpclog.NewLoggerV2
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
d62e39d5ca *: deprecate "metadata.NewContext"
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
7f0f5e2b3c bill-of-materials: regenerate
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
eb1589ad35 *: regenerate proto
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
546d5fe835 scripts/genproto: update protobuf, grpc-gateway gen
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:06 -08:00
fddae84ce2 vendor: update grpc, grpc-gateway, protobuf
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 09:05:02 -08:00
6d406285e6 glide: update grpc, grpc-gateway
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-16 05:32:02 -08:00
8dc20ead31 Merge pull request #8886 from gyuho/qqq
release-3.2: Revert "embed: fix HTTPs + DNS SRV discovery"
2017-11-15 14:47:57 -08:00
d3a3c3154e Revert "embed: fix HTTPs + DNS SRV discovery"
This reverts commit f79d5aaca4.
2017-11-15 14:46:32 -08:00
d5572964e1 Merge pull request #8874 from gyuho/release-branch
release-3.2: fix unit test script, remove old tests, backport functional testing data dir commands
2017-11-15 14:41:41 -08:00
ea51c25030 clientv3: remove balancer tests
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-15 00:15:06 -08:00
d1447a8f5a test: fix unit tests, remove some unnecessary tests
Unit tests weren't running in CI. Also remove some unnecessary tests
(v2 client, Examples) in the release branch.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-15 00:15:02 -08:00
c28c14a5f4 test: Clean agent directories on disk before functional test runs, not after
This is primarily so CI tooling can capture the agent logs after the functional tester runs.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-14 17:08:26 -08:00
f9eb75044a semaphore: prioritize time-out test failures
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-14 17:04:31 -08:00
2250f71e23 semaphore: manually pin v3.2.9 for release upgrade tests
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-08 12:45:00 -08:00
52be1d7b19 hack/scripts-dev: add Makefile, Dockerfile-test
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-06 14:13:10 -08:00
712024d3e5 semaphore.sh: fail tests with "(--- FAIL:|leak)"
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-03 10:59:01 -07:00
7d99afdc7c test: fail tests with "--- FAIL:"
To differentiate real test failures from the gRPC client log message "TRANSIENT_FAILURE".

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-11-03 10:58:43 -07:00
5ceea41af4 travis: upgrade Go to 1.8.5, and use container
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-25 19:45:33 -07:00
2f74456443 semaphore.sh: add to release-3.2 branch
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-25 19:44:05 -07:00
fc87ae4202 version: bump up to v3.2.9+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-25 19:43:30 -07:00
f1d7dd87da version: bump up to v3.2.9
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-06 08:58:06 -07:00
ad212d339b Makefile: sync with master branch on test commands
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-06 08:57:42 -07:00
9f49665284 Documentation/op-guide: remove git merge line in monitoring.md
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-06 08:55:56 -07:00
78f8d6e185 embed: fix HTTPs + DNS SRV discovery 2017-10-05 16:03:22 -07:00
a954a0de53 Makefile: initial commit, update Dockerfile
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-05 10:31:24 -07:00
0c3defdd2b vendor: update 'golang.org/x/crypto'
To include 6c586e17d9.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-05 09:49:00 -07:00
814588d166 travis: use Go 1.8.4
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-05 09:19:29 -07:00
2d2932822c version: bump up to 3.2.8+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-10-05 09:18:48 -07:00
e211fb6de3 version: bump up to 3.2.8
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-09-26 02:41:18 +09:00
fb7e274309 Documentation/op-guide: remove grafana demo link
The dashboard was removed during the Tectonic migration in AWS,
while the Grafana instance still runs in GCP.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-09-26 02:40:59 +09:00
4a61fcf42d docs: remove link-breaking space 2017-09-20 08:11:02 +09:00
4c8fa30dda e2e: test no value is returned in TestCtlV3GetKeysOnly
The test was checking that the key name is returned, but was not correctly
checking that no value is returned.
2017-09-14 04:42:06 +09:00
01c4f35b30 grpcproxy: respect KeysOnly flag
Fixes #8478
2017-09-14 04:41:58 +09:00
15e9510d2c client: fail over to next endpoint on oneshot failure
Fixes #8515
2017-09-08 13:28:55 -07:00
09b7fd4975 version: bump up to 3.2.7+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-09-01 14:03:26 -07:00
bb66589f8c version: bump up to 3.2.7
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-09-01 09:15:15 -07:00
267a2fc8c9 integration: check concurrent auth ops don't cause old rev errors 2017-08-25 13:13:56 -07:00
1fc300ecbd testutil: don't panic on AssertNil on non-nil errors 2017-08-25 13:13:26 -07:00
877d0ce469 etcdserver: consolidate error checking for v3_server functions
Duplicated error checking code moved into raftRequest/raftRequestOnce.
2017-08-23 14:39:59 -07:00
2188513161 concurrency: retry snapshot serializable stm if writes since first header rev
It was checking the rset (read set) key's mod rev, which does not work.
2017-08-22 20:53:47 -07:00
5c7cff66b6 integration: test serializable snapshot STM with old readset revisions
Was hanging.
2017-08-22 20:53:41 -07:00
8c99ab80bd version: bump up to 3.2.6+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-21 13:07:06 -07:00
9d43462d17 version: bump up to 3.2.6
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-21 10:40:55 -07:00
78d68226e6 mvcc: sending events after restore
Fixes: #8411
2017-08-21 10:39:46 -07:00
e9d576c3d6 Documentation: fix broken link on FAQ
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-18 08:43:14 -07:00
1c578cd442 e2e: test booting etcd with multiple peer listeners 2017-08-18 08:43:06 -07:00
b97714b3e6 embed: associate peer serve() listener with corresponding peer
Fixes #8383
2017-08-17 14:16:56 -07:00
ce0a61ff67 dl_build: fix minor typo 2017-08-17 11:44:02 -07:00
74783a38ae docs: revising to match sidebar structure. 2017-08-16 17:03:28 -07:00
d372ff96a0 docs: link fix. 2017-08-16 17:03:12 -07:00
8c7b9db9cc docs: slight rearranging of top two sections. 2017-08-16 17:02:05 -07:00
f1fb342305 docs: adding an index for upgrade pages. 2017-08-16 17:01:49 -07:00
fa0f278783 embed: add 'enable-pprof' tag for config file
Fix https://github.com/coreos/etcd/issues/8402.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-15 11:52:48 -07:00
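A brief sketch of the same option used programmatically through the embed package; the EnablePprof field name is assumed here, and the data directory is a placeholder:

```go
package example

import (
	"log"

	"github.com/coreos/etcd/embed"
)

// startWithPprof starts an embedded etcd with the pprof endpoint
// enabled on the client port (the same switch the config-file tag
// exposes).
func startWithPprof(dir string) (*embed.Etcd, error) {
	cfg := embed.NewConfig()
	cfg.Dir = dir
	cfg.EnablePprof = true // assumed field; serves /debug/pprof
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		return nil, err
	}
	log.Println("embedded etcd started with pprof enabled")
	return e, nil
}
```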
e197c14847 mvcc: test keys gauge is reloaded correctly on restore 2017-08-10 12:59:24 -07:00
e7bf5477de mvcc: reset keys gauge on restore
Fixes #8388
2017-08-10 12:59:19 -07:00
f81b72fd93 version: bump up to v3.2.5+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-04 11:30:57 -07:00
d0d1a87aa9 version: bump up to v3.2.5
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-04 08:40:50 -07:00
7c6a9a7317 contrib/raftexample: use bytes.Buffer.String (no 'string()')
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-04 08:40:29 -07:00
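The change is a one-line idiom fix; a tiny sketch for contrast, with the old form shown as a comment:

```go
package main

import (
	"bytes"
	"fmt"
)

func render() string {
	var buf bytes.Buffer
	buf.WriteString("raftexample")
	// old form removed by the commit: return string(buf.Bytes())
	return buf.String()
}

func main() { fmt.Println(render()) }
```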
be8f102efb grpcproxy: forward PrevKv flag in Put 2017-08-04 07:32:17 -07:00
3003901447 integration: test Put with PrevKey=true
Was missing in proxy.
2017-08-04 07:32:11 -07:00
157cfac31b ctlv3/command: remove double-quote typos in fields printer
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-01 17:25:53 -07:00
40a1704e6f ctlv3: exit non-zero on unhealthy ep command 2017-07-31 16:00:23 -07:00
30981ecb0a e2e/docker: docker image for testing wildcard DNS 2017-07-24 09:54:55 -07:00
f65a11ced5 fixtures: generate wildcard DNS SAN cert
DNS: *.etcd.local
2017-07-24 09:54:55 -07:00
db4838d4eb transport: use reverse lookup to match wildcard DNS SAN
Fixes #8268
2017-07-24 09:54:55 -07:00
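A simplified sketch of the matching approach named in the commit, using only the standard library net and crypto/x509 APIs; it is not the etcd transport code itself:

```go
package example

import (
	"crypto/x509"
	"net"
	"strings"
)

// matchWildcardSAN reverse-resolves the peer IP and checks the returned
// host names against the certificate, so a wildcard SAN such as
// *.etcd.local can match a connection that arrives by IP address.
func matchWildcardSAN(cert *x509.Certificate, remoteIP string) bool {
	names, err := net.LookupAddr(remoteIP)
	if err != nil {
		return false
	}
	for _, name := range names {
		if cert.VerifyHostname(strings.TrimSuffix(name, ".")) == nil {
			return true
		}
	}
	return false
}
```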
8ab42fb045 *: move v2http handlers without /v2 prefix to etcdhttp
Lets --enable-v2=false configurations provide /metrics, /health, etc.

Fixes #8167
2017-07-24 09:54:48 -07:00
ff9a0a3527 version: bump up to 3.2.4+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-24 09:14:34 -07:00
404 changed files with 24932 additions and 6660 deletions

7
.gitignore vendored
View File

@ -1,15 +1,22 @@
/agent-*
/coverage
/covdir
/gopath
/gopath.proto
/go-bindata
/release
/machine*
/bin
.Dockerfile-test
.vagrant
*.etcd
*.log
/etcd
*.swp
/hack/insta-discovery/.env
*.test
tools/functional-tester/docker/bin
hack/scripts-dev/docker-dns/.Dockerfile
hack/scripts-dev/docker-dns-srv/.Dockerfile
hack/tls-setup/certs
.idea

16
.semaphore.sh Executable file
View File

@ -0,0 +1,16 @@
#!/usr/bin/env bash
TEST_SUFFIX=$(date +%s | base64 | head -c 15)
TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' MANUAL_VER=v3.2.11"
if [ "$TEST_ARCH" == "386" ]; then
TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'"
fi
docker run \
--rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd \
gcr.io/etcd-development/etcd-test:go1.8.5 \
/bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log"
! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-${TEST_SUFFIX}.log

View File

@ -1,11 +1,13 @@
dist: trusty
language: go
go_import_path: github.com/coreos/etcd
sudo: false
sudo: required
services: docker
go:
- 1.8.3
- tip
- 1.8.5
- tip
notifications:
on_success: never
@ -13,19 +15,25 @@ notifications:
env:
matrix:
- TARGET=amd64
- TARGET=darwin-amd64
- TARGET=windows-amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
- TARGET=ppc64le
- TARGET=amd64
- TARGET=amd64-go-tip
- TARGET=darwin-amd64
- TARGET=windows-amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
- TARGET=ppc64le
matrix:
fast_finish: true
allow_failures:
- go: tip
- go: tip
env: TARGET=amd64-go-tip
exclude:
- go: 1.8.5
env: TARGET=amd64-go-tip
- go: tip
env: TARGET=amd64
- go: tip
env: TARGET=darwin-amd64
- go: tip
@ -39,45 +47,42 @@ matrix:
- go: tip
env: TARGET=ppc64le
addons:
apt:
sources:
- debian-sid
packages:
- libpcap-dev
- libaspell-dev
- libhunspell-dev
- shellcheck
before_install:
- go get -v -u github.com/chzchzchz/goword
- go get -v -u github.com/coreos/license-bill-of-materials
- go get -v -u honnef.co/go/tools/cmd/gosimple
- go get -v -u honnef.co/go/tools/cmd/unused
- go get -v -u honnef.co/go/tools/cmd/staticcheck
- ./scripts/install-marker.sh amd64
- docker pull gcr.io/etcd-development/etcd-test:go1.8.5
# disable godep restore override
install:
- pushd cmd/etcd && go get -t -v ./... && popd
- pushd cmd/etcd && go get -t -v ./... && popd
script:
- >
case "${TARGET}" in
amd64)
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
/bin/bash -c "GOARCH=amd64 ./test"
;;
amd64-go-tip)
GOARCH=amd64 ./test
;;
darwin-amd64)
GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=darwin GOARCH=amd64 ./build
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
/bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=darwin GOARCH=amd64 ./build"
;;
windows-amd64)
GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=windows GOARCH=amd64 ./build
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
/bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=windows GOARCH=amd64 ./build"
;;
386)
GOARCH=386 PASSES="build unit" ./test
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
/bin/bash -c "GOARCH=386 PASSES='build unit' ./test"
;;
*)
# test building out of gopath
GO_BUILD_FLAGS="-a -v" GOPATH="" GOARCH="${TARGET}" ./build
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
/bin/bash -c "GO_BUILD_FLAGS='-a -v' GOARCH='${TARGET}' ./build"
;;
esac

57
Dockerfile-test Normal file
View File

@ -0,0 +1,57 @@
FROM ubuntu:16.10
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
RUN apt-get -y update \
&& apt-get -y install \
build-essential \
gcc \
apt-utils \
pkg-config \
software-properties-common \
apt-transport-https \
libssl-dev \
sudo \
bash \
curl \
wget \
tar \
git \
netcat \
libaspell-dev \
libhunspell-dev \
hunspell-en-us \
aspell-en \
shellcheck \
&& apt-get -y update \
&& apt-get -y upgrade \
&& apt-get -y autoremove \
&& apt-get -y autoclean
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
ENV GO_VERSION REPLACE_ME_GO_VERSION
ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
RUN rm -rf ${GOROOT} \
&& curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
&& mkdir -p ${GOPATH}/src ${GOPATH}/bin \
&& go version
RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
WORKDIR ${GOPATH}/src/github.com/coreos/etcd
ADD ./scripts/install-marker.sh /tmp/install-marker.sh
RUN go get -v -u -tags spell github.com/chzchzchz/goword \
&& go get -v -u github.com/coreos/license-bill-of-materials \
&& go get -v -u honnef.co/go/tools/cmd/gosimple \
&& go get -v -u honnef.co/go/tools/cmd/unused \
&& go get -v -u honnef.co/go/tools/cmd/staticcheck \
&& go get -v -u github.com/wadey/gocovmerge \
&& go get -v -u github.com/gordonklaus/ineffassign \
&& /tmp/install-marker.sh amd64 \
&& rm -f /tmp/install-marker.sh \
&& curl -s https://codecov.io/bash >/codecov \
&& chmod 700 /codecov

View File

@ -2,7 +2,7 @@
## System requirements
The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. than For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
## Download the pre-built binary

View File

@ -47,12 +47,19 @@ Administrators who need to create reliable and scalable key-value stores for the
- [Amazon Web Services][aws_platform]
- [FreeBSD][freebsd_platform]
### Upgrading and compatibility
### Security
- [Migrate applications from using API v2 to API v3][v2_migration]
- [Upgrading a v2.3 cluster to v3.0][v3_upgrade]
- [Upgrading a v3.0 cluster to v3.1][v31_upgrade]
- [Upgrading a v3.1 cluster to v3.2][v32_upgrade]
- [TLS][security]
- [Role-based access control][authentication]
### Maintenance and troubleshooting
- [Frequently asked questions][common questions]
- [Monitoring][monitoring]
- [Maintenance][maintenance]
- [Failure modes][failures]
- [Disaster recovery][recovery]
- [Upgrading][upgrading]
## Learning
@ -106,8 +113,6 @@ Answers to [common questions] about etcd.
[freebsd_platform]: platforms/freebsd.md
[aws_platform]: platforms/aws.md
[experimental]: dev-guide/experimental_apis.md
[v3_upgrade]: upgrades/upgrade_3_0.md
[v31_upgrade]: upgrades/upgrade_3_1.md
[v32_upgrade]: upgrades/upgrade_3_2.md
[authentication]: op-guide/authentication.md
[auth_design]: learning/auth_design.md
[upgrading]: upgrades/upgrading-etcd.md

View File

@ -60,7 +60,7 @@ For avoiding such a situation, the API layer performs *version number validation
After authenticating with `Authenticate()`, a client can create a gRPC connection as it would without auth. In addition to the existing initialization process, the client must associate the token with the newly created connection. `grpc.WithPerRPCCredentials()` provides the functionality for this purpose.
Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromIncomingContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
### Checking permission in the state machine

View File

@ -1,17 +1,17 @@
# Why etcd
# etcd versus other key-value stores
The name "etcd" originated from two ideas, the unix "/etc" folder and "d"istibuted systems. The "/etc" folder is a place to store configuration data for a single system whereas etcd stores configuration information for large scale distributed systems. Hence, a "d"istributed "/etc" is "etcd".
etcd stores metadata in a consistent and fault-tolerant way. Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness.
etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. etcd stores metadata in a consistent and fault-tolerant way. An etcd cluster is meant to provide key-value storage with best of class stability, reliability, scalability and performance.
Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Many [organizations][production-users] use etcd to implement production systems such as container schedulers, service discovery services, and distributed data storage. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness.
## Use cases
- Container Linux by CoreOS: Application running on [Container Linux][container-linux] gets automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
- Container Linux by CoreOS: Applications running on [Container Linux][container-linux] get automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. Locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
- [Kubernetes][kubernetes] stores configuration data into etcd for service discovery and cluster management; etcd's consistency is crucial for correctly scheduling and operating services. The Kubernetes API server persists cluster state into etcd. It uses etcd's watch API to monitor the cluster and roll out critical configuration changes.
## etcd versus other key-value stores
When deciding whether to use etcd as a key-value store, it's worth keeping in mind etcd's main goal. Namely, etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. An etcd cluster is meant to provide consistent key-value storage with best of class stability, reliability, scalability and performance. The upshot of this focus is many [organizations][production-users] already use etcd to implement production systems such as container schedulers, service discovery services, distributed data storage, and more.
## Comparison chart
Perhaps etcd already seems like a good fit, but as with all technological decisions, proceed with caution. Please note this documentation is written by the etcd team. Although the ideal is a disinterested comparison of technology and features, the authors' expertise and biases obviously favor etcd. Use only as directed.
@ -84,7 +84,7 @@ For distributed coordination, choosing etcd can help prevent operational headach
[tidb]: https://github.com/pingcap/tidb
[etcd-v3lock]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb
[etcd-v3election]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb
[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname
[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname-command-arg1-arg2-
[etcd-etcdctl-elect]: ../../etcdctl/README.md#elect-options-election-name-proposal
[etcd-mvcc]: data_model.md
[etcd-recipe]: https://godoc.org/github.com/coreos/etcd/contrib/recipes
@ -113,4 +113,3 @@ For distributed coordination, choosing etcd can help prevent operational headach
[container-linux]: https://coreos.com/why
[locksmith]: https://github.com/coreos/locksmith
[kubernetes]: http://kubernetes.io/docs/whatisk8s

View File

@ -1,6 +1,45 @@
# Monitoring etcd
Each etcd server exports metrics under the `/metrics` path on its client port.
Each etcd server provides local monitoring information on its client port through http endpoints. The monitoring data is useful for both system health checking and cluster debugging.
## Debug endpoint
If `--debug` is set, the etcd server exports debugging information on its client port under the `/debug` path. Take care when setting `--debug`, since there will be degraded performance and verbose logging.
The `/debug/pprof` endpoint is the standard go runtime profiling endpoint. This can be used to profile CPU, heap, mutex, and goroutine utilization. For example, here `go tool pprof` gets the top 10 functions where etcd spends its time:
```sh
$ go tool pprof http://localhost:2379/debug/pprof/profile
Fetching profile from http://localhost:2379/debug/pprof/profile
Please wait... (30s)
Saved profile in /home/etcd/pprof/pprof.etcd.localhost:2379.samples.cpu.001.pb.gz
Entering interactive mode (type "help" for commands)
(pprof) top10
310ms of 480ms total (64.58%)
Showing top 10 nodes out of 157 (cum >= 10ms)
flat flat% sum% cum cum%
130ms 27.08% 27.08% 130ms 27.08% runtime.futex
70ms 14.58% 41.67% 70ms 14.58% syscall.Syscall
20ms 4.17% 45.83% 20ms 4.17% github.com/coreos/etcd/cmd/vendor/golang.org/x/net/http2/hpack.huffmanDecode
20ms 4.17% 50.00% 30ms 6.25% runtime.pcvalue
20ms 4.17% 54.17% 50ms 10.42% runtime.schedule
10ms 2.08% 56.25% 10ms 2.08% github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/etcdserver.(*EtcdServer).AuthInfoFromCtx
10ms 2.08% 58.33% 10ms 2.08% github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/etcdserver.(*EtcdServer).Lead
10ms 2.08% 60.42% 10ms 2.08% github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/pkg/wait.(*timeList).Trigger
10ms 2.08% 62.50% 10ms 2.08% github.com/coreos/etcd/cmd/vendor/github.com/prometheus/client_golang/prometheus.(*MetricVec).hashLabelValues
10ms 2.08% 64.58% 10ms 2.08% github.com/coreos/etcd/cmd/vendor/golang.org/x/net/http2.(*Framer).WriteHeaders
```
The `/debug/requests` endpoint gives gRPC traces and performance statistics through a web browser. For example, here is a `Range` request for the key `abc`:
```
When Elapsed (s)
2017/08/18 17:34:51.999317 0.000244 /etcdserverpb.KV/Range
17:34:51.999382 . 65 ... RPC: from 127.0.0.1:47204 deadline:4.999377747s
17:34:51.999395 . 13 ... recv: key:"abc"
17:34:51.999499 . 104 ... OK
17:34:51.999535 . 36 ... sent: header:<cluster_id:14841639068965178418 member_id:10276657743932975437 revision:15 raft_term:17 > kvs:<key:"abc" create_revision:6 mod_revision:14 version:9 value:"asda" > count:1
```
The metrics can be fetched with `curl`:
@ -75,8 +114,6 @@ Access: proxy
Then import the default [etcd dashboard template][template] and customize. For instance, if Prometheus data source name is `my-etcd`, the `datasource` field values in JSON also need to be `my-etcd`.
See the [demo][demo].
Sample dashboard:
![](./etcd-sample-grafana.png)
@ -85,4 +122,3 @@ Sample dashboard:
[prometheus]: https://prometheus.io/
[grafana]: http://grafana.org/
[template]: ./grafana.json
[demo]: http://dash.etcd.io/dashboard/db/test-etcd

View File

@ -6,7 +6,7 @@ This guide assumes operational knowledge of Amazon Web Services (AWS), specifica
As a critical building block for distributed systems it is crucial to perform adequate capacity planning in order to support the intended cluster workload. As a highly available and strongly consistent data store increasing the number of nodes in an etcd cluster will generally affect performance adversely. This makes sense intuitively, as more nodes means more members for the leader to coordinate state across. The most direct way to increase throughput and decrease latency of an etcd cluster is allocate more disk I/O, network I/O, CPU, and memory to cluster members. In the event it is impossible to temporarily divert incoming requests to the cluster, scaling the EC2 instances which comprise the etcd cluster members one at a time may improve performance. It is, however, best to avoid bottlenecks through capacity planning.
The etcd team has produced a [hardware recommendation guide]( ../op-guide/hardware.md) which is very useful for “ballparking” how many nodes and what instance type are necessary for a cluster.
The etcd team has produced a [hardware recommendation guide](../op-guide/hardware.md) which is very useful for “ballparking” how many nodes and what instance type are necessary for a cluster.
AWS provides a service for creating groups of EC2 instances which are dynamically sized to match load on the instances. Using an Auto Scaling Group ([ASG](http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroup.html)) to dynamically scale an etcd cluster is not recommended for several reasons including:

View File

@ -0,0 +1,19 @@
# Upgrading etcd clusters and applications
This section contains documents specific to upgrading etcd clusters and applications.
## Moving from etcd API v2 to API v3
* [Migrate applications from using API v2 to API v3][migrate-apps]
## Upgrading an etcd v3.x cluster
* [Upgrade etcd from 3.0 to 3.1][upgrade-3-1]
* [Upgrade etcd from 3.1 to 3.2][upgrade-3-2]
## Upgrading from etcd v2.3
* [Upgrade a v2.3 cluster to v3.0][upgrade-cluster]
[migrate-apps]: ../op-guide/v2-migration.md
[upgrade-cluster]: upgrade_3_0.md
[upgrade-3-1]: upgrade_3_1.md
[upgrade-3-2]: upgrade_3_2.md

View File

@ -1,170 +1,85 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
# Documentation
[v3-docs]: ../docs.md#documentation
etcd is a distributed key-value store designed to reliably and quickly preserve and provide access to critical data. It enables reliable distributed coordination through distributed locking, leader elections, and write barriers. An etcd cluster is intended for high availability and permanent data storage and retrieval.
This is the etcd v2 documentation set. For more recent versions, please see the [etcd v3 guides][etcd-v3].
## Communicating with etcd v2
Reading and writing into the etcd keyspace is done via a simple, RESTful HTTP API, or using language-specific libraries that wrap the HTTP API with higher level primitives.
### Reading and Writing
- [Client API Documentation][api]
- [Libraries, Tools, and Language Bindings][libraries]
- [Admin API Documentation][admin-api]
- [Members API][members-api]
### Security, Auth, Access control
- [Security Model][security]
- [Auth and Security][auth_api]
- [Authentication Guide][authentication]
## etcd v2 Cluster Administration
Configuration values are distributed within the cluster for your applications to read. Values can be changed programmatically and smart applications can reconfigure automatically. You'll never again have to run a configuration management tool on every machine in order to change a single config value.
### General Info
- [etcd Proxies][proxy]
- [Production Users][production-users]
- [Admin Guide][admin_guide]
- [Configuration Flags][configuration]
- [Frequently Asked Questions][faq]
### Initial Setup
- [Tuning etcd Clusters][tuning]
- [Discovery Service Protocol][discovery_protocol]
- [Running etcd under Docker][docker_guide]
### Live Reconfiguration
- [Runtime Configuration][runtime-configuration]
### Debugging etcd
- [Metrics Collection][metrics]
- [Error Code][errorcode]
- [Reporting Bugs][reporting_bugs]
### Migration
- [Upgrade etcd to 2.3][upgrade_2_3]
- [Upgrade etcd to 2.2][upgrade_2_2]
- [Upgrade to etcd 2.1][upgrade_2_1]
- [Snapshot Migration (0.4.x to 2.x)][04_to_2_snapshot_migration]
- [Backward Compatibility][backward_compatibility]
# etcd2
[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd)](https://goreportcard.com/report/github.com/coreos/etcd)
[![Build Status](https://travis-ci.org/coreos/etcd.svg?branch=master)](https://travis-ci.org/coreos/etcd)
[![Build Status](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd)
[![Docker Repository on Quay.io](https://quay.io/repository/coreos/etcd-git/status "Docker Repository on Quay.io")](https://quay.io/repository/coreos/etcd-git)
**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
![etcd Logo](../../logos/etcd-horizontal-color.png)
etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being:
* *Simple*: curl'able user-facing API (HTTP+JSON)
* *Secure*: optional SSL client cert authentication
* *Fast*: benchmarked 1000s of writes/s per instance
* *Reliable*: properly distributed using Raft
etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log.
etcd is used [in production by many companies](./production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], and many others.
See [etcdctl][etcdctl] for a simple command line client.
Or feel free to just use `curl`, as in the examples below.
[raft]: https://raft.github.io/
[k8s]: http://kubernetes.io/
[fleet]: https://github.com/coreos/fleet
[locksmith]: https://github.com/coreos/locksmith
[vulcand]: https://github.com/vulcand/vulcand
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
## Getting Started
### Getting etcd
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
For those wanting to try the very latest version, you can build the latest version of etcd from the `master` branch.
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.5+ is required).
All development occurs on `master`, including new features and bug fixes.
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
[github-release]: https://github.com/coreos/etcd/releases/
[branch-management]: branch_management.md
### Running etcd
First start a single-member cluster of etcd:
```sh
./bin/etcd
```
This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
Next, let's set a single key, and then retrieve it:
```
curl -L http://127.0.0.1:2379/v2/keys/mykey -XPUT -d value="this is awesome"
curl -L http://127.0.0.1:2379/v2/keys/mykey
```
You have successfully started an etcd and written a key to the store.
### etcd TCP ports
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. To maintain compatibility, some etcd configuration and documentation continues to refer to the legacy ports 4001 and 7001, but all new etcd use and discussion should adopt the IANA-assigned ports. The legacy ports 4001 and 7001 will be fully deprecated, and support for their use removed, in future etcd releases.
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
### Running local etcd cluster
First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
Our [Procfile script](../../V2Procfile) will set up a local example cluster. You can start it with:
```sh
goreman start
```
This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd proxy `proxy`, which runs locally and composes a cluster.
You can write a key to the cluster and retrieve the value back from any member or proxy.
### Next Steps
Now it's time to dig into the full etcd API and other guides.
- Explore the full [API][api].
- Set up a [multi-machine cluster][clustering].
- Learn the [config format, env variables and flags][configuration].
- Find [language bindings and tools][libraries-and-tools].
- Use TLS to [secure an etcd cluster][security].
- [Tune etcd][tuning].
- [Upgrade from 0.4.9+ to 2.2.0][upgrade].
[api]: ./api.md
[clustering]: ./clustering.md
[configuration]: ./configuration.md
[libraries-and-tools]: ./libraries-and-tools.md
[security]: ./security.md
[tuning]: ./tuning.md
[upgrade]: ./04_to_2_snapshot_migration.md
## Contact
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](../../ROADMAP.md)
- Bugs: [issues](https://github.com/coreos/etcd/issues)
## Contributing
See [CONTRIBUTING](../../CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
## Reporting bugs
See [reporting bugs](reporting_bugs.md) for details about reporting any issue you may encounter.
## Known bugs
[GH518](https://github.com/coreos/etcd/issues/518) is a known bug. Issue is that:
```
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d dir=true -d prevExist=true
```
If the previous node is a key and client tries to overwrite it with `dir=true`, it does not give warnings such as `Not a directory`. Instead, the key is set to empty value.
## Project Details
### Versioning
#### Service Versioning
etcd uses [semantic versioning](http://semver.org)
New minor versions may add additional features to the API.
You can get the version of etcd by issuing a request to /version:
```sh
curl -L http://127.0.0.1:2379/version
```
#### API Versioning
The `v2` API responses should not change after the 2.0.0 release but new features will be added over time.
#### 32-bit and other unsupported systems
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print
a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to
the target architecture.
Currently only the amd64 architecture is officially supported by `etcd`.
[358]: https://github.com/coreos/etcd/issues/358
### License
etcd is under the Apache 2.0 license. See the [LICENSE](../../LICENSE) file for details.
[etcd-v3]: ../docs.md
[api]: api.md
[libraries]: libraries-and-tools.md
[admin-api]: other_apis.md
[members-api]: members_api.md
[security]: security.md
[auth_api]: auth_api.md
[authentication]: authentication.md
[proxy]: proxy.md
[production-users]: production-users.md
[admin_guide]: admin_guide.md
[configuration]: configuration.md
[faq]: faq.md
[tuning]: tuning.md
[discovery_protocol]: discovery_protocol.md
[docker_guide]: docker_guide.md
[runtime-configuration]: runtime-configuration.md
[metrics]: metrics.md
[errorcode]: errorcode.md
[reporting_bugs]: reporting_bugs.md
[upgrade_2_3]: upgrade_2_3.md
[upgrade_2_2]: upgrade_2_2.md
[upgrade_2_1]: upgrade_2_1.md
[04_to_2_snapshot_migration]: 04_to_2_snapshot_migration.md
[backward_compatibility]: backward_compatibility.md

View File

@ -992,7 +992,7 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
}
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, nil
}

View File

@ -453,7 +453,8 @@ func TestAuthInfoFromCtx(t *testing.T) {
t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
// as if it came from RPC
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
ai, err = as.AuthInfoFromCtx(ctx)
if err != nil && ai != nil {
t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
@ -465,19 +466,19 @@ func TestAuthInfoFromCtx(t *testing.T) {
t.Error(err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
_, err = as.AuthInfoFromCtx(ctx)
if err != ErrInvalidAuthToken {
t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
_, err = as.AuthInfoFromCtx(ctx)
if err != ErrInvalidAuthToken {
t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
}
ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
ai, err = as.AuthInfoFromCtx(ctx)
if err != nil {
t.Error(err)
@ -521,7 +522,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) {
donec := make(chan struct{})
go func() {
defer close(donec)
ctx := metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
as.AuthInfoFromCtx(ctx)
}()
as.UserAdd(&pb.AuthUserAddRequest{Name: "test"})

View File

@ -27,19 +27,19 @@
]
},
{
"project": "github.com/boltdb/bolt",
"project": "github.com/cockroachdb/cmux",
"licenses": [
{
"type": "MIT License",
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/cockroachdb/cmux",
"project": "github.com/coreos/bbolt",
"licenses": [
{
"type": "Apache License 2.0",
"type": "MIT License",
"confidence": 1
}
]
@ -345,12 +345,21 @@
}
]
},
{
"project": "google.golang.org/genproto/googleapis",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "google.golang.org/grpc",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.979253112033195
"type": "Apache License 2.0",
"confidence": 1
}
]
},

View File

@ -372,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
} else if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
@ -385,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
err = cerr.Errors[0]
}
if err != nil {
if !isOneShot {
continue
}
continue
c.Lock()
c.pinned = (k + 1) % leps
c.Unlock()
return nil, nil, err
}
if k != pinned {
c.Lock()

View File

@ -16,6 +16,7 @@ package client
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
@ -304,7 +305,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
fakeErr := errors.New("fake!")
fakeURL := url.URL{}
tests := []struct {
client *httpClusterClient
client *httpClusterClient
ctx context.Context
wantCode int
wantErr error
wantPinned int
@ -395,10 +398,30 @@ func TestHTTPClusterClientDo(t *testing.T) {
wantCode: http.StatusTeapot,
wantPinned: 1,
},
// 500-level errors cause one shot Do to fallthrough to next endpoint
{
client: &httpClusterClient{
endpoints: []url.URL{fakeURL, fakeURL},
clientFactory: newStaticHTTPClientFactory(
[]staticHTTPResponse{
{resp: http.Response{StatusCode: http.StatusBadGateway}},
{resp: http.Response{StatusCode: http.StatusTeapot}},
},
),
rand: rand.New(rand.NewSource(0)),
},
ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
wantErr: fmt.Errorf("client: etcd member returns server error [Bad Gateway]"),
wantPinned: 1,
},
}
for i, tt := range tests {
resp, _, err := tt.client.Do(context.Background(), nil)
if tt.ctx == nil {
tt.ctx = context.Background()
}
resp, _, err := tt.client.Do(tt.ctx, nil)
if !reflect.DeepEqual(tt.wantErr, err) {
t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
continue
@ -407,11 +430,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
if resp == nil {
if tt.wantCode != 0 {
t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
continue
}
continue
}
if resp.StatusCode != tt.wantCode {
} else if resp.StatusCode != tt.wantCode {
t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
continue
}

View File

@ -1,6 +1,6 @@
# etcd/clientv3
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3.

View File

@ -20,6 +20,7 @@ import (
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@ -100,60 +101,65 @@ type Auth interface {
}
type auth struct {
remote pb.AuthClient
remote pb.AuthClient
callOpts []grpc.CallOption
}
func NewAuth(c *Client) Auth {
return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
api := &auth{remote: RetryAuthClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
@ -163,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
@ -196,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) {
}
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
callOpts []grpc.CallOption
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
@ -209,14 +216,18 @@ func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
return &authenticator{
api := &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}, nil
}
if c != nil {
api.callOpts = c.callOpts
}
return api, nil
}

View File

@ -1,356 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// addrs are the client's endpoints for grpc
addrs []grpc.Address
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// mu protects upEps, pinAddr, and connectingAddr
mu sync.RWMutex
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
upc chan struct{}
// downc closes when grpc calls down() on pinAddr
downc chan struct{}
// stopc is closed to signal updateNotifyLoop should stop.
stopc chan struct{}
// donec closes when all goroutines are exited
donec chan struct{}
// updateAddrsC notifies updateNotifyLoop to update addrs.
updateAddrsC chan struct{}
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
closed bool
}
func newSimpleBalancer(eps []string) *simpleBalancer {
notifyCh := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
sb := &simpleBalancer{
addrs: addrs,
notifyCh: notifyCh,
readyc: make(chan struct{}),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
updateAddrsC: make(chan struct{}, 1),
host2ep: getHost2ep(eps),
}
close(sb.downc)
go sb.updateNotifyLoop()
return sb
}
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *simpleBalancer) getEndpoint(host string) string {
b.mu.Lock()
defer b.mu.Unlock()
return b.host2ep[host]
}
func getHost2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}
func (b *simpleBalancer) updateAddrs(eps []string) {
np := getHost2ep(eps)
b.mu.Lock()
match := len(np) == len(b.host2ep)
for k, v := range np {
if b.host2ep[k] != v {
match = false
break
}
}
if match {
// same endpoints, so no need to update address
b.mu.Unlock()
return
}
b.host2ep = np
addrs := make([]grpc.Address, 0, len(eps))
for i := range eps {
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
}
b.addrs = addrs
// updating notifyCh can trigger new connections,
// only update addrs if all connections are down
// or addrs does not include pinAddr.
update := !hasAddr(addrs, b.pinAddr)
b.mu.Unlock()
if update {
select {
case b.updateAddrsC <- struct{}{}:
case <-b.stopc:
}
}
}
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
for _, addr := range addrs {
if targetAddr == addr.Addr {
return true
}
}
return false
}
func (b *simpleBalancer) updateNotifyLoop() {
defer close(b.donec)
for {
b.mu.RLock()
upc, downc, addr := b.upc, b.downc, b.pinAddr
b.mu.RUnlock()
// downc or upc should be closed
select {
case <-downc:
downc = nil
default:
}
select {
case <-upc:
upc = nil
default:
}
switch {
case downc == nil && upc == nil:
// stale
select {
case <-b.stopc:
return
default:
}
case downc == nil:
b.notifyAddrs()
select {
case <-upc:
case <-b.updateAddrsC:
b.notifyAddrs()
case <-b.stopc:
return
}
case upc == nil:
select {
// close connections that are not the pinned address
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
case <-downc:
case <-b.stopc:
return
}
select {
case <-downc:
case <-b.updateAddrsC:
case <-b.stopc:
return
}
b.notifyAddrs()
}
}
}
func (b *simpleBalancer) notifyAddrs() {
b.mu.RLock()
addrs := b.addrs
b.mu.RUnlock()
select {
case b.notifyCh <- addrs:
case <-b.stopc:
}
}
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at the application layer. Otherwise our simpleBalancer
// might panic since b.upc is closed.
if b.closed {
return func(err error) {}
}
// gRPC might call Up on a stale address.
// Prevent updating pinAddr with a stale address.
if !hasAddr(b.addrs, addr.Addr) {
return func(err error) {}
}
if b.pinAddr != "" {
return func(err error) {}
}
// notify waiting Get()s and pin first connected address
close(b.upc)
b.downc = make(chan struct{})
b.pinAddr = addr.Addr
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
b.mu.Lock()
b.upc = make(chan struct{})
close(b.downc)
b.pinAddr = ""
b.mu.Unlock()
}
}
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var (
addr string
closed bool
)
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr == "" {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-b.donec:
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr != "" {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *simpleBalancer) Close() error {
b.mu.Lock()
// In case gRPC calls close twice. TODO: remove the checking
// when we are sure that gRPC wont call close twice.
if b.closed {
b.mu.Unlock()
<-b.donec
return nil
}
b.closed = true
close(b.stopc)
b.pinAddr = ""
// In the case of following scenario:
// 1. upc is not closed; no pinned address
// 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks
// 3. clientconn.Close() calls balancer.Close(); closed = true
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
// we must close upc so Get() exits from blocking on upc
select {
case <-b.upc:
default:
// terminate all waiting Get()s
close(b.upc)
}
b.mu.Unlock()
// wait for updateNotifyLoop to finish
<-b.donec
close(b.notifyCh)
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}

View File

@ -1,239 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"errors"
"net"
"sync"
"testing"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
var (
endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
)
func TestBalancerGetUnblocking(t *testing.T) {
sb := newSimpleBalancer(endpoints)
defer sb.Close()
if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't")
}
unblockingOpts := grpc.BalancerGetOptions{BlockingWait: false}
_, _, err := sb.Get(context.Background(), unblockingOpts)
if err != ErrNoAddrAvilable {
t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
}
down1 := sb.Up(grpc.Address{Addr: endpoints[1]})
if addrs := <-sb.Notify(); len(addrs) != 1 {
t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed")
}
down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
addrFirst, putFun, err := sb.Get(context.Background(), unblockingOpts)
if err != nil {
t.Errorf("Get() with up endpoints should success, got %v", err)
}
if addrFirst.Addr != endpoints[1] {
t.Errorf("Get() didn't return expected address, got %v", addrFirst)
}
if putFun == nil {
t.Errorf("Get() returned unexpected nil put function")
}
addrSecond, _, _ := sb.Get(context.Background(), unblockingOpts)
if addrFirst.Addr != addrSecond.Addr {
t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
}
down1(errors.New("error"))
if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection")
}
down2(errors.New("error"))
_, _, err = sb.Get(context.Background(), unblockingOpts)
if err != ErrNoAddrAvilable {
t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
}
}
func TestBalancerGetBlocking(t *testing.T) {
sb := newSimpleBalancer(endpoints)
defer sb.Close()
if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't")
}
blockingOpts := grpc.BalancerGetOptions{BlockingWait: true}
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
_, _, err := sb.Get(ctx, blockingOpts)
if err != context.DeadlineExceeded {
t.Errorf("Get() with no up endpoints should timeout, got %v", err)
}
downC := make(chan func(error), 1)
go func() {
// ensure sb.Up() will be called after sb.Get() to see if Up() releases blocking Get()
time.Sleep(time.Millisecond * 100)
f := sb.Up(grpc.Address{Addr: endpoints[1]})
if addrs := <-sb.Notify(); len(addrs) != 1 {
t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed")
}
downC <- f
}()
addrFirst, putFun, err := sb.Get(context.Background(), blockingOpts)
if err != nil {
t.Errorf("Get() with up endpoints should success, got %v", err)
}
if addrFirst.Addr != endpoints[1] {
t.Errorf("Get() didn't return expected address, got %v", addrFirst)
}
if putFun == nil {
t.Errorf("Get() returned unexpected nil put function")
}
down1 := <-downC
down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
addrSecond, _, _ := sb.Get(context.Background(), blockingOpts)
if addrFirst.Addr != addrSecond.Addr {
t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
}
down1(errors.New("error"))
if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection")
}
down2(errors.New("error"))
ctx, _ = context.WithTimeout(context.Background(), time.Millisecond*100)
_, _, err = sb.Get(ctx, blockingOpts)
if err != context.DeadlineExceeded {
t.Errorf("Get() with no up endpoints should timeout, got %v", err)
}
}
// TestBalancerDoNotBlockOnClose ensures that balancer and grpc don't deadlock each other
// due to rapid open/close conn. The deadlock causes balancer.Close() to block forever.
// See issue: https://github.com/coreos/etcd/issues/7283 for more detail.
func TestBalancerDoNotBlockOnClose(t *testing.T) {
defer testutil.AfterTest(t)
kcl := newKillConnListener(t, 3)
defer kcl.close()
for i := 0; i < 5; i++ {
sb := newSimpleBalancer(kcl.endpoints())
conn, err := grpc.Dial("", grpc.WithInsecure(), grpc.WithBalancer(sb))
if err != nil {
t.Fatal(err)
}
kvc := pb.NewKVClient(conn)
<-sb.readyc
var wg sync.WaitGroup
wg.Add(100)
cctx, cancel := context.WithCancel(context.TODO())
for j := 0; j < 100; j++ {
go func() {
defer wg.Done()
kvc.Range(cctx, &pb.RangeRequest{}, grpc.FailFast(false))
}()
}
// balancer.Close() might block
// if balancer and grpc deadlock each other.
bclosec, cclosec := make(chan struct{}), make(chan struct{})
go func() {
defer close(bclosec)
sb.Close()
}()
go func() {
defer close(cclosec)
conn.Close()
}()
select {
case <-bclosec:
case <-time.After(3 * time.Second):
testutil.FatalStack(t, "balancer close timeout")
}
select {
case <-cclosec:
case <-time.After(3 * time.Second):
t.Fatal("grpc conn close timeout")
}
cancel()
wg.Wait()
}
}
// killConnListener listens for incoming connections and kills them immediately.
type killConnListener struct {
wg sync.WaitGroup
eps []string
stopc chan struct{}
t *testing.T
}
func newKillConnListener(t *testing.T, size int) *killConnListener {
kcl := &killConnListener{stopc: make(chan struct{}), t: t}
for i := 0; i < size; i++ {
ln, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatal(err)
}
kcl.eps = append(kcl.eps, ln.Addr().String())
kcl.wg.Add(1)
go kcl.listen(ln)
}
return kcl
}
func (kcl *killConnListener) endpoints() []string {
return kcl.eps
}
func (kcl *killConnListener) listen(l net.Listener) {
go func() {
defer kcl.wg.Done()
for {
conn, err := l.Accept()
select {
case <-kcl.stopc:
return
default:
}
if err != nil {
kcl.t.Fatal(err)
}
time.Sleep(1 * time.Millisecond)
conn.Close()
}
}()
<-kcl.stopc
l.Close()
}
func (kcl *killConnListener) close() {
close(kcl.stopc)
kcl.wg.Wait()
}

View File

@ -31,7 +31,9 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
var (
@ -51,21 +53,22 @@ type Client struct {
conn *grpc.ClientConn
dialerrc chan error
cfg Config
creds *credentials.TransportCredentials
balancer *simpleBalancer
retryWrapper retryRpcFunc
retryAuthWrapper retryRpcFunc
cfg Config
creds *credentials.TransportCredentials
balancer *healthBalancer
mu *sync.Mutex
ctx context.Context
cancel context.CancelFunc
// Username is a username for authentication
// Username is a user name for authentication.
Username string
// Password is a password for authentication
// Password is a password for authentication.
Password string
// tokenCred is an instance of WithPerRPCCredentials()'s argument
tokenCred *authTokenCredential
callOpts []grpc.CallOption
}
// New creates a new etcdv3 client from a given configuration.
@ -116,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) {
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
c.cfg.Endpoints = eps
c.balancer.updateAddrs(eps)
c.mu.Unlock()
c.balancer.updateAddrs(eps...)
// updating notifyCh can trigger new connections,
// so we only need to push new addrs if all connections are down
// or addrs does not include pinAddr.
c.balancer.mu.RLock()
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
c.balancer.mu.RUnlock()
if update {
select {
case c.balancer.updateAddrsC <- notifyNext:
case <-c.balancer.stopc:
}
}
}
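For reference, a minimal sketch (not part of this diff) of how a caller exercises the updated SetEndpoints path; the endpoint addresses are placeholders:

package main

import (
    "log"
    "time"

    "github.com/coreos/etcd/clientv3"
)

func main() {
    // Dial a single seed endpoint; the addresses below are placeholders.
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"localhost:2379"},
        DialTimeout: 2 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    // Later, hand the balancer the full member list; per the code above, a
    // notify is pushed only if the pinned address is not in the new set.
    cli.SetEndpoints("localhost:2379", "localhost:22379", "localhost:32379")
}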
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
@ -144,8 +162,10 @@ func (c *Client) autoSync() {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
logger.Println("Auto sync endpoints failed:", err)
}
}
@ -174,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
host = endpoint
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
return proto, host, scheme
}
scheme = url.Scheme
@ -188,7 +208,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
default:
proto, host = "", ""
}
return
return proto, host, scheme
}
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
@ -207,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
default:
creds = nil
}
return
return creds
}
// dialSetupOpts gives the dial opts prior to any authentication
@ -215,10 +235,17 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
}
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
if host == "" && endpoint != "" {
// dialing an endpoint not in the balancer; use
// endpoint passed into dial
@ -270,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error {
endpoint := c.cfg.Endpoints[i]
host := getHost(endpoint)
// use dial options without dopts to avoid reusing the client balancer
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
if err != nil {
continue
}
@ -311,7 +338,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = grpc.ErrClientConnTimeout
err = context.DeadlineExceeded
}
return nil, err
}
@ -333,7 +360,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
return metadata.NewOutgoingContext(ctx, md)
}
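A small usage sketch, assuming an already-dialed client: WithRequireLeader attaches metadata so the contacted member rejects the request when it has lost the cluster leader. The key "foo" is a placeholder.

package example

import (
    "context"

    "github.com/coreos/etcd/clientv3"
)

// getWithLeader issues a Get that is rejected by the server when its member
// currently has no leader, instead of silently waiting.
func getWithLeader(cli *clientv3.Client) (*clientv3.GetResponse, error) {
    ctx := clientv3.WithRequireLeader(context.Background())
    return cli.Get(ctx, "foo")
}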
func newClient(cfg *Config) (*Client, error) {
@ -360,15 +387,37 @@ func newClient(cfg *Config) (*Client, error) {
creds: creds,
ctx: ctx,
cancel: cancel,
mu: new(sync.Mutex),
callOpts: defaultCallOpts,
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
}
callOpts := []grpc.CallOption{
defaultFailFast,
defaultMaxCallSendMsgSize,
defaultMaxCallRecvMsgSize,
}
if cfg.MaxCallSendMsgSize > 0 {
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
}
if cfg.MaxCallRecvMsgSize > 0 {
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
}
client.callOpts = callOpts
}
client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
return grpcHealthCheck(client, ep)
})
client.balancer = newSimpleBalancer(cfg.Endpoints)
// use Endpoints[0] so that for https:// without any tls config given, then
// grpc will assume the ServerName is in the endpoint.
// grpc will assume the certificate server name is the endpoint host.
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil {
client.cancel()
@ -376,21 +425,19 @@ func newClient(cfg *Config) (*Client, error) {
return nil, err
}
client.conn = conn
client.retryWrapper = client.newRetryWrapper()
client.retryAuthWrapper = client.newAuthRetryWrapper()
// wait for a connection
if cfg.DialTimeout > 0 {
hasConn := false
waitc := time.After(cfg.DialTimeout)
select {
case <-client.balancer.readyc:
case <-client.balancer.ready():
hasConn = true
case <-ctx.Done():
case <-waitc:
}
if !hasConn {
err := grpc.ErrClientConnTimeout
err := context.DeadlineExceeded
select {
case err = <-client.dialerrc:
default:
@ -425,7 +472,7 @@ func (c *Client) checkVersion() (err error) {
errc := make(chan error, len(c.cfg.Endpoints))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
wg.Add(len(c.cfg.Endpoints))
for _, ep := range c.cfg.Endpoints {
@ -440,7 +487,7 @@ func (c *Client) checkVersion() (err error) {
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
maj, rerr = strconv.Atoi(vs[0])
maj, _ = strconv.Atoi(vs[0])
min, rerr = strconv.Atoi(vs[1])
}
if maj < 3 || (maj == 3 && min < 2) {
@ -472,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool {
if err == nil {
return false
}
code := grpc.Code(err)
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return code != codes.Unavailable && code != codes.Internal
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
func toErr(ctx context.Context, err error) error {
@ -490,7 +537,8 @@ func toErr(ctx context.Context, err error) error {
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
code := grpc.Code(err)
ev, _ := status.FromError(err)
code := ev.Code()
switch code {
case codes.DeadlineExceeded:
fallthrough
@ -499,7 +547,6 @@ func toErr(ctx context.Context, err error) error {
err = ctx.Err()
}
case codes.Unavailable:
err = ErrNoAvailableEndpoints
case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing
}
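A hedged sketch of what the toErr translation above means for callers: context errors surface unchanged, while server-side failures come back as typed rpctypes errors. The client is assumed to be already dialed, and the empty key is used only to provoke ErrEmptyKey.

package example

import (
    "context"
    "log"
    "time"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

// classifyPutError distinguishes context errors from typed etcd errors.
func classifyPutError(cli *clientv3.Client) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    _, err := cli.Put(ctx, "", "v")
    cancel()
    switch err {
    case nil:
        // success
    case context.Canceled, context.DeadlineExceeded:
        log.Println("context error:", err)
    case rpctypes.ErrEmptyKey:
        log.Println("etcd rejected the request:", err)
    default:
        log.Println("other error:", err)
    }
}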

View File

@ -22,8 +22,8 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
func TestDialCancel(t *testing.T) {
@ -45,7 +45,7 @@ func TestDialCancel(t *testing.T) {
t.Fatal(err)
}
// connect to ipv4 blackhole so dial blocks
// connect to ipv4 black hole so dial blocks
c.SetEndpoints("http://254.0.0.1:12345")
// issue Get to force redial attempts
@ -97,7 +97,7 @@ func TestDialTimeout(t *testing.T) {
for i, cfg := range testCfgs {
donec := make(chan error)
go func() {
// without timeout, dial continues forever on ipv4 blackhole
// without timeout, dial continues forever on ipv4 black hole
c, err := New(cfg)
if c != nil || err == nil {
t.Errorf("#%d: new client should fail", i)
@ -117,8 +117,8 @@ func TestDialTimeout(t *testing.T) {
case <-time.After(5 * time.Second):
t.Errorf("#%d: failed to timeout dial on time", i)
case err := <-donec:
if err != grpc.ErrClientConnTimeout {
t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
if err != context.DeadlineExceeded {
t.Errorf("#%d: unexpected error %v, want %v", i, err, context.DeadlineExceeded)
}
}
}

View File

@ -15,11 +15,12 @@
package clientv3util_test
import (
"context"
"log"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/clientv3util"
"golang.org/x/net/context"
)
func ExampleKeyExists_put() {
@ -33,7 +34,7 @@ func ExampleKeyExists_put() {
kvc := clientv3.NewKV(cli)
// perform a put only if key is missing
// It is useful to do the check (transactionally) to avoid overwriting
// It is useful to do the check atomically to avoid overwriting
// the existing key which would generate potentially unwanted events,
// unless of course you wanted to do an overwrite no matter what.
_, err = kvc.Txn(context.Background()).

View File

@ -16,6 +16,7 @@ package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@ -43,20 +44,29 @@ type Cluster interface {
}
type cluster struct {
remote pb.ClusterClient
remote pb.ClusterClient
callOpts []grpc.CallOption
}
func NewCluster(c *Client) Cluster {
return &cluster{remote: RetryClusterClient(c)}
api := &cluster{remote: RetryClusterClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
return &cluster{remote: remote}
func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
api := &cluster{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r)
resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
@ -65,7 +75,7 @@ func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAdd
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r)
resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
@ -74,27 +84,19 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
if err == nil {
return (*MemberListResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
if err == nil {
return (*MemberListResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
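For context, a minimal sketch of calling the Cluster API that Client embeds; cli is assumed to be an already-dialed *clientv3.Client, and any retries are left to the RetryClusterClient wrapper rather than the caller.

package example

import (
    "context"
    "fmt"

    "github.com/coreos/etcd/clientv3"
)

// listMembers prints the current cluster membership with a single call.
func listMembers(cli *clientv3.Client) error {
    resp, err := cli.MemberList(context.Background())
    if err != nil {
        return err
    }
    for _, m := range resp.Members {
        fmt.Println(m.Name, m.ClientURLs)
    }
    return nil
}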

View File

@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
// WithCompactPhysical makes Compact wait until all compacted entries are
// removed from the etcd server's storage.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
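A short sketch of the option in use, assuming an already-dialed client; the key name is a placeholder read only to learn the current revision.

package example

import (
    "context"

    "github.com/coreos/etcd/clientv3"
)

// compactNow compacts the key space up to the current revision and, because of
// WithCompactPhysical, returns only after the compaction is applied to the backend.
func compactNow(cli *clientv3.Client) error {
    resp, err := cli.Get(context.Background(), "any-key") // placeholder key
    if err != nil {
        return err
    }
    _, err = cli.Compact(context.Background(), resp.Header.Revision, clientv3.WithCompactPhysical())
    return err
}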

View File

@ -99,6 +99,7 @@ func (cmp *Cmp) ValueBytes() []byte {
// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
@ -108,3 +109,12 @@ func mustInt64(val interface{}) int64 {
}
panic("bad value")
}
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
func mustInt64orLeaseID(val interface{}) int64 {
if v, ok := val.(LeaseID); ok {
return int64(v)
}
return mustInt64(val)
}

View File

@ -21,6 +21,7 @@ import (
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
)
@ -185,12 +186,12 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
cancel()
return
}
// only accept PUTs; a DELETE will make observe() spin
// only accept puts; a delete will make observe() spin
for _, ev := range wr.Events {
if ev.Type == mvccpb.PUT {
hdr, kv = &wr.Header, ev.Kv
// may have multiple revs; hdr.rev = the last rev
// set to kv's rev in case batch has multiple PUTs
// set to kv's rev in case batch has multiple Puts
hdr.Revision = kv.ModRevision
break
}
@ -213,6 +214,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
for !keyDeleted {
wr, ok := <-wch
if !ok {
cancel()
return
}
for _, ev := range wr.Events {
@ -225,6 +227,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
select {
case ch <- *resp:
case <-cctx.Done():
cancel()
return
}
}
@ -240,4 +243,4 @@ func (e *Election) Key() string { return e.leaderKey }
func (e *Election) Rev() int64 { return e.leaderRev }
// Header is the response header from the last successful election proposal.
func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
func (e *Election) Header() *pb.ResponseHeader { return e.hdr }

View File

@ -20,6 +20,7 @@ import (
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
)

View File

@ -20,6 +20,7 @@ import (
v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
@ -49,7 +50,9 @@ func (m *Mutex) Lock(ctx context.Context) error {
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
// fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
return err
}
@ -57,6 +60,12 @@ func (m *Mutex) Lock(ctx context.Context) error {
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
// if there is no key on the prefix, or our key has the minimum create revision, we already hold the lock
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
// wait for deletion revisions prior to myKey
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
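To show the lock flow this fast path optimizes, a minimal sketch of Mutex usage (not from this diff); the lock prefix is a placeholder and cli is assumed to be an already-dialed client.

package example

import (
    "context"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/clientv3/concurrency"
)

// withLock runs fn while holding a distributed lock. With the change above,
// the uncontended case writes the candidate key and reads the current owner
// in a single Txn RPC.
func withLock(cli *clientv3.Client, fn func() error) error {
    sess, err := concurrency.NewSession(cli) // session lease backs the lock key
    if err != nil {
        return err
    }
    defer sess.Close()

    m := concurrency.NewMutex(sess, "/my-lock/") // placeholder prefix
    if err := m.Lock(context.Background()); err != nil {
        return err
    }
    defer m.Unlock(context.Background())
    return fn()
}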

View File

@ -18,6 +18,7 @@ import (
"time"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
@ -53,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
ctx, cancel := context.WithCancel(ops.ctx)
keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil {
cancel()
return nil, err
}

View File

@ -18,6 +18,7 @@ import (
"math"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
@ -46,7 +47,7 @@ const (
// SerializableSnapshot provides serializable isolation and also checks
// for write conflicts.
SerializableSnapshot Isolation = iota
// Serializable reads within the same transactiona attempt return data
// Serializable reads within the same transaction attempt return data
// from the revision of the first read.
Serializable
// RepeatableReads reads within the same transaction attempt always
@ -85,7 +86,7 @@ func WithPrefetch(keys ...string) stmOption {
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
}
// NewSTM initiates a new STM instance, using snapshot isolation by default.
// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
opts := &stmOptions{ctx: c.Ctx()}
for _, f := range so {
@ -193,11 +194,12 @@ func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
}
}
// first returns the store revision from the first fetch
func (rs readSet) first() int64 {
ret := int64(math.MaxInt64 - 1)
for _, resp := range rs {
if len(resp.Kvs) > 0 && resp.Kvs[0].ModRevision < ret {
ret = resp.Kvs[0].ModRevision
if rev := resp.Header.Revision; rev < ret {
ret = rev
}
}
return ret

View File

@ -33,10 +33,31 @@ type Config struct {
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration `json:"dial-timeout"`
// DialKeepAliveTime is the time after which the client pings the server to see if
// the transport is alive.
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
// DialKeepAliveTimeout is the time that the client waits for a response to the
// keep-alive probe. If the response is not received in this time, the connection is closed.
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
// MaxCallSendMsgSize is the client-side request send limit in bytes.
// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
MaxCallSendMsgSize int
// MaxCallRecvMsgSize is the client-side response receive limit.
// If 0, it defaults to "math.MaxInt32", because range response can
// easily exceed request send limits.
// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
MaxCallRecvMsgSize int
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Username is a username for authentication.
// Username is a user name for authentication.
Username string `json:"username"`
// Password is a password for authentication.
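A minimal sketch of a Config that exercises the new keepalive and message-size knobs documented above; the endpoint and the limit values are placeholders and must stay within the server's --max-request-bytes setting.

package main

import (
    "log"
    "time"

    "github.com/coreos/etcd/clientv3"
)

func main() {
    cfg := clientv3.Config{
        Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
        DialTimeout: 2 * time.Second,

        // Probe the connection every 10s and give up after 3s without a response.
        DialKeepAliveTime:    10 * time.Second,
        DialKeepAliveTimeout: 3 * time.Second,

        // Illustrative limits: 4 MiB requests, 16 MiB responses.
        MaxCallSendMsgSize: 4 * 1024 * 1024,
        MaxCallRecvMsgSize: 16 * 1024 * 1024,
    }
    cli, err := clientv3.New(cfg)
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()
}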

View File

@ -28,7 +28,7 @@
// Make sure to close the client after using it. If the client is not closed, the
// connection will have leaky goroutines.
//
// To specify client request timeout, pass context.WithTimeout to APIs:
// To specify a client request timeout, wrap the context with context.WithTimeout:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")

View File

@ -1,113 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3_test
import (
"fmt"
"log"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
func ExampleAuth() {
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
})
if err != nil {
log.Fatal(err)
}
defer cli.Close()
if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil {
log.Fatal(err)
}
if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil {
log.Fatal(err)
}
if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil {
log.Fatal(err)
}
if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil {
log.Fatal(err)
}
if _, err = cli.RoleGrantPermission(
context.TODO(),
"r", // role name
"foo", // key
"zoo", // range end
clientv3.PermissionType(clientv3.PermReadWrite),
); err != nil {
log.Fatal(err)
}
if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil {
log.Fatal(err)
}
if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil {
log.Fatal(err)
}
if _, err = cli.AuthEnable(context.TODO()); err != nil {
log.Fatal(err)
}
cliAuth, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
Username: "u",
Password: "123",
})
if err != nil {
log.Fatal(err)
}
defer cliAuth.Close()
if _, err = cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil {
log.Fatal(err)
}
_, err = cliAuth.Txn(context.TODO()).
If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
Then(clientv3.OpPut("zoo1", "XYZ")).
Else(clientv3.OpPut("zoo1", "ABC")).
Commit()
fmt.Println(err)
// now check the permission with the root account
rootCli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
Username: "root",
Password: "123",
})
if err != nil {
log.Fatal(err)
}
defer rootCli.Close()
resp, err := rootCli.RoleGet(context.TODO(), "r")
if err != nil {
log.Fatal(err)
}
fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
if _, err = rootCli.AuthDisable(context.TODO()); err != nil {
log.Fatal(err)
}
// Output: etcdserver: permission denied
// user u permission: key "foo", range end "zoo"
}

View File

@ -19,6 +19,7 @@ import (
"log"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)

View File

@ -20,6 +20,7 @@ import (
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"golang.org/x/net/context"
)
@ -236,8 +237,11 @@ func ExampleKV_txn() {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
_, err = kvc.Txn(ctx).
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical
Then(clientv3.OpPut("key", "XYZ")). // this runs, since 'xyz' > 'abc'
// txn value comparisons are lexical
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")).
// the "Then" runs, since "xyz" > "abc"
Then(clientv3.OpPut("key", "XYZ")).
// the "Else" does not run
Else(clientv3.OpPut("key", "ABC")).
Commit()
cancel()

View File

@ -19,6 +19,7 @@ import (
"log"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)

View File

@ -18,9 +18,8 @@ import (
"fmt"
"log"
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
func ExampleMaintenance_status() {
@ -34,20 +33,15 @@ func ExampleMaintenance_status() {
}
defer cli.Close()
// resp, err := cli.Status(context.Background(), ep)
//
// or
//
mapi := clientv3.NewMaintenance(cli)
resp, err := mapi.Status(context.Background(), ep)
resp, err := cli.Status(context.Background(), ep)
if err != nil {
log.Fatal(err)
}
fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader)
fmt.Printf("endpoint: %s / Leader: %v\n", ep, resp.Header.MemberId == resp.Leader)
}
// endpoint: localhost:2379 / IsLeader: false
// endpoint: localhost:22379 / IsLeader: false
// endpoint: localhost:32379 / IsLeader: true
// endpoint: localhost:2379 / Leader: false
// endpoint: localhost:22379 / Leader: false
// endpoint: localhost:32379 / Leader: true
}
func ExampleMaintenance_defragment() {

View File

@ -43,10 +43,10 @@ func ExampleClient_metrics() {
}
defer cli.Close()
// get a key so it shows up in the metrics as a range rpc
// get a key so it shows up in the metrics as a range RPC
cli.Get(context.TODO(), "test_key")
// listen for all prometheus metrics
// listen for all Prometheus metrics
ln, err := net.Listen("tcp", ":0")
if err != nil {
log.Fatal(err)
@ -61,7 +61,7 @@ func ExampleClient_metrics() {
<-donec
}()
// make an http request to fetch all prometheus metrics
// make an http request to fetch all Prometheus metrics
url := "http://" + ln.Addr().String() + "/metrics"
resp, err := http.Get(url)
if err != nil {
@ -80,5 +80,6 @@ func ExampleClient_metrics() {
break
}
}
// Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
// Output:
// grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
}

View File

@ -16,12 +16,14 @@ package clientv3_test
import (
"log"
"os"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/pkg/capnslog"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
)
var (
@ -31,8 +33,7 @@ var (
)
func Example() {
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
clientv3.SetLogger(plog)
clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,

View File

@ -19,6 +19,7 @@ import (
"log"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)

46
clientv3/grpc_options.go Normal file
View File

@ -0,0 +1,46 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"math"
"google.golang.org/grpc"
)
var (
// Disable gRPC's internal retry logic
// TODO: enable when gRPC retry is stable (FailFast=false)
// Reference:
// - https://github.com/grpc/grpc-go/issues/1532
// - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
defaultFailFast = grpc.FailFast(true)
// client-side request send limit, gRPC default is math.MaxInt32
// Make sure that "client-side send limit < server-side default send/recv limit"
// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
// client-side response receive limit, gRPC default is 4MB
// Make sure that "client-side receive limit >= server-side default send/recv limit"
// because range response can easily exceed request send limits
// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
)
// defaultCallOpts defines a list of default "gRPC.CallOption".
// Some options are exposed to "clientv3.Config".
// Defaults will be overridden by the settings in "clientv3.Config".
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}

627
clientv3/health_balancer.go Normal file
View File

@ -0,0 +1,627 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"errors"
"net/url"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
const (
minHealthRetryDuration = 3 * time.Second
unknownService = "unknown service grpc.health.v1.Health"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
type healthCheckFunc func(ep string) (bool, error)
type notifyMsg int
const (
notifyReset notifyMsg = iota
notifyNext
)
// healthBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type healthBalancer struct {
// addrs are the client's endpoint addresses for grpc
addrs []grpc.Address
// eps holds the raw endpoints from the client
eps []string
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// healthCheck checks an endpoint's health.
healthCheck healthCheckFunc
healthCheckTimeout time.Duration
unhealthyMu sync.RWMutex
unhealthyHostPorts map[string]time.Time
// mu protects all fields below.
mu sync.RWMutex
// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
upc chan struct{}
// downc closes when grpc calls down() on pinAddr
downc chan struct{}
// stopc is closed to signal updateNotifyLoop should stop.
stopc chan struct{}
stopOnce sync.Once
wg sync.WaitGroup
// donec closes when all goroutines are exited
donec chan struct{}
// updateAddrsC notifies updateNotifyLoop to update addrs.
updateAddrsC chan notifyMsg
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
hostPort2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
closed bool
}
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
notifyCh := make(chan []grpc.Address)
addrs := eps2addrs(eps)
hb := &healthBalancer{
addrs: addrs,
eps: eps,
notifyCh: notifyCh,
readyc: make(chan struct{}),
healthCheck: hc,
unhealthyHostPorts: make(map[string]time.Time),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
updateAddrsC: make(chan notifyMsg),
hostPort2ep: getHostPort2ep(eps),
}
if timeout < minHealthRetryDuration {
timeout = minHealthRetryDuration
}
hb.healthCheckTimeout = timeout
close(hb.downc)
go hb.updateNotifyLoop()
hb.wg.Add(1)
go func() {
defer hb.wg.Done()
hb.updateUnhealthy()
}()
return hb
}
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *healthBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
func (b *healthBalancer) endpoint(hostPort string) string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.hostPort2ep[hostPort]
}
func (b *healthBalancer) pinned() string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.pinAddr
}
func (b *healthBalancer) hostPortError(hostPort string, err error) {
if b.endpoint(hostPort) == "" {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
}
return
}
b.unhealthyMu.Lock()
b.unhealthyHostPorts[hostPort] = time.Now()
b.unhealthyMu.Unlock()
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
}
}
func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
if b.endpoint(hostPort) == "" {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
}
return
}
b.unhealthyMu.Lock()
delete(b.unhealthyHostPorts, hostPort)
b.unhealthyMu.Unlock()
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
}
}
func (b *healthBalancer) countUnhealthy() (count int) {
b.unhealthyMu.RLock()
count = len(b.unhealthyHostPorts)
b.unhealthyMu.RUnlock()
return count
}
func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
b.unhealthyMu.RLock()
_, unhealthy = b.unhealthyHostPorts[hostPort]
b.unhealthyMu.RUnlock()
return unhealthy
}
func (b *healthBalancer) cleanupUnhealthy() {
b.unhealthyMu.Lock()
for k, v := range b.unhealthyHostPorts {
if time.Since(v) > b.healthCheckTimeout {
delete(b.unhealthyHostPorts, k)
if logger.V(4) {
logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
}
}
}
b.unhealthyMu.Unlock()
}
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
unhealthyCnt := b.countUnhealthy()
b.mu.RLock()
defer b.mu.RUnlock()
hbAddrs := b.addrs
if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
for k := range b.hostPort2ep {
liveHostPorts[k] = struct{}{}
}
return hbAddrs, liveHostPorts
}
addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
liveHostPorts := make(map[string]struct{}, len(addrs))
for _, addr := range b.addrs {
if !b.isUnhealthy(addr.Addr) {
addrs = append(addrs, addr)
liveHostPorts[addr.Addr] = struct{}{}
}
}
return addrs, liveHostPorts
}
func (b *healthBalancer) updateUnhealthy() {
for {
select {
case <-time.After(b.healthCheckTimeout):
b.cleanupUnhealthy()
pinned := b.pinned()
if pinned == "" || b.isUnhealthy(pinned) {
select {
case b.updateAddrsC <- notifyNext:
case <-b.stopc:
return
}
}
case <-b.stopc:
return
}
}
}
func (b *healthBalancer) updateAddrs(eps ...string) {
np := getHostPort2ep(eps)
b.mu.Lock()
defer b.mu.Unlock()
match := len(np) == len(b.hostPort2ep)
if match {
for k, v := range np {
if b.hostPort2ep[k] != v {
match = false
break
}
}
}
if match {
// same endpoints, so no need to update address
return
}
b.hostPort2ep = np
b.addrs, b.eps = eps2addrs(eps), eps
b.unhealthyMu.Lock()
b.unhealthyHostPorts = make(map[string]time.Time)
b.unhealthyMu.Unlock()
}
func (b *healthBalancer) next() {
b.mu.RLock()
downc := b.downc
b.mu.RUnlock()
select {
case b.updateAddrsC <- notifyNext:
case <-b.stopc:
}
// wait until disconnect so new RPCs are not issued on old connection
select {
case <-downc:
case <-b.stopc:
}
}
func (b *healthBalancer) updateNotifyLoop() {
defer close(b.donec)
for {
b.mu.RLock()
upc, downc, addr := b.upc, b.downc, b.pinAddr
b.mu.RUnlock()
// downc or upc should be closed
select {
case <-downc:
downc = nil
default:
}
select {
case <-upc:
upc = nil
default:
}
switch {
case downc == nil && upc == nil:
// stale
select {
case <-b.stopc:
return
default:
}
case downc == nil:
b.notifyAddrs(notifyReset)
select {
case <-upc:
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
return
}
case upc == nil:
select {
// close connections that are not the pinned address
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
case <-downc:
case <-b.stopc:
return
}
select {
case <-downc:
b.notifyAddrs(notifyReset)
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
return
}
}
}
}
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
if msg == notifyNext {
select {
case b.notifyCh <- []grpc.Address{}:
case <-b.stopc:
return
}
}
b.mu.RLock()
pinAddr := b.pinAddr
downc := b.downc
b.mu.RUnlock()
addrs, hostPorts := b.liveAddrs()
var waitDown bool
if pinAddr != "" {
_, ok := hostPorts[pinAddr]
waitDown = !ok
}
select {
case b.notifyCh <- addrs:
if waitDown {
select {
case <-downc:
case <-b.stopc:
}
}
case <-b.stopc:
}
}
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
if !b.mayPin(addr) {
return func(err error) {}
}
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at application layer. Otherwise, will panic
// if b.upc is already closed.
if b.closed {
return func(err error) {}
}
// gRPC might call Up on a stale address.
// Prevent updating pinAddr with a stale address.
if !hasAddr(b.addrs, addr.Addr) {
return func(err error) {}
}
if b.pinAddr != "" {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
}
return func(err error) {}
}
// notify waiting Get()s and pin first connected address
close(b.upc)
b.downc = make(chan struct{})
b.pinAddr = addr.Addr
if logger.V(4) {
logger.Infof("clientv3/balancer: pin %q", addr.Addr)
}
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
// If connected to a black hole endpoint or a killed server, the gRPC ping
// timeout will induce a network I/O error and gRPC will keep retrying until success;
// finding a healthy endpoint on retry could take several timeouts and redials.
// To avoid wasting retries, gray-list unhealthy endpoints.
b.hostPortError(addr.Addr, err)
b.mu.Lock()
b.upc = make(chan struct{})
close(b.downc)
b.pinAddr = ""
b.mu.Unlock()
if logger.V(4) {
logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
}
}
}
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
if b.endpoint(addr.Addr) == "" { // stale host:port
return false
}
b.unhealthyMu.RLock()
unhealthyCnt := len(b.unhealthyHostPorts)
failedTime, bad := b.unhealthyHostPorts[addr.Addr]
b.unhealthyMu.RUnlock()
b.mu.RLock()
skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
b.mu.RUnlock()
if skip || !bad {
return true
}
// prevent an isolated member's endpoint from being retried forever, as follows:
// 1. keepalive ping detects GoAway with http2.ErrCodeEnhanceYourCalm
// 2. balancer 'Up' unpins with "grpc: failed with network I/O error"
// 3. the gRPC health check still reports SERVING, so the balancer retries the pin
// instead, return before the health check if the endpoint failed within the health-check timeout
if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
if logger.V(4) {
logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
}
return false
}
if ok, _ := b.healthCheck(addr.Addr); ok {
b.removeUnhealthy(addr.Addr, "health check success")
return true
}
b.hostPortError(addr.Addr, errors.New("health check failed"))
return false
}
func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var (
addr string
closed bool
)
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr == "" {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-b.donec:
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr != "" {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *healthBalancer) Close() error {
b.mu.Lock()
// In case gRPC calls close twice. TODO: remove the checking
// when we are sure that gRPC won't call close twice.
if b.closed {
b.mu.Unlock()
<-b.donec
return nil
}
b.closed = true
b.stopOnce.Do(func() { close(b.stopc) })
b.pinAddr = ""
// In the case of following scenario:
// 1. upc is not closed; no pinned address
// 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
// 3. client.conn.Close() calls balancer.Close(); closed = true
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
// we must close upc so Get() exits from blocking on upc
select {
case <-b.upc:
default:
// terminate all waiting Get()s
close(b.upc)
}
b.mu.Unlock()
b.wg.Wait()
// wait for updateNotifyLoop to finish
<-b.donec
close(b.notifyCh)
return nil
}
func grpcHealthCheck(client *Client, ep string) (bool, error) {
conn, err := client.dial(ep)
if err != nil {
return false, err
}
defer conn.Close()
cli := healthpb.NewHealthClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
cancel()
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
if s.Message() == unknownService { // etcd < v3.3.0
return true, nil
}
}
return false, err
}
return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}
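For context, the probe above uses the standard gRPC health-checking service. A minimal standalone sketch of the same call follows; the endpoint is a placeholder, and a real probe would reuse the client's dial and TLS options rather than an insecure connection.
package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// placeholder endpoint; production code should pass proper credentials
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// empty HealthCheckRequest queries the overall server health
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("serving:", resp.Status == healthpb.HealthCheckResponse_SERVING)
}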
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
for _, addr := range addrs {
if targetAddr == addr.Addr {
return true
}
}
return false
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}
func eps2addrs(eps []string) []grpc.Address {
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
return addrs
}
func getHostPort2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}
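To make the helpers above easier to follow, here is a hypothetical in-package sketch (it would have to live in the clientv3 package, since the helpers are unexported) showing how endpoints are normalized to host:port addresses and mapped back to their original form:
package clientv3

import "fmt"

// exampleEndpointHelpers is an editor's sketch, not part of the code base.
func exampleEndpointHelpers() {
	eps := []string{"https://10.0.0.1:2379", "http://10.0.0.2:2379"}

	fmt.Println(getHost(eps[0]))        // "10.0.0.1:2379" (scheme stripped)
	fmt.Println(eps2addrs(eps)[1].Addr) // "10.0.0.2:2379"

	// host:port -> original endpoint; used by mayPin to detect stale addresses
	hp2ep := getHostPort2ep(eps)
	fmt.Println(hp2ep["10.0.0.1:2379"]) // "https://10.0.0.1:2379"
}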

View File

@ -0,0 +1,211 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !cluster_proxy
package integration
import (
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
// TestBalancerUnderBlackholeKeepAliveWatch tests that when the watch discovers it cannot
// talk to the blackholed endpoint, the client balancer switches to a healthy one.
// TODO: test server-to-client keepalive ping
func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 2,
GRPCKeepAliveMinTime: 1 * time.Millisecond, // avoid too_many_pings
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
DialTimeout: 1 * time.Second,
DialKeepAliveTime: 1 * time.Second,
DialKeepAliveTimeout: 500 * time.Millisecond,
}
// pingInterval depends on gRPC's internal keepalive implementation.
pingInterval := ccfg.DialKeepAliveTime + ccfg.DialKeepAliveTimeout
// 3s for a slow machine to process the watch and reset connections
// TODO: only send healthy endpoints to gRPC so gRPC won't waste time
// dialing unhealthy endpoints;
// then we can reduce 3s to 1s.
timeout := pingInterval + 3*time.Second
cli, err := clientv3.New(ccfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify())
if _, ok := <-wch; !ok {
t.Fatalf("watch failed on creation")
}
// the balancer can switch to eps[1] when it detects the failure of eps[0]
cli.SetEndpoints(eps...)
clus.Members[0].Blackhole()
if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
}
select {
case <-wch:
case <-time.After(timeout):
t.Error("took too long to receive watch events")
}
clus.Members[0].Unblackhole()
// wait for eps[0] to be moved out of the unhealthy set so that it can be re-pinned.
time.Sleep(ccfg.DialTimeout)
clus.Members[1].Blackhole()
// make sure clus.Client(0) can connect to eps[0] after removing the blackhole.
if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil {
t.Fatal(err)
}
if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil {
t.Fatal(err)
}
select {
case <-wch:
case <-time.After(timeout):
t.Error("took too long to receive watch events")
}
}
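For reference, the keepalive knobs exercised by this test are plain clientv3.Config fields; a minimal sketch with a placeholder endpoint and placeholder values follows. As in the test, the worst-case failure-detection latency is roughly DialKeepAliveTime + DialKeepAliveTimeout.
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout:          2 * time.Second,
		DialKeepAliveTime:    10 * time.Second, // ping the server after 10s of inactivity
		DialKeepAliveTimeout: 3 * time.Second,  // close the connection if no ack within 3s
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}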
func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Put(ctx, "foo", "bar")
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
})
}
func TestBalancerUnderBlackholeNoKeepAliveDelete(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Delete(ctx, "foo")
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
})
}
func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Txn(ctx).
If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
Then(clientv3.OpPut("foo", "bar")).
Else(clientv3.OpPut("foo", "baz")).Commit()
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
})
}
func TestBalancerUnderBlackholeNoKeepAliveLinearizableGet(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a")
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
})
}
func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a", clientv3.WithSerializable())
if err == context.DeadlineExceeded {
return errExpected
}
return err
})
}
// testBalancerUnderBlackholeNoKeepAlive ensures that the first request to the blackholed
// endpoint fails due to context timeout, but the next try succeeds after an endpoint switch.
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 2,
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
DialTimeout: 1 * time.Second,
}
cli, err := clientv3.New(ccfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// wait for eps[0] to be pinned
mustWaitPinReady(t, cli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
cli.SetEndpoints(eps...)
// blackhole eps[0]
clus.Members[0].Blackhole()
// the first operation fails due to the blackhole; the retry should succeed
// TODO: the first operation can succeed
// once gRPC supports better retry on non-delivered requests
for i := 0; i < 2; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
err = op(cli, ctx)
cancel()
if err == nil {
break
}
if i == 0 {
if err != errExpected {
t.Errorf("#%d: expected %v, got %v", i, errExpected, err)
}
} else if err != nil {
t.Errorf("#%d: failed with error %v", i, err)
}
}
}

View File

@ -21,6 +21,7 @@ import (
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/types"
"golang.org/x/net/context"
)

View File

@ -16,6 +16,7 @@ package integration
import (
"math/rand"
"strings"
"testing"
"time"
@ -26,7 +27,6 @@ import (
"github.com/coreos/etcd/pkg/transport"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
var (
@ -48,21 +48,21 @@ var (
// TestDialTLSExpired tests client with expired certs fails to dial.
func TestDialTLSExpired(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
defer clus.Terminate(t)
tls, err := testTLSInfoExpired.ClientConfig()
if err != nil {
t.Fatal(err)
}
// expect remote errors 'tls: bad certificate'
// expect remote errors "tls: bad certificate"
_, err = clientv3.New(clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
DialTimeout: 3 * time.Second,
TLS: tls,
})
if err != grpc.ErrClientConnTimeout {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
if err != context.DeadlineExceeded {
t.Fatalf("expected %v, got %v", context.DeadlineExceeded, err)
}
}
@ -70,19 +70,20 @@ func TestDialTLSExpired(t *testing.T) {
// when TLS endpoints (https, unixs) are given but no tls config.
func TestDialTLSNoConfig(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo, SkipCreatingClient: true})
defer clus.Terminate(t)
// expect 'signed by unknown authority'
// expect "signed by unknown authority"
_, err := clientv3.New(clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
DialTimeout: time.Second,
})
if err != grpc.ErrClientConnTimeout {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
if err != context.DeadlineExceeded {
t.Fatalf("expected %v, got %v", context.DeadlineExceeded, err)
}
}
// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
// TestDialSetEndpointsBeforeFail ensures SetEndpoints can replace unavailable
// endpoints with available ones.
func TestDialSetEndpointsBeforeFail(t *testing.T) {
testDialSetEndpoints(t, true)
}
@ -94,7 +95,7 @@ func TestDialSetEndpointsAfterFail(t *testing.T) {
// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func testDialSetEndpoints(t *testing.T, setBefore bool) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, SkipCreatingClient: true})
defer clus.Terminate(t)
// get endpoint list
@ -139,7 +140,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
cli := clus.Client(0)
clus.Members[0].InjectPartition(t, clus.Members[1:])
clus.Members[0].InjectPartition(t, clus.Members[1:]...)
cli.SetEndpoints(eps...)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
@ -152,7 +153,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
func TestRejectOldCluster(t *testing.T) {
defer testutil.AfterTest(t)
// 2 endpoints to test multi-endpoint Status
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, SkipCreatingClient: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
@ -182,10 +183,24 @@ func TestDialForeignEndpoint(t *testing.T) {
// grpc can return a lazy connection that's not connected yet; confirm
// that it can communicate with the cluster.
kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn))
kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), clus.Client(0))
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
defer cancel()
if _, gerr := kvc.Get(ctx, "abc"); gerr != nil {
t.Fatal(err)
}
}
// TestSetEndpointAndPut checks that a Put following a SetEndpoints
// to a working endpoint will always succeed.
func TestSetEndpointAndPut(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
clus.Client(1).SetEndpoints(clus.Members[0].GRPCAddr())
_, err := clus.Client(1).Put(context.TODO(), "foo", "bar")
if err != nil && !strings.Contains(err.Error(), "closing") {
t.Fatal(err)
}
}

View File

@ -16,7 +16,6 @@ package integration
import (
"bytes"
"math/rand"
"os"
"reflect"
"strings"
@ -28,8 +27,10 @@ import (
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
func TestKVPutError(t *testing.T) {
@ -39,7 +40,7 @@ func TestKVPutError(t *testing.T) {
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
quota = int64(int(maxReqBytes) + 8*os.Getpagesize())
)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
defer clus.Terminate(t)
kv := clus.RandClient()
@ -441,8 +442,8 @@ func TestKVGetErrConnClosed(t *testing.T) {
go func() {
defer close(donec)
_, err := cli.Get(context.TODO(), "foo")
if err != nil && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
if err != nil && err != context.Canceled && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
}
}()
@ -472,8 +473,9 @@ func TestKVNewAfterClose(t *testing.T) {
donec := make(chan struct{})
go func() {
if _, err := cli.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
_, err := cli.Get(context.TODO(), "foo")
if err != context.Canceled && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v or %v, got %v", context.Canceled, grpc.ErrClientConnClosing, err)
}
close(donec)
}()
@ -790,7 +792,7 @@ func TestKVGetStoppedServerAndClose(t *testing.T) {
// this Get fails and triggers an asynchronous connection retry
_, err := cli.Get(ctx, "abc")
cancel()
if !strings.Contains(err.Error(), "context deadline") {
if err != nil && err != context.DeadlineExceeded {
t.Fatal(err)
}
}
@ -812,86 +814,143 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
// grpc finds out the original connection is down due to the member shutdown.
_, err := cli.Get(ctx, "abc")
cancel()
if !strings.Contains(err.Error(), "context deadline") {
if err != nil && err != context.DeadlineExceeded {
t.Fatal(err)
}
// this Put fails and triggers an asynchronous connection retry
_, err = cli.Put(ctx, "abc", "123")
cancel()
if !strings.Contains(err.Error(), "context deadline") {
if err != nil && err != context.DeadlineExceeded {
t.Fatal(err)
}
}
// TestKVGetOneEndpointDown ensures a client can connect and get if one endpoint is down
func TestKVPutOneEndpointDown(t *testing.T) {
// TestKVPutAtMostOnce ensures that a Put will only occur at most once
// in the presence of network errors.
func TestKVPutAtMostOnce(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// get endpoint list
eps := make([]string, 3)
for i := range eps {
eps[i] = clus.Members[i].GRPCAddr()
}
// make a dead node
clus.Members[rand.Intn(len(eps))].Stop(t)
// try to connect with dead node in the endpoint list
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
cli, err := clientv3.New(cfg)
if err != nil {
if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
t.Fatal(err)
}
defer cli.Close()
ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
t.Fatal(err)
}
cancel()
}
// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other
// endpoints are down, then it will reconnect.
func TestKVGetResetLoneEndpoint(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
// get endpoint list
eps := make([]string, 2)
for i := range eps {
eps[i] = clus.Members[i].GRPCAddr()
}
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond}
cli, err := clientv3.New(cfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// disconnect everything
clus.Members[0].Stop(t)
clus.Members[1].Stop(t)
// have Get try to reconnect
donec := make(chan struct{})
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
t.Fatal(err)
for i := 0; i < 10; i++ {
clus.Members[0].DropConnections()
donec := make(chan struct{})
go func() {
defer close(donec)
for i := 0; i < 10; i++ {
clus.Members[0].DropConnections()
time.Sleep(5 * time.Millisecond)
}
}()
_, err := clus.Client(0).Put(context.TODO(), "k", "v")
<-donec
if err != nil {
break
}
cancel()
close(donec)
}()
time.Sleep(500 * time.Millisecond)
clus.Members[0].Restart(t)
select {
case <-time.After(10 * time.Second):
t.Fatalf("timed out waiting for Get")
case <-donec:
}
resp, err := clus.Client(0).Get(context.TODO(), "k")
if err != nil {
t.Fatal(err)
}
if resp.Kvs[0].Version > 11 {
t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
}
}
// TestKVLargeRequests tests various client/server side request limits.
func TestKVLargeRequests(t *testing.T) {
defer testutil.AfterTest(t)
tests := []struct {
// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
maxRequestBytesServer uint
maxCallSendBytesClient int
maxCallRecvBytesClient int
valueSize int
expectError error
}{
{
maxRequestBytesServer: 1,
maxCallSendBytesClient: 0,
maxCallRecvBytesClient: 0,
valueSize: 1024,
expectError: rpctypes.ErrRequestTooLarge,
},
// without proper client-side receive size limit
// "code = ResourceExhausted desc = grpc: received message larger than max (5242929 vs. 4194304)"
{
maxRequestBytesServer: 7*1024*1024 + 512*1024,
maxCallSendBytesClient: 7 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 5 * 1024 * 1024,
expectError: nil,
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 100 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10 * 1024 * 1024,
expectError: rpctypes.ErrRequestTooLarge,
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 10 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10 * 1024 * 1024,
expectError: grpc.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max "),
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 100 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10*1024*1024 + 5,
expectError: rpctypes.ErrRequestTooLarge,
},
{
maxRequestBytesServer: 10 * 1024 * 1024,
maxCallSendBytesClient: 10 * 1024 * 1024,
maxCallRecvBytesClient: 0,
valueSize: 10*1024*1024 + 5,
expectError: grpc.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max "),
},
}
for i, test := range tests {
clus := integration.NewClusterV3(t,
&integration.ClusterConfig{
Size: 1,
MaxRequestBytes: test.maxRequestBytesServer,
ClientMaxCallSendMsgSize: test.maxCallSendBytesClient,
ClientMaxCallRecvMsgSize: test.maxCallRecvBytesClient,
},
)
cli := clus.Client(0)
_, err := cli.Put(context.TODO(), "foo", strings.Repeat("a", test.valueSize))
if _, ok := err.(rpctypes.EtcdError); ok {
if err != test.expectError {
t.Errorf("#%d: expected %v, got %v", i, test.expectError, err)
}
} else if err != nil && !strings.HasPrefix(err.Error(), test.expectError.Error()) {
t.Errorf("#%d: expected %v, got %v", i, test.expectError, err)
}
// the put request went through; now expect a large response back from the server
if err == nil {
_, err = cli.Get(context.TODO(), "foo")
if err != nil {
t.Errorf("#%d: get expected no error, got %v", i, err)
}
}
clus.Terminate(t)
}
}
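The client-side limits exercised above are configured through clientv3.Config's MaxCallSendMsgSize and MaxCallRecvMsgSize fields; a minimal sketch, with a placeholder endpoint and placeholder sizes:
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:          []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout:        2 * time.Second,
		MaxCallSendMsgSize: 2 * 1024 * 1024,  // client-side limit on outgoing request size
		MaxCallRecvMsgSize: 16 * 1024 * 1024, // client-side limit on incoming response size
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}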

View File

@ -26,6 +26,7 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@ -233,7 +234,7 @@ type leaseCh struct {
ch <-chan *clientv3.LeaseKeepAliveResponse
}
// TestLeaseKeepAliveNotFound ensures a revoked lease won't stop other keep alives
// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases.
func TestLeaseKeepAliveNotFound(t *testing.T) {
defer testutil.AfterTest(t)
@ -286,8 +287,10 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
go func() {
defer close(donec)
_, err := cli.Grant(context.TODO(), 5)
if err != nil && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
if err != nil && err != grpc.ErrClientConnClosing && err != context.Canceled {
// grpc.ErrClientConnClosing if grpc-go balancer calls 'Get' after client.Close.
// context.Canceled if grpc-go balancer calls 'Get' with an inflight client.Close.
t.Fatalf("expected %v or %v, got %v", grpc.ErrClientConnClosing, context.Canceled, err)
}
}()
@ -316,8 +319,8 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
donec := make(chan struct{})
go func() {
if _, err := cli.Grant(context.TODO(), 5); err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
if _, err := cli.Grant(context.TODO(), 5); err != context.Canceled && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v or %v, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err)
}
close(donec)
}()
@ -348,8 +351,8 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
donec := make(chan struct{})
go func() {
if _, err := cli.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
if _, err := cli.Revoke(context.TODO(), leaseID); err != context.Canceled && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v or %v, got %v", err != context.Canceled, grpc.ErrClientConnClosing, err)
}
close(donec)
}()
@ -360,7 +363,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
}
}
// TestLeaseKeepAliveCloseAfterDisconnectExpire ensures the keep alive channel is closed
// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
// following a disconnection, lease revoke, then reconnect.
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
defer testutil.AfterTest(t)
@ -395,7 +398,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
clus.Members[0].Restart(t)
// some keep-alives may still be buffered; drain until close
// some responses may still be buffered; drain until close
timer := time.After(time.Duration(kresp.TTL) * time.Second)
for kresp != nil {
select {
@ -482,7 +485,8 @@ func TestLeaseTimeToLive(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clus.RandClient()
c := clus.RandClient()
lapi := c
resp, err := lapi.Grant(context.Background(), 10)
if err != nil {
@ -497,6 +501,11 @@ func TestLeaseTimeToLive(t *testing.T) {
}
}
// linearizable read to ensure the Puts have propagated to the server backing lapi
if _, err := c.Get(context.TODO(), "abc"); err != nil {
t.Fatal(err)
}
lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
if lerr != nil {
t.Fatal(lerr)
@ -545,8 +554,7 @@ func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
}
lresp, err := cli.TimeToLive(context.Background(), resp.ID)
// TimeToLive() doesn't return LeaseNotFound error
// but return a response with TTL to be -1
// TimeToLive() should return a response with TTL=-1.
if err != nil {
t.Fatalf("expected err to be nil")
}
@ -636,8 +644,8 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
}
}
// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster
// before, during, and after quorum loss to confirm Grant/Keepalive tolerates
// TestV3LeaseFailureOverlap issues Grant and KeepAlive requests to a cluster
// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})

View File

@ -14,8 +14,16 @@
package integration
import "github.com/coreos/pkg/capnslog"
import (
"io/ioutil"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/pkg/capnslog"
"google.golang.org/grpc/grpclog"
)
func init() {
capnslog.SetGlobalLogLevel(capnslog.INFO)
capnslog.SetGlobalLogLevel(capnslog.CRITICAL)
clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}

View File

@ -44,7 +44,7 @@ func TestV3ClientMetrics(t *testing.T) {
err error
)
// listen for all prometheus metrics
// listen for all Prometheus metrics
donec := make(chan struct{})
go func() {
defer close(donec)
@ -65,7 +65,7 @@ func TestV3ClientMetrics(t *testing.T) {
url := "unix://" + addr + "/metrics"
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, SkipCreatingClient: true})
defer clus.Terminate(t)
cfg := clientv3.Config{

View File

@ -25,6 +25,7 @@ import (
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)

View File

@ -15,7 +15,6 @@
package integration
import (
"context"
"reflect"
"testing"
@ -24,6 +23,8 @@ import (
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
func TestNamespacePutGet(t *testing.T) {

View File

@ -0,0 +1,260 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !cluster_proxy
package integration
import (
"errors"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
var errExpected = errors.New("expected error")
// TestBalancerUnderNetworkPartitionPut tests that when one member becomes isolated,
// the first Put request fails, and the following retry succeeds after the client
// balancer switches to other endpoints.
func TestBalancerUnderNetworkPartitionPut(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Put(ctx, "a", "b")
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
}, time.Second)
}
func TestBalancerUnderNetworkPartitionDelete(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Delete(ctx, "a")
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
}, time.Second)
}
func TestBalancerUnderNetworkPartitionTxn(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Txn(ctx).
If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
Then(clientv3.OpPut("foo", "bar")).
Else(clientv3.OpPut("foo", "baz")).Commit()
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
return errExpected
}
return err
}, time.Second)
}
// TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout tests that
// when one member becomes isolated, the first quorum Get request succeeds
// by switching endpoints within the timeout (long enough to cover the endpoint switch).
func TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a")
return err
}, 7*time.Second)
}
// TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout tests that
// when one member becomes isolated, the first quorum Get request fails,
// and the following retry succeeds after the client balancer switches to others.
func TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a")
if err == context.DeadlineExceeded {
return errExpected
}
return err
}, time.Second)
}
func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) {
testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "a", clientv3.WithSerializable())
return err
}, time.Second)
}
func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
// expect to pin eps[0]
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
DialTimeout: 3 * time.Second,
}
cli, err := clientv3.New(ccfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// wait for eps[0] to be pinned
mustWaitPinReady(t, cli)
// add other endpoints for later endpoint switch
cli.SetEndpoints(eps...)
clus.Members[0].InjectPartition(t, clus.Members[1:]...)
for i := 0; i < 2; i++ {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
err = op(cli, ctx)
cancel()
if err == nil {
break
}
if err != errExpected {
t.Errorf("#%d: expected %v, got %v", i, errExpected, err)
}
// give enough time for endpoint switch
// TODO: remove random sleep by syncing directly with balancer
if i == 0 {
time.Sleep(5 * time.Second)
}
}
if err != nil {
t.Errorf("balancer did not switch in time (%v)", err)
}
}
// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures the balancer
// switches endpoints when the leader fails and linearizable get requests return
// "etcdserver: request timed out".
func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
lead := clus.WaitLeader(t)
timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout()
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{eps[(lead+1)%2]},
DialTimeout: 1 * time.Second,
})
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// wait for non-leader to be pinned
mustWaitPinReady(t, cli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
cli.SetEndpoints(eps[lead], eps[(lead+1)%2])
// isolate leader
clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3])
// expect a balancer endpoint switch during the ongoing leader election
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
_, err = cli.Get(ctx, "a")
cancel()
if err != nil {
t.Fatal(err)
}
}
func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) {
testBalancerUnderNetworkPartitionWatch(t, true)
}
func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) {
testBalancerUnderNetworkPartitionWatch(t, false)
}
// testBalancerUnderNetworkPartitionWatch ensures that a watch stream
// to a partitioned node is closed when the context requires a leader.
func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
target := clus.WaitLeader(t)
if !isolateLeader {
target = (target + 1) % 3
}
// pin eps[target]
watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}})
if err != nil {
t.Fatal(err)
}
defer watchCli.Close()
// wait for eps[target] to be pinned
mustWaitPinReady(t, watchCli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
watchCli.SetEndpoints(eps...)
wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
select {
case <-wch:
case <-time.After(3 * time.Second):
t.Fatal("took too long to create watch")
}
// isolate eps[target]
clus.Members[target].InjectPartition(t,
clus.Members[(target+1)%3],
clus.Members[(target+2)%3],
)
select {
case ev := <-wch:
if len(ev.Events) != 0 {
t.Fatal("expected no event")
}
if err = ev.Err(); err != rpctypes.ErrNoLeader {
t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
}
case <-time.After(3 * time.Second): // enough time to detect leader lost
t.Fatal("took too long to detect leader lost")
}
}
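For reference, a minimal sketch of the require-leader watch pattern these tests exercise (endpoint is a placeholder): the watch stream is closed with ErrNoLeader once the serving member loses its leader.
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithRequireLeader makes the server cancel the stream when it has no leader
	ctx := clientv3.WithRequireLeader(context.Background())
	for resp := range cli.Watch(ctx, "foo", clientv3.WithCreatedNotify()) {
		if werr := resp.Err(); werr == rpctypes.ErrNoLeader {
			log.Println("watch closed: no leader")
			return
		}
		log.Printf("received %d events", len(resp.Events))
	}
}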

View File

@ -20,6 +20,7 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)

View File

@ -0,0 +1,352 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bytes"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
// TestBalancerUnderServerShutdownWatch expects the watch client
// to switch its endpoints when the member serving the pinned endpoint fails.
func TestBalancerUnderServerShutdownWatch(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
lead := clus.WaitLeader(t)
// pin eps[lead]
watchCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[lead]}})
if err != nil {
t.Fatal(err)
}
defer watchCli.Close()
// wait for eps[lead] to be pinned
mustWaitPinReady(t, watchCli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
watchCli.SetEndpoints(eps...)
key, val := "foo", "bar"
wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
select {
case <-wch:
case <-time.After(3 * time.Second):
t.Fatal("took too long to create watch")
}
donec := make(chan struct{})
go func() {
defer close(donec)
// switch to others when eps[lead] is shut down
select {
case ev := <-wch:
if werr := ev.Err(); werr != nil {
t.Fatal(werr)
}
if len(ev.Events) != 1 {
t.Fatalf("expected one event, got %+v", ev)
}
if !bytes.Equal(ev.Events[0].Kv.Value, []byte(val)) {
t.Fatalf("expected %q, got %+v", val, ev.Events[0].Kv)
}
case <-time.After(7 * time.Second):
t.Fatal("took too long to receive events")
}
}()
// shut down eps[lead]
clus.Members[lead].Terminate(t)
// writes to eps[lead+1]
putCli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}})
if err != nil {
t.Fatal(err)
}
defer putCli.Close()
for {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
_, err = putCli.Put(ctx, key, val)
cancel()
if err == nil {
break
}
if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail {
continue
}
t.Fatal(err)
}
select {
case <-donec:
case <-time.After(5 * time.Second): // enough time for balancer switch
t.Fatal("took too long to receive events")
}
}
func TestBalancerUnderServerShutdownPut(t *testing.T) {
testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Put(ctx, "foo", "bar")
return err
})
}
func TestBalancerUnderServerShutdownDelete(t *testing.T) {
testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Delete(ctx, "foo")
return err
})
}
func TestBalancerUnderServerShutdownTxn(t *testing.T) {
testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Txn(ctx).
If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
Then(clientv3.OpPut("foo", "bar")).
Else(clientv3.OpPut("foo", "baz")).Commit()
return err
})
}
// testBalancerUnderServerShutdownMutable expects that when the member of
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent put/delete/txn requests succeed with new endpoints.
func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
// pin eps[0]
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}})
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// wait for eps[0] to be pinned
mustWaitPinReady(t, cli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
cli.SetEndpoints(eps...)
// shut down eps[0]
clus.Members[0].Terminate(t)
// the balancer switches to other endpoints once eps[0] is explicitly shut down,
// so the following request should succeed
// TODO: remove this sleep (expose client connection state?)
time.Sleep(time.Second)
cctx, ccancel := context.WithTimeout(context.Background(), time.Second)
err = op(cli, cctx)
ccancel()
if err != nil {
t.Fatal(err)
}
}
func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) {
testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "foo")
return err
}, 7*time.Second) // give enough time for leader election, balancer switch
}
func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) {
testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error {
_, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
return err
}, 2*time.Second)
}
// testBalancerUnderServerShutdownImmutable expects that when the member of
// the pinned endpoint is shut down, the balancer switches its endpoints
// and all subsequent range requests succeed with new endpoints.
func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
// pin eps[0]
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[0]}})
if err != nil {
t.Errorf("failed to create client: %v", err)
}
defer cli.Close()
// wait for eps[0] to be pinned
mustWaitPinReady(t, cli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
cli.SetEndpoints(eps...)
// shut down eps[0]
clus.Members[0].Terminate(t)
// the balancer switches to other endpoints once eps[0] is explicitly shut down,
// so the following request should succeed
cctx, ccancel := context.WithTimeout(context.Background(), timeout)
err = op(cli, cctx)
ccancel()
if err != nil {
t.Errorf("failed to finish range request in time %v (timeout %v)", err, timeout)
}
}
func TestBalancerUnderServerStopInflightLinearizableGetOnRestart(t *testing.T) {
tt := []pinTestOpt{
{pinLeader: true, stopPinFirst: true},
{pinLeader: true, stopPinFirst: false},
{pinLeader: false, stopPinFirst: true},
{pinLeader: false, stopPinFirst: false},
}
for i := range tt {
testBalancerUnderServerStopInflightRangeOnRestart(t, true, tt[i])
}
}
func TestBalancerUnderServerStopInflightSerializableGetOnRestart(t *testing.T) {
tt := []pinTestOpt{
{pinLeader: true, stopPinFirst: true},
{pinLeader: true, stopPinFirst: false},
{pinLeader: false, stopPinFirst: true},
{pinLeader: false, stopPinFirst: false},
}
for i := range tt {
testBalancerUnderServerStopInflightRangeOnRestart(t, false, tt[i])
}
}
type pinTestOpt struct {
pinLeader bool
stopPinFirst bool
}
// testBalancerUnderServerStopInflightRangeOnRestart expects an
// in-flight range request to reconnect on server restart.
func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) {
defer testutil.AfterTest(t)
cfg := &integration.ClusterConfig{
Size: 2,
SkipCreatingClient: true,
}
if linearizable {
cfg.Size = 3
}
clus := integration.NewClusterV3(t, cfg)
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
if linearizable {
eps = append(eps, clus.Members[2].GRPCAddr())
}
lead := clus.WaitLeader(t)
target := lead
if !opt.pinLeader {
target = (target + 1) % 2
}
// pin eps[target]
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[target]}})
if err != nil {
t.Errorf("failed to create client: %v", err)
}
defer cli.Close()
// wait for eps[target] to be pinned
mustWaitPinReady(t, cli)
// add all eps to the list, so that when the originally pinned one fails
// the client can switch to the other available eps
cli.SetEndpoints(eps...)
if opt.stopPinFirst {
clus.Members[target].Stop(t)
// give the balancer some time to switch before stopping the other member
time.Sleep(time.Second)
clus.Members[(target+1)%2].Stop(t)
} else {
clus.Members[(target+1)%2].Stop(t)
// the balancer cannot pin the other member since it is already stopped
clus.Members[target].Stop(t)
}
// 3 seconds is the minimum interval between an endpoint being marked
// unhealthy and being removed from the unhealthy set, so it can
// take >5 seconds to unpin and re-pin an endpoint
// TODO: decrease the timeout once the balancer switch logic is rewritten
clientTimeout := 7 * time.Second
var gops []clientv3.OpOption
if !linearizable {
gops = append(gops, clientv3.WithSerializable())
}
donec, readyc := make(chan struct{}), make(chan struct{}, 1)
go func() {
defer close(donec)
ctx, cancel := context.WithTimeout(context.TODO(), clientTimeout)
readyc <- struct{}{}
_, err := cli.Get(ctx, "abc", gops...)
cancel()
if err != nil {
t.Fatal(err)
}
}()
<-readyc
clus.Members[target].Restart(t)
select {
case <-time.After(clientTimeout + 3*time.Second):
t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
case <-donec:
}
}
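As a side note on the linearizable/serializable split above: a serializable Get is served from the pinned member's local store without a quorum round-trip, which is why it tolerates the loss of the other members. A minimal sketch, assuming an existing client:
package example

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// serializableGet reads from whichever member the balancer has pinned,
// trading linearizability for availability during quorum loss.
func serializableGet(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return cli.Get(ctx, key, clientv3.WithSerializable())
}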

View File

@ -24,6 +24,7 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
@ -100,6 +101,8 @@ func TestTxnWriteFail(t *testing.T) {
}
func TestTxnReadRetry(t *testing.T) {
t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request")
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
@ -129,7 +132,6 @@ func TestTxnReadRetry(t *testing.T) {
t.Fatalf("waited too long")
}
}
func TestTxnSuccess(t *testing.T) {
defer testutil.AfterTest(t)

View File

@ -21,6 +21,7 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
@ -62,7 +63,7 @@ func TestUserErrorAuth(t *testing.T) {
authapi := clus.RandClient()
authSetupRoot(t, authapi.Auth)
// un-authenticated client
// unauthenticated client
if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserNotFound {
t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
}

View File

@ -0,0 +1,35 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
// mustWaitPinReady waits up to 3 seconds for the connection to come up (endpoint pinned).
// It fails the test on timeout.
func mustWaitPinReady(t *testing.T, cli *clientv3.Client) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
_, err := cli.Get(ctx, "foo")
cancel()
if err != nil {
t.Fatal(err)
}
}

View File

@ -28,8 +28,10 @@ import (
"github.com/coreos/etcd/integration"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type watcherTest func(*testing.T, *watchctx)
@ -51,8 +53,8 @@ func runWatchTest(t *testing.T, f watcherTest) {
wclientMember := rand.Intn(3)
w := clus.Client(wclientMember).Watcher
// select a different client from wclient so puts succeed if
// a test knocks out the watcher client
// select a different client for KV operations so puts succeed if
// a test knocks out the watcher client.
kvMember := rand.Intn(3)
for kvMember == wclientMember {
kvMember = rand.Intn(3)
@ -309,7 +311,7 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
select {
case <-time.After(time.Second):
t.Fatalf("took too long to cancel")
case v, ok := <-wctx.ch:
case _, ok := <-wctx.ch:
if !ok {
// closed before getting put; OK
break
@ -318,8 +320,8 @@ func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
select {
case <-time.After(time.Second):
t.Fatalf("took too long to close")
case v, ok = <-wctx.ch:
if ok {
case v, ok2 := <-wctx.ch:
if ok2 {
t.Fatalf("expected watcher channel to close, got %v", v)
}
}
@ -800,7 +802,8 @@ func TestWatchWithFilter(t *testing.T) {
}
}
// TestWatchWithCreatedNotification checks that createdNotification works.
// TestWatchWithCreatedNotification checks that WithCreatedNotify returns a
// Created watch response.
func TestWatchWithCreatedNotification(t *testing.T) {
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
@ -837,8 +840,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
cluster.Members[0].DropConnections()
// try to receive from watch channel again
// ensure it doesn't post another createNotify
// check watch channel doesn't post another watch response.
select {
case wresp := <-wch:
t.Fatalf("got unexpected watch response: %+v\n", wresp)
@ -856,10 +858,26 @@ func TestWatchCancelOnServer(t *testing.T) {
client := cluster.RandClient()
numWatches := 10
// The gRPC proxy starts watches to detect leadership after the proxy server
// reports itself as started; to avoid racing with the proxy's internal watches,
// wait until require-leader watches get created responses to ensure the
// leadership watches have started.
for {
ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.TODO()))
ww := client.Watch(ctx, "a", clientv3.WithCreatedNotify())
wresp := <-ww
cancel()
if wresp.Err() == nil {
break
}
}
cancels := make([]context.CancelFunc, numWatches)
for i := 0; i < numWatches; i++ {
// use WithTimeout to force separate streams in client
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
// force separate streams in client
md := metadata.Pairs("some-key", fmt.Sprintf("%d", i))
mctx := metadata.NewOutgoingContext(context.Background(), md)
ctx, cancel := context.WithCancel(mctx)
cancels[i] = cancel
w := client.Watch(ctx, fmt.Sprintf("%d", i), clientv3.WithCreatedNotify())
<-w
@ -885,7 +903,7 @@ func TestWatchCancelOnServer(t *testing.T) {
t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr)
}
if maxWatchV-minWatchV != numWatches {
if maxWatchV-minWatchV < numWatches {
t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV)
}
}
@ -916,12 +934,12 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// each unique context "%v" has a unique grpc stream
n := 100
ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5)
for i := range ctxs {
// make "%v" unique
ctxs[i] = context.WithValue(context.TODO(), "key", i)
// make unique stream
md := metadata.Pairs("some-key", fmt.Sprintf("%d", i))
ctxs[i] = metadata.NewOutgoingContext(context.Background(), md)
// limits the maximum number of outstanding watchers per stream
ctxc[i] = make(chan struct{}, 2)
}
@ -946,7 +964,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
t.Fatalf("unexpected closed channel %p", wch)
}
// may take a second or two to reestablish a watcher because of
// grpc backoff policies for disconnects
// grpc back off policies for disconnects
case <-time.After(5 * time.Second):
t.Errorf("timed out waiting for watch on %p", wch)
}
@ -970,7 +988,7 @@ func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3))
}
}
// TestWatchCanelAndCloseClient ensures that canceling a watcher then immediately
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
defer testutil.AfterTest(t)

View File

@ -16,6 +16,7 @@ package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
@ -66,22 +67,46 @@ type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
txn *TxnResponse
}
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn }
func (resp *PutResponse) OpResponse() OpResponse {
return OpResponse{put: resp}
}
func (resp *GetResponse) OpResponse() OpResponse {
return OpResponse{get: resp}
}
func (resp *DeleteResponse) OpResponse() OpResponse {
return OpResponse{del: resp}
}
func (resp *TxnResponse) OpResponse() OpResponse {
return OpResponse{txn: resp}
}
type kv struct {
remote pb.KVClient
remote pb.KVClient
callOpts []grpc.CallOption
}
func NewKV(c *Client) KV {
return &kv{remote: RetryKVClient(c)}
api := &kv{remote: RetryKVClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewKVFromKVClient(remote pb.KVClient) KV {
return &kv{remote: remote}
func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
api := &kv{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
@ -100,7 +125,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
}
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
@ -109,54 +134,43 @@ func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*C
func (kv *kv) Txn(ctx context.Context) Txn {
return &txn{
kv: kv,
ctx: ctx,
kv: kv,
ctx: ctx,
callOpts: kv.callOpts,
}
}
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
resp, err = kv.remote.Put(ctx, r)
resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
resp, err = kv.remote.DeleteRange(ctx, r)
resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
case tTxn:
var resp *pb.TxnResponse
resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
if err == nil {
return OpResponse{txn: (*TxnResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
return OpResponse{}, err
return OpResponse{}, toErr(ctx, err)
}
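A minimal usage sketch of the Do path shown above, dispatching on the op type from the public API (the endpoint is a placeholder):
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Do dispatches on the op type (tPut/tRange above) and returns an OpResponse.
	if _, err = cli.Do(context.TODO(), clientv3.OpPut("foo", "bar")); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Do(context.TODO(), clientv3.OpGet("foo"))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(resp.Get().Kvs[0].Value))
}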

View File

@ -20,6 +20,7 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
@ -30,7 +31,7 @@ type (
LeaseID int64
)
// LeaseGrantResponse is used to convert the protobuf grant response.
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
@ -38,14 +39,14 @@ type LeaseGrantResponse struct {
Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
*pb.ResponseHeader
ID LeaseID `json:"id"`
@ -60,6 +61,12 @@ type LeaseTimeToLiveResponse struct {
Keys [][]byte `json:"keys"`
}
// LeaseStatus represents a lease status.
type LeaseStatus struct {
ID LeaseID `json:"id"`
// TODO: TTL int64
}
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
@ -101,7 +108,7 @@ type Lease interface {
// KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
// KeepAliveOnce renews the lease once. In most of the cases, KeepAlive
// should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
@ -133,6 +140,8 @@ type lessor struct {
// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
firstKeepAliveOnce sync.Once
callOpts []grpc.CallOption
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
@ -148,10 +157,10 @@ type keepAlive struct {
}
func NewLease(c *Client) Lease {
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
}
func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
@ -161,62 +170,52 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Durati
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
if c != nil {
l.callOpts = c.callOpts
}
reqLeaderCtx := WithRequireLeader(context.Background())
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(ctx, r)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(ctx, r)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
for {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
@ -314,7 +313,7 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
}
}
// closeRequireLeader scans all keep alives for ctxs that have require leader
// closeRequireLeader scans keepAlives for ctxs that have require leader
// and closes the associated channels.
func (l *lessor) closeRequireLeader() {
l.mu.Lock()
@ -323,7 +322,7 @@ func (l *lessor) closeRequireLeader() {
reqIdxs := 0
// find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
continue
}
@ -357,7 +356,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
@ -386,7 +385,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
close(l.donec)
l.loopErr = gerr
for _, ka := range l.keepAlives {
ka.Close()
ka.close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
@ -401,7 +400,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
} else {
for {
resp, err := stream.Recv()
if err != nil {
if canceledByCaller(l.stopCtx, err) {
return err
@ -426,10 +424,10 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
}
}
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
if err != nil {
cancel()
return nil, err
@ -467,7 +465,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.Close()
ka.close()
return
}
@ -497,7 +495,7 @@ func (l *lessor) deadlineLoop() {
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
ka.close()
delete(l.keepAlives, id)
}
}
@ -505,7 +503,7 @@ func (l *lessor) deadlineLoop() {
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
var tosend []LeaseID
@ -539,7 +537,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
}
}
func (ka *keepAlive) Close() {
func (ka *keepAlive) close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
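As a quick orientation for the lease API this hunk touches (Grant, KeepAlive, and TimeToLive now honoring the client's call options), here is a minimal usage sketch. It is not part of the diff; the endpoint address and key names are placeholders.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Assumed endpoint; adjust for your cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Grant a 10-second lease and attach a key to it.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = cli.Put(ctx, "svc/instance", "addr", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}

	// Keep the lease alive; the returned channel delivers keepalive responses.
	kaCh, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for ka := range kaCh {
			fmt.Println("lease refreshed, TTL:", ka.TTL)
		}
	}()

	// Inspect the lease, listing the keys attached to it.
	ttl, err := cli.TimeToLive(ctx, grant.ID, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %x TTL=%d keys=%q\n", ttl.ID, ttl.TTL, ttl.Keys)
}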

View File

@ -16,36 +16,35 @@ package clientv3
import (
"io/ioutil"
"log"
"sync"
"google.golang.org/grpc/grpclog"
)
// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger
// It implements grpclog.LoggerV2 interface.
type Logger grpclog.LoggerV2
var (
logger settableLogger
)
type settableLogger struct {
l grpclog.Logger
l grpclog.LoggerV2
mu sync.RWMutex
}
func init() {
// disable client side logs by default
logger.mu.Lock()
logger.l = log.New(ioutil.Discard, "", 0)
logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
// logger has to override the grpclog at initialization so that
// any changes to the grpclog go through logger with locking
// instead of through SetLogger
//
// now updates only happen through settableLogger.set
grpclog.SetLogger(&logger)
grpclog.SetLoggerV2(&logger)
logger.mu.Unlock()
}
@ -62,6 +61,7 @@ func GetLogger() Logger {
func (s *settableLogger) set(l Logger) {
s.mu.Lock()
logger.l = l
grpclog.SetLoggerV2(&logger)
s.mu.Unlock()
}
@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger {
return l
}
// implement the grpclog.Logger interface
// implement the grpclog.LoggerV2 interface
func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
func (s *settableLogger) Warningf(format string, args ...interface{}) {
s.get().Warningf(format, args...)
}
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
func (s *settableLogger) Errorf(format string, args ...interface{}) {
s.get().Errorf(format, args...)
}
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
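Since the client logger now wraps grpclog.LoggerV2 instead of the old grpclog.Logger, callers that install their own logger would pass a LoggerV2. A minimal sketch, assuming clientv3.SetLogger is used as before; the writer choices are arbitrary:

package main

import (
	"io/ioutil"
	"os"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route warnings and errors to stderr while discarding info-level noise.
	// grpclog.NewLoggerV2 takes (info, warning, error) writers.
	clientv3.SetLogger(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))

	// ... construct a clientv3.Client as usual; its gRPC logs now go
	// through the LoggerV2 installed above.
}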

View File

@ -32,17 +32,23 @@ func init() { auth.BcryptCost = bcrypt.MinCost }
// TestMain sets up an etcd cluster if running the examples.
func TestMain(m *testing.M) {
useCluster := true // default to running all tests
useCluster, hasRunArg := false, false // default to running only Test*
for _, arg := range os.Args {
if strings.HasPrefix(arg, "-test.run=") {
exp := strings.Split(arg, "=")[1]
match, err := regexp.MatchString(exp, "Example")
useCluster = (err == nil && match) || strings.Contains(exp, "Example")
hasRunArg = true
break
}
}
if !hasRunArg {
// force only running Test* if no args given to avoid leak false
// positives from having a long-running cluster for the examples.
os.Args = append(os.Args, "-test.run=Test")
}
v := 0
var v int
if useCluster {
cfg := integration.ClusterConfig{Size: 3}
clus := integration.NewClusterV3(nil, &cfg)

View File

@ -37,7 +37,7 @@ type Maintenance interface {
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment defragments storage backend of the etcd member with given endpoint.
// Defragment releases wasted space from internal fragmentation on a given etcd member.
// Defragment is only needed when deleting a large number of keys and want to reclaim
// the resources.
// Defragment is an expensive operation. User should avoid defragmenting multiple members
@ -49,36 +49,45 @@ type Maintenance interface {
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend.
// Snapshot provides a reader for a point-in-time snapshot of etcd.
Snapshot(ctx context.Context) (io.ReadCloser, error)
}
type maintenance struct {
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
callOpts []grpc.CallOption
}
func NewMaintenance(c *Client) Maintenance {
return &maintenance{
api := &maintenance{
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.dial(endpoint)
if err != nil {
return nil, nil, err
}
cancel := func() { conn.Close() }
return pb.NewMaintenanceClient(conn), cancel, nil
return RetryMaintenanceClient(c, conn), cancel, nil
},
remote: pb.NewMaintenanceClient(c.conn),
remote: RetryMaintenanceClient(c, c.conn),
}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
return &maintenance{
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
api := &maintenance{
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
remote: remote,
}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
@ -87,15 +96,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
@ -121,7 +126,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
return &ret, nil
}
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil {
return (*AlarmResponse)(resp), nil
}
@ -134,7 +139,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
@ -147,7 +152,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
@ -155,7 +160,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
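For context on the maintenance RPCs that now use the client's call options, a short usage sketch (not part of the change; the endpoint is a placeholder) showing Status per member followed by a one-member-at-a-time Defragment, matching the caution in the interface comment:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	endpoints := []string{"localhost:2379"} // assumed endpoint
	cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	for _, ep := range endpoints {
		// Status targets a single member; it reports version and on-disk size.
		st, err := cli.Status(ctx, ep)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: version=%s dbSize=%d\n", ep, st.Version, st.DbSize)

		// Defragment is expensive; run it one member at a time.
		if _, err := cli.Defragment(ctx, ep); err != nil {
			log.Fatal(err)
		}
	}
}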

View File

@ -17,6 +17,7 @@ package mirror
import (
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)

View File

@ -15,11 +15,11 @@
package namespace
import (
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
type kvPrefix struct {

View File

@ -17,9 +17,9 @@ package namespace
import (
"bytes"
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
type leasePrefix struct {

View File

@ -17,9 +17,9 @@ package namespace
import (
"sync"
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
type watcherPrefix struct {

View File

@ -19,11 +19,12 @@ import (
"fmt"
etcd "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/naming"
"google.golang.org/grpc/status"
"golang.org/x/net/context"
)
var ErrWatcherClosed = fmt.Errorf("naming: watch closed")
@ -39,13 +40,13 @@ func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Upd
case naming.Add:
var v []byte
if v, err = json.Marshal(nm); err != nil {
return grpc.Errorf(codes.InvalidArgument, err.Error())
return status.Error(codes.InvalidArgument, err.Error())
}
_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
case naming.Delete:
_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
default:
return grpc.Errorf(codes.InvalidArgument, "naming: bad naming op")
return status.Error(codes.InvalidArgument, "naming: bad naming op")
}
return err
}
@ -80,7 +81,7 @@ func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
// process new events on target/*
wr, ok := <-gw.wch
if !ok {
gw.err = grpc.Errorf(codes.Unavailable, "%s", ErrWatcherClosed)
gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
return nil, gw.err
}
if gw.err = wr.Err(); gw.err != nil {
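To illustrate the resolver whose error reporting switches here from grpc.Errorf to status.Error, a minimal registration sketch (the endpoint, target name, and address are placeholders, not taken from this change):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	etcdnaming "github.com/coreos/etcd/clientv3/naming"
	"golang.org/x/net/context"
	"google.golang.org/grpc/naming"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	r := &etcdnaming.GRPCResolver{Client: cli}

	// Register an address under the "my-service" target. On failure the
	// resolver now returns a gRPC status error (e.g. codes.InvalidArgument).
	if err := r.Update(context.TODO(), "my-service", naming.Update{Op: naming.Add, Addr: "127.0.0.1:8080"}); err != nil {
		log.Fatal(err)
	}

	// Deregister the same address when shutting down.
	if err := r.Update(context.TODO(), "my-service", naming.Update{Op: naming.Delete, Addr: "127.0.0.1:8080"}); err != nil {
		log.Fatal(err)
	}
}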

View File

@ -19,12 +19,12 @@ import (
"reflect"
"testing"
"golang.org/x/net/context"
"google.golang.org/grpc/naming"
etcd "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc/naming"
)
func TestGRPCResolver(t *testing.T) {
@ -66,10 +66,13 @@ func TestGRPCResolver(t *testing.T) {
delOp := naming.Update{Op: naming.Delete, Addr: "127.0.0.1"}
err = r.Update(context.TODO(), "foo", delOp)
if err != nil {
t.Fatalf("failed to udpate %v", err)
}
us, err = w.Next()
if err != nil {
t.Fatal("failed to get udpate", err)
t.Fatalf("failed to get udpate %v", err)
}
wu = &naming.Update{
@ -83,7 +86,7 @@ func TestGRPCResolver(t *testing.T) {
}
}
// TestGRPCResolverMultiInit ensures the resolver will initialize
// TestGRPCResolverMulti ensures the resolver will initialize
// correctly with multiple hosts and correctly receive multiple
// updates in a single revision.
func TestGRPCResolverMulti(t *testing.T) {

View File

@ -23,6 +23,7 @@ const (
tRange opType = iota + 1
tPut
tDeleteRange
tTxn
)
var (
@ -67,9 +68,17 @@ type Op struct {
// for put
val []byte
leaseID LeaseID
// txn
cmps []Cmp
thenOps []Op
elseOps []Op
}
// accesors / mutators
// accessors / mutators
func (op Op) IsTxn() bool { return op.t == tTxn }
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
// KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key }
@ -80,6 +89,39 @@ func (op *Op) WithKeyBytes(key []byte) { op.key = key }
// RangeBytes returns the byte slice holding with the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end }
// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }
// IsPut returns true iff the operation is a Put.
func (op Op) IsPut() bool { return op.t == tPut }
// IsGet returns true iff the operation is a Get.
func (op Op) IsGet() bool { return op.t == tRange }
// IsDelete returns true iff the operation is a Delete.
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
// IsSerializable returns true if the serializable field is true.
func (op Op) IsSerializable() bool { return op.serializable == true }
// IsKeysOnly returns whether keysOnly is set.
func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
// IsCountOnly returns whether countOnly is set.
func (op Op) IsCountOnly() bool { return op.countOnly == true }
// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }
// MaxModRev returns the operation's maximum modify revision.
func (op Op) MaxModRev() int64 { return op.maxModRev }
// MinCreateRev returns the operation's minimum create revision.
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
// MaxCreateRev returns the operation's maximum create revision.
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
// WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end }
@ -113,6 +155,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest {
return r
}
func (op Op) toTxnRequest() *pb.TxnRequest {
thenOps := make([]*pb.RequestOp, len(op.thenOps))
for i, tOp := range op.thenOps {
thenOps[i] = tOp.toRequestOp()
}
elseOps := make([]*pb.RequestOp, len(op.elseOps))
for i, eOp := range op.elseOps {
elseOps[i] = eOp.toRequestOp()
}
cmps := make([]*pb.Compare, len(op.cmps))
for i := range op.cmps {
cmps[i] = (*pb.Compare)(&op.cmps[i])
}
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
}
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
@ -129,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp {
}
func (op Op) isWrite() bool {
if op.t == tTxn {
for _, tOp := range op.thenOps {
if tOp.isWrite() {
return true
}
}
for _, tOp := range op.elseOps {
if tOp.isWrite() {
return true
}
}
return false
}
return op.t != tRange
}
@ -194,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op {
return ret
}
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
@ -247,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided.
// Since current mvcc.Range implementation returns results
// sorted by keys in lexicographically ascending order,
// client should ignore SortOrder if the target is SortByKey.
// Since by default the server returns results sorted by keys
// in lexicographically ascending order, the client should ignore
// SortOrder if the target is SortByKey.
order = SortNone
}
op.sort = &SortOption{target, order}
@ -390,7 +465,7 @@ func WithPrevKV() OpOption {
}
// WithIgnoreValue updates the key using its current value.
// Empty value should be passed when ignore_value is set.
// This option can not be combined with non-empty values.
// Returns an error if the key does not exist.
func WithIgnoreValue() OpOption {
return func(op *Op) {
@ -399,7 +474,7 @@ func WithIgnoreValue() OpOption {
}
// WithIgnoreLease updates the key using its current lease.
// Empty lease should be passed when ignore_lease is set.
// This option can not be combined with WithLease.
// Returns an error if the key does not exist.
func WithIgnoreLease() OpOption {
return func(op *Op) {
@ -424,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) {
}
}
// WithAttachedKeys requests lease timetolive API to return
// attached keys of given lease ID.
// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
func WithAttachedKeys() LeaseOption {
return func(op *LeaseOp) { op.attachedKeys = true }
}
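The new OpTxn constructor and accessors added above can be exercised purely on the client side; a small sketch (key and value names are arbitrary, and actually executing a nested transaction still depends on server support):

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Build a transaction as a single Op: if "k" is absent, create it,
	// otherwise read it back.
	cmps := []clientv3.Cmp{clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)}
	thenOps := []clientv3.Op{clientv3.OpPut("k", "v")}
	elseOps := []clientv3.Op{clientv3.OpGet("k")}

	op := clientv3.OpTxn(cmps, thenOps, elseOps)

	// The new accessors expose the op's shape without executing it.
	fmt.Println("is txn:", op.IsTxn())
	gotCmps, gotThen, gotElse := op.Txn()
	fmt.Println(len(gotCmps), len(gotThen), len(gotElse))
}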

clientv3/ready_wait.go Normal file
View File

@ -0,0 +1,30 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import "golang.org/x/net/context"
// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
select {
case <-ready:
return nil
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-clientCtx.Done():
return clientCtx.Err()
}
}

View File

@ -17,135 +17,183 @@ package clientv3
import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc) error
type retryRPCFunc func(context.Context, rpcFunc) error
type retryStopErrFunc func(error) bool
func (c *Client) newRetryWrapper() retryRpcFunc {
func isRepeatableStopError(err error) bool {
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
return true
}
// only retry if unavailable
ev, _ := status.FromError(err)
return ev.Code() != codes.Unavailable
}
func isNonRepeatableStopError(err error) bool {
ev, _ := status.FromError(err)
if ev.Code() != codes.Unavailable {
return true
}
desc := rpctypes.ErrorDesc(err)
return desc != "there is no address available" && desc != "there is no connection available"
}
func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc) error {
for {
if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
return err
}
pinned := c.balancer.pinned()
err := f(rpcCtx)
if err == nil {
return nil
}
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if _, ok := eErr.(rpctypes.EtcdError); ok {
return err
if logger.V(4) {
logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
}
// only retry if unavailable
if grpc.Code(err) != codes.Unavailable {
return err
if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
// mark this before endpoint switch is triggered
c.balancer.hostPortError(pinned, err)
c.balancer.next()
if logger.V(4) {
logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
}
}
select {
case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-c.ctx.Done():
return c.ctx.Err()
if isStop(err) {
return err
}
}
}
}
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
func (c *Client) newAuthRetryWrapper() retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc) error {
for {
pinned := c.balancer.pinned()
err := f(rpcCtx)
if err == nil {
return nil
}
if logger.V(4) {
logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
}
// always stop retry on etcd errors other than invalid auth token
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := c.getToken(rpcCtx)
if gterr != nil {
if logger.V(4) {
logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
}
return err // return the original error for simplicity
}
continue
}
return err
}
}
}
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
conn := pb.NewKVClient(c.conn)
retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry}
retryAuthWrapper := c.newAuthRetryWrapper()
return &retryKVClient{
&nonRepeatableKVClient{retryBasic, retryAuthWrapper},
retryAuthWrapper}
}
type retryKVClient struct {
*retryWriteKVClient
*nonRepeatableKVClient
repeatableRetry retryRPCFunc
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
err = rkv.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Range(rctx, in, opts...)
return err
})
return resp, err
}
type retryWriteKVClient struct {
pb.KVClient
retryf retryRpcFunc
type nonRepeatableKVClient struct {
kc pb.KVClient
nonRepeatableRetry retryRPCFunc
}
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Put(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Put(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
// TODO: repeatableRetry if read-only txn
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Txn(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.kc.Compact(rctx, in, opts...)
return err
})
return resp, err
}
type retryLeaseClient struct {
pb.LeaseClient
retryf retryRpcFunc
lc pb.LeaseClient
repeatableRetry retryRPCFunc
}
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
// RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient {
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
return &retryLeaseClient{retry, c.retryAuthWrapper}
retry := &retryLeaseClient{
pb.NewLeaseClient(c.conn),
c.newRetryWrapper(isRepeatableStopError),
}
return &retryLeaseClient{retry, c.newAuthRetryWrapper()}
}
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
return err
})
return resp, err
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
return err
})
return resp, err
@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
return err
})
return resp, err
}
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
return err
})
return stream, err
}
type retryClusterClient struct {
pb.ClusterClient
retryf retryRpcFunc
*nonRepeatableClusterClient
repeatableRetry retryRPCFunc
}
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
cc := pb.NewClusterClient(c.conn)
return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry}
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
err = rcc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberList(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
type nonRepeatableClusterClient struct {
cc pb.ClusterClient
nonRepeatableRetry retryRPCFunc
}
func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
return err
})
return resp, err
}
// RetryMaintenanceClient implements a Maintenance.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
mc := pb.NewMaintenanceClient(conn)
return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry}
}
type retryMaintenanceClient struct {
*nonRepeatableMaintenanceClient
repeatableRetry retryRPCFunc
}
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Alarm(rctx, in, opts...)
return err
})
return resp, err
}
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Status(rctx, in, opts...)
return err
})
return resp, err
}
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Hash(rctx, in, opts...)
return err
})
return resp, err
}
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
stream, err = rmc.mc.Snapshot(rctx, in, opts...)
return err
})
return stream, err
}
type nonRepeatableMaintenanceClient struct {
mc pb.MaintenanceClient
nonRepeatableRetry retryRPCFunc
}
func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Defragment(rctx, in, opts...)
return err
})
return resp, err
}
type retryAuthClient struct {
pb.AuthClient
retryf retryRpcFunc
*nonRepeatableAuthClient
repeatableRetry retryRPCFunc
}
// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
// RetryAuthClient implements a AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
ac := pb.NewAuthClient(c.conn)
return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry}
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserList(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserGet(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleGet(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleList(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
type nonRepeatableAuthClient struct {
ac pb.AuthClient
nonRepeatableRetry retryRPCFunc
}
func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.AuthEnable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.AuthDisable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
resp, err = rac.ac.Authenticate(rctx, in, opts...)
return err
})
return resp, err
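The split between repeatable and non-repeatable retries above hinges mostly on the gRPC status code: only a plain Unavailable error, which means the request likely never reached a member, is safe to repeat. A standalone sketch that mirrors that idea (it does not call the unexported helpers and omits their extra rpctypes.EtcdError and error-description checks):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// shouldStopRepeatable mirrors the core of isRepeatableStopError: keep
// retrying only while the error is a gRPC Unavailable.
func shouldStopRepeatable(err error) bool {
	s, _ := status.FromError(err)
	return s.Code() != codes.Unavailable
}

func main() {
	transient := status.Error(codes.Unavailable, "endpoint down")
	fatal := status.Error(codes.InvalidArgument, "bad request")

	fmt.Println(shouldStopRepeatable(transient)) // false: keep retrying
	fmt.Println(shouldStopRepeatable(fatal))     // true: surface the error
}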

View File

@ -18,13 +18,14 @@ import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Txn is the interface that wraps mini-transactions.
//
// Tx.If(
// Txn(context.TODO()).If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
@ -66,6 +67,8 @@ type txn struct {
sus []*pb.RequestOp
fas []*pb.RequestOp
callOpts []grpc.CallOption
}
func (txn *txn) If(cs ...Cmp) Txn {
@ -135,30 +138,14 @@ func (txn *txn) Else(ops ...Op) Txn {
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
for {
resp, err := txn.commit()
if err == nil {
return resp, err
}
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
return nil, toErr(txn.ctx, err)
}
}
}
func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
var opts []grpc.CallOption
if !txn.isWrite {
opts = []grpc.CallOption{grpc.FailFast(false)}
}
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
var resp *pb.TxnResponse
var err error
resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
if err != nil {
return nil, err
return nil, toErr(txn.ctx, err)
}
return (*TxnResponse)(resp), nil
}
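Since Commit no longer loops on retriable errors itself, the caller sees any failure directly (already converted with toErr). A typical If/Then/Else commit, sketched with placeholder endpoint and keys:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Create "k" only if it does not exist yet; otherwise read it back.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)).
		Then(clientv3.OpPut("k", "v")).
		Else(clientv3.OpGet("k")).
		Commit()
	if err != nil {
		// Commit now returns the converted error straight to the caller.
		log.Fatal(err)
	}
	fmt.Println("compare succeeded:", resp.Succeeded)
}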

View File

@ -22,9 +22,12 @@ import (
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
@ -40,10 +43,9 @@ type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
// through the returned channel. If revisions waiting to be sent over the
// watch are compacted, then the watch will be canceled by the server, the
// client will post a compacted error watch response, and the channel will close.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
@ -90,7 +92,7 @@ func (wr *WatchResponse) Err() error {
return v3rpc.ErrCompacted
case wr.Canceled:
if len(wr.cancelReason) != 0 {
return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason))
return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
}
return v3rpc.ErrFutureRev
}
@ -104,7 +106,8 @@ func (wr *WatchResponse) IsProgressNotify() bool {
// watcher implements the Watcher interface
type watcher struct {
remote pb.WatchClient
remote pb.WatchClient
callOpts []grpc.CallOption
// mu protects the grpc streams map
mu sync.RWMutex
@ -115,8 +118,9 @@ type watcher struct {
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
type watchGrpcStream struct {
owner *watcher
remote pb.WatchClient
owner *watcher
remote pb.WatchClient
callOpts []grpc.CallOption
// ctx controls internal remote.Watch requests
ctx context.Context
@ -135,7 +139,7 @@ type watchGrpcStream struct {
respc chan *pb.WatchResponse
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv to the watch stream reconn logic
// errc transmits errors from grpc Recv to the watch stream reconnect logic
errc chan error
// closingc gets the watcherStream of closing watchers
closingc chan *watcherStream
@ -187,14 +191,18 @@ type watcherStream struct {
}
func NewWatcher(c *Client) Watcher {
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
}
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
return &watcher{
func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
w := &watcher{
remote: wc,
streams: make(map[string]*watchGrpcStream),
}
if c != nil {
w.callOpts = c.callOpts
}
return w
}
// never closes
@ -213,17 +221,17 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
wgs := &watchGrpcStream{
owner: w,
remote: w.remote,
callOpts: w.callOpts,
ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx),
ctxKey: streamKeyFromCtx(inctx),
cancel: cancel,
substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
reqc: make(chan *watchRequest),
donec: make(chan struct{}),
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
resumec: make(chan struct{}),
respc: make(chan *pb.WatchResponse),
reqc: make(chan *watchRequest),
donec: make(chan struct{}),
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
resumec: make(chan struct{}),
}
go wgs.run()
return wgs
@ -254,7 +262,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
}
ok := false
ctxKey := fmt.Sprintf("%v", ctx)
ctxKey := streamKeyFromCtx(ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
@ -317,14 +325,14 @@ func (w *watcher) Close() (err error) {
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
if werr := wgs.close(); werr != nil {
err = werr
}
}
return err
}
func (w *watchGrpcStream) Close() (err error) {
func (w *watchGrpcStream) close() (err error) {
w.cancel()
<-w.donec
select {
@ -435,7 +443,7 @@ func (w *watchGrpcStream) run() {
initReq: *wreq,
id: -1,
outc: outc,
// unbufffered so resumes won't cause repeat events
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
}
@ -487,7 +495,7 @@ func (w *watchGrpcStream) run() {
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed to recv; spawn another if possible
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
@ -749,7 +757,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
return donec
}
// joinSubstream waits for all substream goroutines to complete
// joinSubstreams waits for all substream goroutines to complete.
func (w *watchGrpcStream) joinSubstreams() {
for _, ws := range w.substreams {
<-ws.donec
@ -761,7 +769,9 @@ func (w *watchGrpcStream) joinSubstreams() {
}
}
// openWatchClient retries opening a watchclient until retryConnection fails
// openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for {
select {
@ -772,7 +782,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
return nil, err
default:
}
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
break
}
if isHaltErr(w.ctx, err) {
@ -782,7 +792,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
return ws, nil
}
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{
StartRevision: wr.rev,
@ -795,3 +805,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
return fmt.Sprintf("%+v", md)
}
return ""
}
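To show the watch behavior described in the revised interface comment (the server cancels a compacted watch, the client posts an error response, then the channel closes), a minimal consumer sketch with placeholder endpoint and prefix:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Watch everything under "svc/"; the channel closes after cancellation.
	wch := cli.Watch(context.Background(), "svc/", clientv3.WithPrefix())
	for wresp := range wch {
		if err := wresp.Err(); err != nil {
			// Compaction and other cancellations surface here before the
			// channel closes.
			log.Println("watch error:", err)
			return
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}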

View File

@ -33,7 +33,11 @@ type yamlConfig struct {
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
Certfile string `json:"cert-file"`
Keyfile string `json:"key-file"`
CAfile string `json:"ca-file"`
TrustedCAfile string `json:"trusted-ca-file"`
// CAfile is being deprecated. Use 'TrustedCAfile' instead.
// TODO: deprecate this in v4
CAfile string `json:"ca-file"`
}
// NewConfig creates a new clientv3.Config from a yaml file.
@ -66,8 +70,11 @@ func NewConfig(fpath string) (*clientv3.Config, error) {
}
}
if yc.CAfile != "" {
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
if yc.CAfile != "" && yc.TrustedCAfile == "" {
yc.TrustedCAfile = yc.CAfile
}
if yc.TrustedCAfile != "" {
cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile})
if err != nil {
return nil, err
}
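A sketch of loading a client config that uses the new trusted-ca-file key (the deprecated ca-file still maps onto it, as the fallback above shows). The file name, endpoints key, and certificate paths are assumptions for illustration:

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	yamlcfg "github.com/coreos/etcd/clientv3/yaml"
)

func main() {
	// client.yaml (hypothetical contents):
	//   endpoints: ["https://localhost:2379"]
	//   cert-file: client.crt
	//   key-file: client.key
	//   trusted-ca-file: ca.crt   # "ca-file" still works but is deprecated
	cfg, err := yamlcfg.NewConfig("client.yaml")
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}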

View File

@ -50,7 +50,7 @@ func TestConfigFromFile(t *testing.T) {
&yamlConfig{
Keyfile: privateKeyPath,
Certfile: certPath,
CAfile: caPath,
TrustedCAfile: caPath,
InsecureSkipTLSVerify: true,
},
false,
@ -64,9 +64,9 @@ func TestConfigFromFile(t *testing.T) {
},
{
&yamlConfig{
Keyfile: privateKeyPath,
Certfile: certPath,
CAfile: "bad",
Keyfile: privateKeyPath,
Certfile: certPath,
TrustedCAfile: "bad",
},
true,
},
@ -113,7 +113,7 @@ func TestConfigFromFile(t *testing.T) {
if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 {
t.Errorf("#%d: failed to load in cert", i)
}
if tt.ym.CAfile != "" && cfg.TLS.RootCAs == nil {
if tt.ym.TrustedCAfile != "" && cfg.TLS.RootCAs == nil {
t.Errorf("#%d: failed to load in ca cert", i)
}
if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify {

View File

@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

cmd/vendor/github.com/coreos/bbolt/bolt_arm.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

View File

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

cmd/vendor/github.com/coreos/bbolt/bolt_mips64x.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// +build mips64 mips64le
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -1,7 +1,12 @@
// +build mips mipsle
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
const maxMapSize = 0x40000000 // 1GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -13,29 +13,32 @@ import (
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
if timeout != 0 {
t = time.Now()
}
fd := db.file.Fd()
flag := syscall.LOCK_NB
if exclusive {
flag |= syscall.LOCK_EX
} else {
flag |= syscall.LOCK_SH
}
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
flag := syscall.LOCK_SH
if exclusive {
flag = syscall.LOCK_EX
}
// Otherwise attempt to obtain an exclusive lock.
err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
// Attempt to obtain an exclusive lock.
err := syscall.Flock(int(fd), flag)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
return err
}
// If we timed out then return an error.
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
return ErrTimeout
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
time.Sleep(flockRetryTimeout)
}
}

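The reworked loop above computes the flock flags once, retries every flockRetryTimeout (50ms, added to db.go in this same diff), and returns ErrTimeout once time.Since(t) exceeds the timeout minus one retry interval. Callers reach this behaviour through Options.Timeout; a minimal usage sketch, assuming the vendored import path used in this tree and a placeholder database path:

package main

import (
	"log"
	"time"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// Wait at most one second for the file lock; if another process holds
	// an exclusive lock longer than that, Open returns bolt.ErrTimeout.
	db, err := bolt.Open("/tmp/example.db", 0600, &bolt.Options{
		Timeout: 1 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}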
View File

@ -13,34 +13,33 @@ import (
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
if timeout != 0 {
t = time.Now()
}
fd := db.file.Fd()
var lockType int16
if exclusive {
lockType = syscall.F_WRLCK
} else {
lockType = syscall.F_RDLCK
}
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Whence = 0
lock.Pid = 0
if exclusive {
lock.Type = syscall.F_WRLCK
} else {
lock.Type = syscall.F_RDLCK
}
err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
// Attempt to obtain an exclusive lock.
lock := syscall.Flock_t{Type: lockType}
err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
// If we timed out then return an error.
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
return ErrTimeout
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
time.Sleep(flockRetryTimeout)
}
}

View File

@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
db.lockfile = f
var t time.Time
if timeout != 0 {
t = time.Now()
}
fd := f.Fd()
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
// Attempt to obtain an exclusive lock.
err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
// If we timed out then return an error.
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
return ErrTimeout
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
time.Sleep(flockRetryTimeout)
}
}
@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path+lockExt)
os.Remove(db.path + lockExt)
return err
}

View File

@ -14,13 +14,6 @@ const (
MaxValueSize = (1 << 31) - 2
)
const (
maxUint = ^uint(0)
minUint = 0
maxInt = int(^uint(0) >> 1)
minInt = -maxInt - 1
)
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
const (
@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable {
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
} else {
return nil, ErrIncompatibleValue
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error {
// Move cursor to correct position.
c := b.Cursor()
_, _, flags := c.seek(key)
k, _, flags := c.seek(key)
// Return nil if the key doesn't exist.
if !bytes.Equal(key, k) {
return nil
}
// Return an error if there is already existing bucket value.
if (flags & bucketLeafFlag) != 0 {
@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error {
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {

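The Sequence and SetSequence accessors added above complement the existing NextSequence, so a bucket's counter can be read, seeded, and advanced inside a write transaction. A short usage sketch against that API; the database path and bucket name are arbitrary:

package main

import (
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/seq.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("orders"))
		if err != nil {
			return err
		}
		// Seed the counter (e.g. after importing data), then advance it.
		if err := b.SetSequence(1000); err != nil {
			return err
		}
		next, err := b.NextSequence() // 1001
		if err != nil {
			return err
		}
		fmt.Println("current:", b.Sequence(), "allocated:", next)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}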
View File

@ -7,8 +7,7 @@ import (
"log"
"os"
"runtime"
"runtime/debug"
"strings"
"sort"
"sync"
"time"
"unsafe"
@ -23,6 +22,8 @@ const version = 2
// Represents a marker value to indicate that a file is a Bolt DB.
const magic uint32 = 0xED0CDAED
const pgidNoFreelist pgid = 0xffffffffffffffff
// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
@ -39,6 +40,9 @@ const (
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()
// The time elapsed between consecutive file locking attempts.
const flockRetryTimeout = 50 * time.Millisecond
// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@ -61,6 +65,11 @@ type DB struct {
// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
NoSync bool
// When true, skips syncing freelist to disk. This improves the database
// write performance under normal operation, but requires a full database
// re-sync during recovery.
NoFreelistSync bool
// When true, skips the truncate call when growing the database.
// Setting this to true is only safe on non-ext3/ext4 systems.
// Skipping truncation avoids preallocation of hard drive space and
@ -107,9 +116,11 @@ type DB struct {
opened bool
rwtx *Tx
txs []*Tx
freelist *freelist
stats Stats
freelist *freelist
freelistLoad sync.Once
pagePool sync.Pool
batchMu sync.Mutex
@ -148,14 +159,17 @@ func (db *DB) String() string {
// If the file does not exist then it will be created automatically.
// Passing in nil options will cause Bolt to open the database with the default options.
func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
var db = &DB{opened: true}
db := &DB{
opened: true,
}
// Set default options if no options are provided.
if options == nil {
options = DefaultOptions
}
db.NoSync = options.NoSync
db.NoGrowSync = options.NoGrowSync
db.MmapFlags = options.MmapFlags
db.NoFreelistSync = options.NoFreelistSync
// Set default values for later DB operations.
db.MaxBatchSize = DefaultMaxBatchSize
@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// The database file is locked using the shared lock (more than one process may
// hold a lock at the same time) otherwise (options.ReadOnly is set).
if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
db.lockfile = nil // make 'unused' happy. TODO: rework locks
_ = db.close()
return nil, err
}
@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// Default values for test hooks
db.ops.writeAt = db.file.WriteAt
if db.pageSize = options.PageSize; db.pageSize == 0 {
// Set the default page size to the OS page size.
db.pageSize = defaultPageSize
}
// Initialize the database if it doesn't exist.
if info, err := db.file.Stat(); err != nil {
return nil, err
@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
} else {
// Read the first meta page to determine the page size.
var buf [0x1000]byte
if _, err := db.file.ReadAt(buf[:], 0); err == nil {
m := db.pageInBuffer(buf[:], 0).meta()
if err := m.validate(); err != nil {
// If we can't read the page size, we can assume it's the same
// as the OS -- since that's how the page size was chosen in the
// first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
db.pageSize = os.Getpagesize()
} else {
// If we can't read the page size, but can read a page, assume
// it's the same as the OS or one given -- since that's how the
// page size was chosen in the first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
//
// TODO: scan for next page
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
db.pageSize = int(m.pageSize)
}
} else {
return nil, ErrInvalid
}
}
@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return nil, err
}
// Read in the freelist.
db.freelist = newFreelist()
db.freelist.read(db.page(db.meta().freelist))
if db.readOnly {
return db, nil
}
db.loadFreelist()
// Flush freelist when transitioning from no sync to sync so
// NoFreelistSync unaware boltdb can open the db later.
if !db.NoFreelistSync && !db.hasSyncedFreelist() {
tx, err := db.Begin(true)
if tx != nil {
err = tx.Commit()
}
if err != nil {
_ = db.close()
return nil, err
}
}
// Mark the database as opened and return.
return db, nil
}
// loadFreelist reads the freelist if it is synced, or reconstructs it
// by scanning the DB if it is not synced. It assumes there are no
// concurrent accesses being made to the freelist.
func (db *DB) loadFreelist() {
db.freelistLoad.Do(func() {
db.freelist = newFreelist()
if !db.hasSyncedFreelist() {
// Reconstruct free list by scanning the DB.
db.freelist.readIDs(db.freepages())
} else {
// Read free list from freelist page.
db.freelist.read(db.page(db.meta().freelist))
}
db.stats.FreePageN = len(db.freelist.ids)
})
}
func (db *DB) hasSyncedFreelist() bool {
return db.meta().freelist != pgidNoFreelist
}
// mmap opens the underlying memory-mapped file and initializes the meta references.
// minsz is the minimum size that the new mmap can be.
func (db *DB) mmap(minsz int) error {
@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) {
// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
// Set the page size to the OS page size.
db.pageSize = os.Getpagesize()
// Create two meta pages on a buffer.
buf := make([]byte, db.pageSize*4)
for i := 0; i < 2; i++ {
@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) {
t := &Tx{writable: true}
t.init(db)
db.rwtx = t
db.freePages()
return t, nil
}
// Free any pages associated with closed read-only transactions.
var minid txid = 0xFFFFFFFFFFFFFFFF
for _, t := range db.txs {
if t.meta.txid < minid {
minid = t.meta.txid
}
// freePages releases any pages associated with closed read-only transactions.
func (db *DB) freePages() {
// Free all pending pages prior to earliest open transaction.
sort.Sort(txsById(db.txs))
minid := txid(0xFFFFFFFFFFFFFFFF)
if len(db.txs) > 0 {
minid = db.txs[0].meta.txid
}
if minid > 0 {
db.freelist.release(minid - 1)
}
return t, nil
// Release unused txid extents.
for _, t := range db.txs {
db.freelist.releaseRange(minid, t.meta.txid-1)
minid = t.meta.txid + 1
}
db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
// Any page both allocated and freed in an extent is safe to release.
}
type txsById []*Tx
func (t txsById) Len() int { return len(t) }
func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
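freePages now sorts the open read transactions and calls releaseRange over the txid extents between them, instead of releasing only pages older than the single minimum txid. A toy, self-contained model of the extent computation; releaseExtents is invented for illustration and is not bbolt's API:

package main

import (
	"fmt"
	"sort"
)

// releaseExtents mirrors the loop in freePages: everything strictly between
// two neighbouring open read txids (plus everything before the first and
// after the last) is safe to release.
func releaseExtents(openTxids []uint64) [][2]uint64 {
	sort.Slice(openTxids, func(i, j int) bool { return openTxids[i] < openTxids[j] })
	var out [][2]uint64
	low := uint64(0)
	for _, id := range openTxids {
		if id > 0 && low <= id-1 {
			out = append(out, [2]uint64{low, id - 1})
		}
		low = id + 1
	}
	out = append(out, [2]uint64{low, ^uint64(0)}) // release everything newer
	return out
}

func main() {
	// With readers open at txid 5 and 9, pages pended by txids
	// 0-4, 6-8 and 10-max are reusable.
	fmt.Println(releaseExtents([]uint64{9, 5}))
}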
// removeTx removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
// Release the read lock on the mmap.
@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) {
// Remove the transaction.
for i, t := range db.txs {
if t == tx {
db.txs = append(db.txs[:i], db.txs[i+1:]...)
last := len(db.txs) - 1
db.txs[i] = db.txs[last]
db.txs[last] = nil
db.txs = db.txs[:last]
break
}
}
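removeTx now drops the finished transaction by swapping in the last element and truncating, rather than append(db.txs[:i], db.txs[i+1:]...): there is no tail shift, and the vacated slot is nilled so its *Tx can be garbage collected. Ordering does not matter here because freePages re-sorts db.txs. The generic idiom, as a small sketch:

package main

import "fmt"

// removeAt deletes element i from s in O(1) by swapping in the last element.
// Order is not preserved; the freed tail slot is zeroed so the GC can reclaim
// whatever it pointed to.
func removeAt(s []*int, i int) []*int {
	last := len(s) - 1
	s[i] = s[last]
	s[last] = nil
	return s[:last]
}

func main() {
	a, b, c := 1, 2, 3
	s := []*int{&a, &b, &c}
	s = removeAt(s, 0)
	for _, p := range s {
		fmt.Print(*p, " ") // 3 2
	}
	fmt.Println()
}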
@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error {
return err
}
if err := t.Rollback(); err != nil {
return err
}
return nil
return t.Rollback()
}
// Batch calls fn as part of a batch. It behaves similar to Update,
@ -823,7 +891,7 @@ func (db *DB) meta() *meta {
}
// allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
func (db *DB) allocate(txid txid, count int) (*page, error) {
// Allocate a temporary buffer for the page.
var buf []byte
if count == 1 {
@ -835,7 +903,7 @@ func (db *DB) allocate(count int) (*page, error) {
p.overflow = uint32(count - 1)
// Use pages from the freelist if they are available.
if p.id = db.freelist.allocate(count); p.id != 0 {
if p.id = db.freelist.allocate(txid, count); p.id != 0 {
return p, nil
}
@ -890,6 +958,38 @@ func (db *DB) IsReadOnly() bool {
return db.readOnly
}
func (db *DB) freepages() []pgid {
tx, err := db.beginTx()
defer func() {
err = tx.Rollback()
if err != nil {
panic("freepages: failed to rollback tx")
}
}()
if err != nil {
panic("freepages: failed to open read only tx")
}
reachable := make(map[pgid]*page)
nofreed := make(map[pgid]bool)
ech := make(chan error)
go func() {
for e := range ech {
panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
}
}()
tx.checkBucket(&tx.root, reachable, nofreed, ech)
close(ech)
var fids []pgid
for i := pgid(2); i < db.meta().pgid; i++ {
if _, ok := reachable[i]; !ok {
fids = append(fids, i)
}
}
return fids
}
// Options represents the options that can be set when opening a database.
type Options struct {
// Timeout is the amount of time to wait to obtain a file lock.
@ -900,6 +1000,10 @@ type Options struct {
// Sets the DB.NoGrowSync flag before memory mapping the file.
NoGrowSync bool
// Do not sync freelist to disk. This improves the database write performance
// under normal operation, but requires a full database re-sync during recovery.
NoFreelistSync bool
// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
// grab a shared lock (UNIX).
ReadOnly bool
@ -916,6 +1020,14 @@ type Options struct {
// If initialMmapSize is smaller than the previous database size,
// it takes no effect.
InitialMmapSize int
// PageSize overrides the default OS page size.
PageSize int
// NoSync sets the initial value of DB.NoSync. Normally this can just be
// set directly on the DB itself when returned from Open(), but this option
// is useful in APIs which expose Options but not the underlying DB.
NoSync bool
}
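All three new Options fields are wired through Open above. A hedged example of setting them; the values are arbitrary, and NoFreelistSync trades a freelist rebuild on the next open for cheaper commits:

package main

import (
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/tuned.db", 0600, &bolt.Options{
		NoFreelistSync: true, // skip persisting the freelist; it is rebuilt by scanning on open
		PageSize:       4096, // override the default OS page size
		NoSync:         true, // same effect as setting DB.NoSync after Open
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}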
// DefaultOptions represent the options used if nil options are passed into Open().
@ -952,15 +1064,11 @@ func (s *Stats) Sub(other *Stats) Stats {
diff.PendingPageN = s.PendingPageN
diff.FreeAlloc = s.FreeAlloc
diff.FreelistInuse = s.FreelistInuse
diff.TxN = other.TxN - s.TxN
diff.TxN = s.TxN - other.TxN
diff.TxStats = s.TxStats.Sub(&other.TxStats)
return diff
}
func (s *Stats) add(other *Stats) {
s.TxStats.add(&other.TxStats)
}
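With this fix the TxN delta reads s.TxN - other.TxN, matching the direction of TxStats.Sub, so the usual snapshot-and-diff pattern reports the read transactions started during the interval. A rough sketch (path is a placeholder):

package main

import (
	"fmt"
	"log"
	"time"

	bolt "github.com/coreos/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/stats.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prev := db.Stats()
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Second)
		cur := db.Stats()
		diff := cur.Sub(&prev) // diff.TxN = cur.TxN - prev.TxN
		fmt.Println("read transactions started in the last 10s:", diff.TxN)
		prev = cur
	}
}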
type Info struct {
Data uintptr
PageSize int
@ -999,7 +1107,8 @@ func (m *meta) copy(dest *meta) {
func (m *meta) write(p *page) {
if m.root.root >= m.pgid {
panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
} else if m.freelist >= m.pgid {
} else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
// TODO: reject pgidNoFreeList if !NoFreelistSync
panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
}
@ -1026,11 +1135,3 @@ func _assert(condition bool, msg string, v ...interface{}) {
panic(fmt.Sprintf("assertion failed: "+msg, v...))
}
}
func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
func printstack() {
stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
fmt.Fprintln(os.Stderr, stack)
}

Some files were not shown because too many files have changed in this diff.