Compare commits
152 Commits
v3.3.9_plu...v3.2.10
SHA1:
694728c496
1557f8b534
4b9bfa17ee
8de0c0419a
3039c639c0
91335d01bb
a8c84ffc93
939337f450
2a6d50470d
d62e39d5ca
7f0f5e2b3c
eb1589ad35
546d5fe835
fddae84ce2
6d406285e6
8dc20ead31
d3a3c3154e
d5572964e1
ea51c25030
d1447a8f5a
c28c14a5f4
f9eb75044a
2250f71e23
52be1d7b19
712024d3e5
7d99afdc7c
5ceea41af4
2f74456443
fc87ae4202
f1d7dd87da
ad212d339b
9f49665284
78f8d6e185
a954a0de53
0c3defdd2b
814588d166
2d2932822c
e211fb6de3
fb7e274309
4a61fcf42d
4c8fa30dda
01c4f35b30
15e9510d2c
09b7fd4975
bb66589f8c
267a2fc8c9
1fc300ecbd
877d0ce469
2188513161
5c7cff66b6
8c99ab80bd
9d43462d17
78d68226e6
e9d576c3d6
1c578cd442
b97714b3e6
ce0a61ff67
74783a38ae
d372ff96a0
8c7b9db9cc
f1fb342305
fa0f278783
e197c14847
e7bf5477de
f81b72fd93
d0d1a87aa9
7c6a9a7317
be8f102efb
3003901447
157cfac31b
40a1704e6f
30981ecb0a
f65a11ced5
db4838d4eb
8ab42fb045
ff9a0a3527
c31bec0f29
19fe4b0cac
a5d94fe229
e8f3cbf1c6
856502f788
ae23b0ef2f
5ee89be616
38373b342d
536a5f594b
49e6916e66
b9b6f6f7c4
6ecbb3bbc5
cb2a496c4d
fdf525a3fd
40468ab11f
f8f79666d4
fefcf348f1
81d39a75ff
8f2b48465f
026c1734b2
81e1d03d02
6171334595
55de54a757
c14aad0ba6
91ccc93042
61fc123e7a
71d2008385
79794bf556
db0ca8963f
27a3356c74
4526284326
0b0b1992b8
ed7ef5be8b
ff5be50ee5
a032b3b914
9388a27649
af1d732916
939aa66b48
3365dd4ff0
959d55ae80
3e1992140a
b547b982b9
56477ca998
66722b1ada
963339d265
c87594f27c
e72ad5dd2a
3eb5d24cab
8b9041a938
864ffec88c
12bc2bba36
3a43afce5a
0e56ea37e7
743192aa3b
e8b156578f
61f3338ce7
effffdbdca
9bac803bee
9169ad0d7d
482a7839d9
ba3058ca79
0e90e504f5
998fa0de76
c273735729
c85f736522
a375ff172e
1893af9bbd
b4c655677a
c2160adf1d
5ada311416
f042cd7d9c
f0a400a3a8
6066977280
fc88eccc74
5cb28a7d83
de57e88643
.gitignore (vendored, 1 change)

@@ -1,3 +1,4 @@
+/agent-*
 /coverage
 /gopath
 /gopath.proto
.semaphore.sh (new executable file, 16 lines)

@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+TEST_SUFFIX=$(date +%s | base64 | head -c 15)
+
+TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' MANUAL_VER=v3.2.9"
+if [ "$TEST_ARCH" == "386" ]; then
+  TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'"
+fi
+
+docker run \
+  --rm \
+  --volume=`pwd`:/go/src/github.com/coreos/etcd \
+  gcr.io/etcd-development/etcd-test:go1.8.5 \
+  /bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log"
+
+! egrep "(--- FAIL:|panic: test timed out|appears to have leaked|Too many goroutines)" -B50 -A10 test-${TEST_SUFFIX}.log
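As a usage note for the new script above: it takes its only switch from the `TEST_ARCH` environment variable, so a local run might look like this (hypothetical invocation; assumes Docker is installed and the repository root is the working directory):

```sh
# full release/integration pass
./.semaphore.sh

# reduced pass for 32-bit builds
TEST_ARCH=386 ./.semaphore.sh
```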
.travis.yml (70 changes)

@@ -1,11 +1,13 @@
 dist: trusty
 language: go
 go_import_path: github.com/coreos/etcd
-sudo: false
+sudo: required
+
+services: docker
 
 go:
-- 1.8.3
+- 1.8.5
 - tip
 
 notifications:
   on_success: never
@@ -13,19 +15,25 @@ notifications:
 
 env:
   matrix:
-  - TARGET=amd64
-  - TARGET=darwin-amd64
-  - TARGET=windows-amd64
-  - TARGET=arm64
-  - TARGET=arm
-  - TARGET=386
-  - TARGET=ppc64le
+  - TARGET=amd64
+  - TARGET=amd64-go-tip
+  - TARGET=darwin-amd64
+  - TARGET=windows-amd64
+  - TARGET=arm64
+  - TARGET=arm
+  - TARGET=386
+  - TARGET=ppc64le
 
 matrix:
   fast_finish: true
   allow_failures:
-  - go: tip
+  - go: tip
+    env: TARGET=amd64-go-tip
   exclude:
+  - go: 1.8.5
+    env: TARGET=amd64-go-tip
   - go: tip
     env: TARGET=amd64
   - go: tip
     env: TARGET=darwin-amd64
   - go: tip
@@ -39,42 +47,42 @@ matrix:
   - go: tip
     env: TARGET=ppc64le
 
-addons:
-  apt:
-    packages:
-    - libpcap-dev
-    - libaspell-dev
-    - libhunspell-dev
-
 before_install:
-- go get -v -u github.com/chzchzchz/goword
-- go get -v -u github.com/coreos/license-bill-of-materials
-- go get -v -u honnef.co/go/tools/cmd/gosimple
-- go get -v -u honnef.co/go/tools/cmd/unused
-- go get -v -u honnef.co/go/tools/cmd/staticcheck
-- ./scripts/install-marker.sh amd64
+- docker pull gcr.io/etcd-development/etcd-test:go1.8.5
 
 # disable godep restore override
 install:
 - pushd cmd/etcd && go get -t -v ./... && popd
 
 script:
 - >
   case "${TARGET}" in
   amd64)
+    docker run --rm \
+      --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+      /bin/bash -c "GOARCH=amd64 ./test"
+    ;;
+  amd64-go-tip)
     GOARCH=amd64 ./test
     ;;
   darwin-amd64)
-    GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=darwin GOARCH=amd64 ./build
+    docker run --rm \
+      --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+      /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=darwin GOARCH=amd64 ./build"
     ;;
   windows-amd64)
-    GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=windows GOARCH=amd64 ./build
+    docker run --rm \
+      --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+      /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=windows GOARCH=amd64 ./build"
    ;;
   386)
-    GOARCH=386 PASSES="build unit" ./test
+    docker run --rm \
+      --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+      /bin/bash -c "GOARCH=386 PASSES='build unit' ./test"
    ;;
   *)
     # test building out of gopath
-    GO_BUILD_FLAGS="-a -v" GOPATH="" GOARCH="${TARGET}" ./build
+    docker run --rm \
+      --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go1.8.5 \
+      /bin/bash -c "GO_BUILD_FLAGS='-a -v' GOARCH='${TARGET}' ./build"
     ;;
   esac
Dockerfile-test (new file, 57 lines)

@@ -0,0 +1,57 @@
+FROM ubuntu:16.10
+
+RUN rm /bin/sh && ln -s /bin/bash /bin/sh
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+
+RUN apt-get -y update \
+  && apt-get -y install \
+  build-essential \
+  gcc \
+  apt-utils \
+  pkg-config \
+  software-properties-common \
+  apt-transport-https \
+  libssl-dev \
+  sudo \
+  bash \
+  curl \
+  wget \
+  tar \
+  git \
+  netcat \
+  libaspell-dev \
+  libhunspell-dev \
+  hunspell-en-us \
+  aspell-en \
+  shellcheck \
+  && apt-get -y update \
+  && apt-get -y upgrade \
+  && apt-get -y autoremove \
+  && apt-get -y autoclean
+
+ENV GOROOT /usr/local/go
+ENV GOPATH /go
+ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
+ENV GO_VERSION REPLACE_ME_GO_VERSION
+ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
+RUN rm -rf ${GOROOT} \
+  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
+  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
+  && go version
+
+RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
+WORKDIR ${GOPATH}/src/github.com/coreos/etcd
+
+ADD ./scripts/install-marker.sh /tmp/install-marker.sh
+
+RUN go get -v -u -tags spell github.com/chzchzchz/goword \
+  && go get -v -u github.com/coreos/license-bill-of-materials \
+  && go get -v -u honnef.co/go/tools/cmd/gosimple \
+  && go get -v -u honnef.co/go/tools/cmd/unused \
+  && go get -v -u honnef.co/go/tools/cmd/staticcheck \
+  && go get -v -u github.com/wadey/gocovmerge \
+  && go get -v -u github.com/gordonklaus/ineffassign \
+  && /tmp/install-marker.sh amd64 \
+  && rm -f /tmp/install-marker.sh \
+  && curl -s https://codecov.io/bash >/codecov \
+  && chmod 700 /codecov
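The `REPLACE_ME_GO_VERSION` placeholder implies the Go version is substituted before building; one hypothetical way to build the image (the repository's actual build mechanism is not shown in this diff):

```sh
# substitute the Go version, then build the test image from the repo root
sed 's/REPLACE_ME_GO_VERSION/1.8.5/g' Dockerfile-test > Dockerfile.test-built
docker build -f Dockerfile.test-built -t gcr.io/etcd-development/etcd-test:go1.8.5 .
```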
@@ -24,6 +24,11 @@ curl -L http://localhost:2379/v3alpha/kv/put \
 curl -L http://localhost:2379/v3alpha/kv/range \
   -X POST -d '{"key": "Zm9v"}'
 # {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"},"kvs":[{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}],"count":"1"}
+
+# get all keys prefixed with "foo"
+curl -L http://localhost:2379/v3alpha/kv/range \
+  -X POST -d '{"key": "Zm9v", "range_end": "Zm9w"}'
+# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"},"kvs":[{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}],"count":"1"}
 ```
 
 Use `curl` to watch a key:
@@ -38,6 +43,15 @@ curl -L http://localhost:2379/v3alpha/kv/put \
 # {"result":{"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"2"},"events":[{"kv":{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}}]}}
 ```
+
+Use `curl` to issue a transaction:
+
+```bash
+curl -L http://localhost:2379/v3alpha/kv/txn \
+  -X POST \
+  -d '{"compare":[{"target":"CREATE","key":"Zm9v","createRevision":"2"}],"success":[{"requestPut":{"key":"Zm9v","value":"YmFy"}}]}'
+# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"3","raft_term":"2"},"succeeded":true,"responses":[{"response_put":{"header":{"revision":"3"}}}]}
+```
 
 ## Swagger
 
 Generated [Swagger][swagger] API definitions can be found at [rpc.swagger.json][swagger-doc].
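The keys and values in these gateway examples are base64-encoded, as the JSON gateway requires; the encodings can be checked with standard tools:

```sh
echo -n foo | base64   # Zm9v -> the "key" for prefix "foo"
echo -n fop | base64   # Zm9w -> the "range_end" (prefix with last byte incremented)
echo -n bar | base64   # YmFy -> the value in the responses above
```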
@@ -790,6 +790,7 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
 | created | created is set to true if the response is for a create watch request. The client should record the watch_id and expect to receive events for the created watcher from the same stream. All events sent to the created watcher will attach with the same watch_id. | bool |
 | canceled | canceled is set to true if the response is for a cancel watch request. No further events will be sent to the canceled watcher. | bool |
 | compact_revision | compact_revision is set to the minimum index if a watcher tries to watch at a compacted index. This happens when creating a watcher at a compacted revision or the watcher cannot catch up with the progress of the key-value store. The client should treat the watcher as canceled and should not try to create any watcher with the same start_revision again. | int64 |
+| cancel_reason | cancel_reason indicates the reason for canceling the watcher. | string |
 | events | | (slice of) mvccpb.Event |
@@ -2179,6 +2179,10 @@
           "format": "int64",
           "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store. \n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again."
         },
+        "cancel_reason": {
+          "type": "string",
+          "description": "cancel_reason indicates the reason for canceling the watcher."
+        },
         "events": {
           "type": "array",
           "items": {
@@ -4,8 +4,4 @@ For the most part, the etcd project is stable, but we are still moving fast! We
 
 ## The current experimental API/features are:
 
-- [gateway][gateway]: beta, to be stable in 3.2 release
-- [gRPC proxy][grpc-proxy]: alpha, to be stable in 3.2 release
-
-[gateway]: ../op-guide/gateway.md
-[grpc-proxy]: ../op-guide/grpc_proxy.md
+(none currently)
@@ -2,7 +2,7 @@
 
 ## System requirements
 
-The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. than For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
+The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
 
 ## Download the pre-built binary
@@ -47,12 +47,19 @@ Administrators who need to create reliable and scalable key-value stores for the
 - [Amazon Web Services][aws_platform]
 - [FreeBSD][freebsd_platform]
 
-### Upgrading and compatibility
+### Security
 
-- [Migrate applications from using API v2 to API v3][v2_migration]
-- [Upgrading a v2.3 cluster to v3.0][v3_upgrade]
-- [Upgrading a v3.0 cluster to v3.1][v31_upgrade]
-- [Upgrading a v3.1 cluster to v3.2][v32_upgrade]
+- [TLS][security]
+- [Role-based access control][authentication]
 
 ### Maintenance and troubleshooting
 
+- [Frequently asked questions][common questions]
 - [Monitoring][monitoring]
 - [Maintenance][maintenance]
 - [Failure modes][failures]
 - [Disaster recovery][recovery]
+- [Upgrading][upgrading]
 
 ## Learning
 
@@ -106,8 +113,6 @@ Answers to [common questions] about etcd.
 [freebsd_platform]: platforms/freebsd.md
 [aws_platform]: platforms/aws.md
 [experimental]: dev-guide/experimental_apis.md
-[v3_upgrade]: upgrades/upgrade_3_0.md
-[v31_upgrade]: upgrades/upgrade_3_1.md
-[v32_upgrade]: upgrades/upgrade_3_2.md
 [authentication]: op-guide/authentication.md
 [auth_design]: learning/auth_design.md
+[upgrading]: upgrades/upgrading-etcd.md
@@ -8,11 +8,11 @@
 
 ### Configuration
 
-#### What is the difference between advertise-urls and listen-urls?
+#### What is the difference between listen-<client,peer>-urls, advertise-client-urls or initial-advertise-peer-urls?
 
-`listen-urls` specifies the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address.
+`listen-client-urls` and `listen-peer-urls` specify the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address.
 
-`advertise-urls` specifies the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines.
+`advertise-client-urls` and `initial-advertise-peer-urls` specify the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines.
 
 ### Deployment
 
@@ -78,10 +78,26 @@ On the other hand, if the downed member is removed from cluster membership first
 
 etcd sets `strict-reconfig-check` in order to reject reconfiguration requests that would cause quorum loss. Abandoning quorum is really risky (especially when the cluster is already unhealthy). Although it may be tempting to disable quorum checking if there's quorum loss to add a new member, this could lead to full fledged cluster inconsistency. For many applications, this will make the problem even worse ("disk geometry corruption" being a candidate for most terrifying).
 
-### Why does etcd lose its leader from disk latency spikes?
+#### Why does etcd lose its leader from disk latency spikes?
 
 This is intentional; disk latency is part of leader liveness. Suppose the cluster leader takes a minute to fsync a raft log update to disk, but the etcd cluster has a one second election timeout. Even though the leader can process network messages within the election interval (e.g., send heartbeats), it's effectively unavailable because it can't commit any new proposals; it's waiting on the slow disk. If the cluster frequently loses its leader due to disk latencies, try [tuning][tuning] the disk settings or etcd time parameters.
 
+#### What does the etcd warning "request ignored (cluster ID mismatch)" mean?
+
+Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster.
+
+Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint.
+
+#### What does "mvcc: database space exceeded" mean and how do I fix it?
+
+The [multi-version concurrency control][api-mvcc] data model in etcd keeps an exact history of the keyspace. Without periodically compacting this history (e.g., by setting `--auto-compaction`), etcd will eventually exhaust its storage space. If etcd runs low on storage space, it raises a space quota alarm to protect the cluster from further writes. So long as the alarm is raised, etcd responds to write requests with the error `mvcc: database space exceeded`.
+
+To recover from the low space quota alarm:
+
+1. [Compact][maintenance-compact] etcd's history.
+2. [Defragment][maintenance-defragment] every etcd endpoint.
+3. [Disarm][maintenance-disarm] the alarm.
+
 ### Performance
 
 #### How should I benchmark etcd?
@@ -91,7 +107,7 @@ Try the [benchmark] tool. Current [benchmark results][benchmark-result] are avai
 #### What does the etcd warning "apply entries took too long" mean?
 
 After a majority of etcd members agree to commit a request, each etcd server applies the request to its data store and persists the result to disk. Even with a slow mechanical disk or a virtualized network disk, such as Amazon’s EBS or Google’s PD, applying a request should normally take fewer than 50 milliseconds. If the average apply duration exceeds 100 milliseconds, etcd will warn that entries are taking too long to apply.
 
 Usually this issue is caused by a slow disk. The disk could be experiencing contention among etcd and other applications, or the disk is simply too slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [backend_commit_duration_seconds][backend_commit_metrics] (p99 duration should be less than 25ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using faster disk will typically solve the problem.
 
 The second most common cause is CPU starvation. If monitoring of the machine’s CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to dedicated machine, increasing process resource isolation cgroups, or renicing the etcd server process into a higher priority can usually solve the problem.
@@ -112,12 +128,6 @@ A slow network can also cause this issue. If network metrics among the etcd mach
 
 If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information.
 
-#### What does the etcd warning "request ignored (cluster ID mismatch)" mean?
-
-Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster.
-
-Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint.
-
 #### What does the etcd warning "snapshotting is taking more than x seconds to finish ..." mean?
 
 etcd sends a snapshot of its complete key-value store to refresh slow followers and for [backups][backup]. Slow snapshot transfer times increase MTTR; if the cluster is ingesting data with high throughput, slow followers may livelock by needing a new snapshot before finishing receiving a snapshot. To catch slow snapshot performance, etcd warns when sending a snapshot takes more than thirty seconds and exceeds the expected transfer time for a 1Gbps connection.
@@ -135,3 +145,7 @@ etcd sends a snapshot of its complete key-value store to refresh slow followers
 [runtime reconfiguration]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md
 [benchmark]: https://github.com/coreos/etcd/tree/master/tools/benchmark
 [benchmark-result]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/performance.md
+[api-mvcc]: learning/api.md#revisions
+[maintenance-compact]: op-guide/maintenance.md#history-compaction
+[maintenance-defragment]: op-guide/maintenance.md#defragmentation
+[maintenance-disarm]: ../etcdctl/README.md#alarm-disarm
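For reference, the three recovery steps added in the FAQ hunk above map onto `etcdctl` commands along these lines (a sketch assuming etcdctl v3 and a local endpoint):

```sh
# find the current revision, then compact, defragment, and disarm
rev=$(ETCDCTL_API=3 etcdctl --endpoints=localhost:2379 endpoint status --write-out=json \
  | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]+')
ETCDCTL_API=3 etcdctl compact "$rev"
ETCDCTL_API=3 etcdctl defrag
ETCDCTL_API=3 etcdctl alarm disarm
```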
@@ -449,7 +449,7 @@ message LeaseRevokeRequest {
 
 ### Keep alives
 
-Leases are refreshed using a bi-directional stream created with the `LeaseKeepAlive` API call. When the client wishes to refresh a lease, it sends a `LeaseGrantRequest` over the stream:
+Leases are refreshed using a bi-directional stream created with the `LeaseKeepAlive` API call. When the client wishes to refresh a lease, it sends a `LeaseKeepAliveRequest` over the stream:
 
 ```protobuf
 message LeaseKeepAliveRequest {
@@ -60,7 +60,7 @@ For avoiding such a situation, the API layer performs *version number validation*
 
 After authenticating with `Authenticate()`, a client can create a gRPC connection as it would without auth. In addition to the existing initialization process, the client must associate the token with the newly created connection. `grpc.WithPerRPCCredentials()` provides the functionality for this purpose.
 
-Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
+Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromIncomingContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`).
 
 ### Checking permission in the state machine
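On the client side this token handling is transparent; for example, `etcdctl` performs `Authenticate()` and attaches the resulting token to subsequent RPCs when given `--user` (sketch, assuming auth is enabled and a `root` user exists):

```sh
ETCDCTL_API=3 etcdctl --user=root:rootpw put foo bar
```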
@@ -1,17 +1,17 @@
-# Why etcd
+# etcd versus other key-value stores
 
 The name "etcd" originated from two ideas, the unix "/etc" folder and "d"istributed systems. The "/etc" folder is a place to store configuration data for a single system whereas etcd stores configuration information for large scale distributed systems. Hence, a "d"istributed "/etc" is "etcd".
 
-etcd stores metadata in a consistent and fault-tolerant way. Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness.
+etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. etcd stores metadata in a consistent and fault-tolerant way. An etcd cluster is meant to provide key-value storage with best of class stability, reliability, scalability and performance.
+
+Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Many [organizations][production-users] use etcd to implement production systems such as container schedulers, service discovery services, and distributed data storage. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness.
 
 ## Use cases
 
-- Container Linux by CoreOS: Application running on [Container Linux][container-linux] gets automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
+- Container Linux by CoreOS: Applications running on [Container Linux][container-linux] get automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. Locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
 - [Kubernetes][kubernetes] stores configuration data into etcd for service discovery and cluster management; etcd's consistency is crucial for correctly scheduling and operating services. The Kubernetes API server persists cluster state into etcd. It uses etcd's watch API to monitor the cluster and roll out critical configuration changes.
 
-## etcd versus other key-value stores
-
-When deciding whether to use etcd as a key-value store, it’s worth keeping in mind etcd’s main goal. Namely, etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. An etcd cluster is meant to provide consistent key-value storage with best of class stability, reliability, scalability and performance. The upshot of this focus is many [organizations][production-users] already use etcd to implement production systems such as container schedulers, service discovery services, distributed data storage, and more.
+## Comparison chart
 
 Perhaps etcd already seems like a good fit, but as with all technological decisions, proceed with caution. Please note this documentation is written by the etcd team. Although the ideal is a disinterested comparison of technology and features, the authors’ expertise and biases obviously favor etcd. Use only as directed.
@@ -84,7 +84,7 @@ For distributed coordination, choosing etcd can help prevent operational headach
 [tidb]: https://github.com/pingcap/tidb
 [etcd-v3lock]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb
 [etcd-v3election]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb
-[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname
+[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname-command-arg1-arg2-
 [etcd-etcdctl-elect]: ../../etcdctl/README.md#elect-options-election-name-proposal
 [etcd-mvcc]: data_model.md
 [etcd-recipe]: https://godoc.org/github.com/coreos/etcd/contrib/recipes
@@ -113,4 +113,3 @@ For distributed coordination, choosing etcd can help prevent operational headach
 [container-linux]: https://coreos.com/why
 [locksmith]: https://github.com/coreos/locksmith
 [kubernetes]: http://kubernetes.io/docs/whatisk8s
-
@@ -79,14 +79,16 @@ export NODE1=192.168.1.21
 Run the latest version of etcd:
 
 ```
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:latest \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name node1 \
   --initial-advertise-peer-urls http://${NODE1}:2380 --listen-peer-urls http://${NODE1}:2380 \
   --advertise-client-urls http://${NODE1}:2379 --listen-client-urls http://${NODE1}:2379 \
   --initial-cluster node1=http://${NODE1}:2380
 ```
 
 List the cluster member:
@@ -114,41 +116,47 @@ DATA_DIR=/var/lib/etcd
 # For node 1
 THIS_NAME=${NAME_1}
 THIS_IP=${HOST_1}
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name ${THIS_NAME} \
   --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
   --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
   --initial-cluster ${CLUSTER} \
   --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
 
 # For node 2
 THIS_NAME=${NAME_2}
 THIS_IP=${HOST_2}
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name ${THIS_NAME} \
   --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
   --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
   --initial-cluster ${CLUSTER} \
   --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
 
 # For node 3
 THIS_NAME=${NAME_3}
 THIS_IP=${HOST_3}
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name ${THIS_NAME} \
   --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
   --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
   --initial-cluster ${CLUSTER} \
   --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
 ```
 
 To run `etcdctl` using API version 3:
@@ -170,17 +178,19 @@ rkt run \
 --volume etcd-ssl-certs-bundle,kind=host,source=/etc/ssl/certs/ca-certificates.crt \
 --mount volume=etcd-ssl-certs-bundle,target=/etc/ssl/certs/ca-certificates.crt \
 quay.io/coreos/etcd:latest -- --name my-name \
---initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \
---advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \
---discovery https://discovery.etcd.io/c11fbcdc16972e45253491a24fcf45e1
+  --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \
+  --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \
+  --discovery https://discovery.etcd.io/c11fbcdc16972e45253491a24fcf45e1
 ```
 
 ```
 docker run \
-  --volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \
-  quay.io/coreos/etcd:latest \
-  /usr/local/bin/etcd --name my-name \
-  --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \
-  --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \
-  --discovery https://discovery.etcd.io/86a9ff6c8cb8b4c4544c1a2f88f8b801
+  -p 2379:2379 \
+  -p 2380:2380 \
+  --volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \
+  quay.io/coreos/etcd:latest \
+  /usr/local/bin/etcd --name my-name \
+  --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \
+  --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \
+  --discovery https://discovery.etcd.io/86a9ff6c8cb8b4c4544c1a2f88f8b801
 ```
@@ -10,8 +10,7 @@ The gateway supports multiple etcd server endpoints and works on a simple round-
 
 Every application that accesses etcd must first have the address of an etcd cluster client endpoint. If multiple applications on the same server access the same etcd cluster, every application still needs to know the advertised client endpoints of the etcd cluster. If the etcd cluster is reconfigured to have different endpoints, every application may also need to update its endpoint list. This wide-scale reconfiguration is both tedious and error prone.
 
-etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has
-each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application.
+etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application.
 
 In summary, to automatically propagate cluster endpoint changes, the etcd gateway runs on every machine serving multiple applications accessing the same etcd cluster.
 
@@ -64,3 +63,43 @@ Start the etcd gateway to fetch the endpoints from the DNS SRV entries with the
 $ etcd gateway --discovery-srv=example.com
 2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...]
 ```
+
+## Configuration flags
+
+### etcd cluster
+
+#### --endpoints
+
+* Comma-separated list of etcd server targets for forwarding client connections.
+* Default: `127.0.0.1:2379`
+* Invalid example: `https://127.0.0.1:2379` (gateway does not terminate TLS)
+
+#### --discovery-srv
+
+* DNS domain used to bootstrap cluster endpoints through SRV records.
+* Default: (not set)
+
+### Network
+
+#### --listen-addr
+
+* Interface and port to bind for accepting client requests.
+* Default: `127.0.0.1:23790`
+
+#### --retry-delay
+
+* Duration of delay before retrying to connect to failed endpoints.
+* Default: 1m0s
+* Invalid example: "123" (expects time unit in format)
+
+### Security
+
+#### --insecure-discovery
+
+* Accept SRV records that are insecure or susceptible to man-in-the-middle attacks.
+* Default: `false`
+
+#### --trusted-ca-file
+
+* Path to the client TLS CA file for the etcd cluster. Used to authenticate endpoints.
+* Default: (not set)
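Putting the new flags together, a hypothetical gateway invocation might look like this (addresses are illustrative; assumes the `start` subcommand of the gateway CLI):

```sh
etcd gateway start \
  --endpoints=10.0.1.10:2379,10.0.1.11:2379,10.0.1.12:2379 \
  --listen-addr=127.0.0.1:23790 \
  --retry-delay=1m0s
```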
@@ -114,18 +114,21 @@
       "span": 5,
       "stack": false,
       "steppedLine": false,
-      "targets": [{
-        "expr": "sum(rate({grpc_type=\"unary\",grpc_code!=\"OK\"} [1m]))",
+      "targets": [
+        {
+          "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"}[5m]))",
           "format": "time_series",
           "intervalFactor": 2,
-          "legendFormat": "{{instance}} RPC Rate",
+          "legendFormat": "RPC Rate",
           "metric": "grpc_server_started_total",
           "refId": "A",
           "step": 2
         },
         {
-          "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\",grpc_code!=\"OK\"} [1m])) - sum(rate(grpc_server_handled_total{grpc_type=\"unary\"} [1m]))",
+          "expr": "sum(rate(grpc_server_handled_total{grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))",
           "format": "time_series",
           "intervalFactor": 2,
-          "legendFormat": "{{instance}} RPC Failed Rate",
+          "legendFormat": "RPC Failed Rate",
           "metric": "grpc_server_handled_total",
           "refId": "B",
           "step": 2
@@ -197,7 +200,7 @@
       "stack": true,
       "steppedLine": false,
       "targets": [{
-        "expr": "sum(grpc_server_started_total {grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\",grpc_code!=\"OK\"}) - sum(grpc_server_handled_total {grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})",
+        "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})",
         "intervalFactor": 2,
         "legendFormat": "Watch Streams",
         "metric": "grpc_server_handled_total",
@@ -205,7 +208,7 @@
         "step": 4
       },
       {
-        "expr": "sum(grpc_server_started_total {grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total {grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})",
+        "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})",
        "intervalFactor": 2,
         "legendFormat": "Lease Streams",
         "metric": "grpc_server_handled_total",
@@ -361,7 +364,7 @@
       "stack": false,
       "steppedLine": true,
       "targets": [{
-        "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket [5m])) by (instance, le))",
+        "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))",
         "hide": false,
         "intervalFactor": 2,
         "legendFormat": "{{instance}} WAL fsync",
@@ -370,7 +373,7 @@
         "step": 4
       },
       {
-        "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket [5m])) by (instance, le))",
+        "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))",
         "intervalFactor": 2,
         "legendFormat": "{{instance}} DB fsync",
         "metric": "etcd_disk_backend_commit_duration_seconds_bucket",
@@ -522,7 +525,7 @@
       "stack": true,
       "steppedLine": false,
       "targets": [{
-        "expr": "rate(etcd_network_client_grpc_received_bytes_total [1m])",
+        "expr": "rate(etcd_network_client_grpc_received_bytes_total[5m])",
         "intervalFactor": 2,
         "legendFormat": "{{instance}} Client Traffic In",
         "metric": "etcd_network_client_grpc_received_bytes_total",
@@ -595,7 +598,7 @@
       "stack": true,
       "steppedLine": false,
       "targets": [{
-        "expr": "rate(etcd_network_client_grpc_sent_bytes_total [1m])",
+        "expr": "rate(etcd_network_client_grpc_sent_bytes_total[5m])",
         "intervalFactor": 2,
         "legendFormat": "{{instance}} Client Traffic Out",
         "metric": "etcd_network_client_grpc_sent_bytes_total",
@@ -668,7 +671,7 @@
       "stack": false,
       "steppedLine": false,
       "targets": [{
-        "expr": "sum(rate(etcd_network_peer_received_bytes_total [1m])) by (instance)",
+        "expr": "sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)",
         "intervalFactor": 2,
         "legendFormat": "{{instance}} Peer Traffic In",
         "metric": "etcd_network_peer_received_bytes_total",
@@ -742,7 +745,7 @@
       "stack": false,
       "steppedLine": false,
       "targets": [{
-        "expr": "sum(rate(etcd_network_peer_sent_bytes_total [1m])) by (instance)",
+        "expr": "sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)",
         "hide": false,
         "interval": "",
         "intervalFactor": 2,
@@ -822,7 +825,7 @@
       "stack": false,
       "steppedLine": false,
       "targets": [{
-        "expr": "sum(rate(etcd_server_proposals_failed_total [1m]))",
+        "expr": "sum(rate(etcd_server_proposals_failed_total[5m]))",
         "intervalFactor": 2,
         "legendFormat": "Proposal Failure Rate",
         "metric": "etcd_server_proposals_failed_total",
@@ -838,7 +841,7 @@
         "step": 2
       },
       {
-        "expr": "sum(rate(etcd_server_proposals_committed_total [1m]))",
+        "expr": "sum(rate(etcd_server_proposals_committed_total[5m]))",
         "intervalFactor": 2,
         "legendFormat": "Proposal Commit Rate",
         "metric": "etcd_server_proposals_committed_total",
@@ -846,7 +849,7 @@
         "step": 2
       },
       {
-        "expr": "sum(rate(etcd_server_proposals_applied_total [1m]))",
+        "expr": "sum(rate(etcd_server_proposals_applied_total[5m]))",
         "intervalFactor": 2,
         "legendFormat": "Proposal Apply Rate",
         "refId": "D",
@@ -922,9 +925,9 @@
       "stack": false,
       "steppedLine": false,
       "targets": [{
-        "expr": "etcd_server_leader_changes_seen_total",
+        "expr": "changes(etcd_server_leader_changes_seen_total[1d])",
         "intervalFactor": 2,
-        "legendFormat": "{{instance}} Leader Change Seen",
+        "legendFormat": "{{instance}} Total Leader Elections Per Day",
         "metric": "etcd_server_leader_changes_seen_total",
         "refId": "A",
         "step": 2
@@ -932,7 +935,7 @@
       "thresholds": [],
       "timeFrom": null,
       "timeShift": null,
-      "title": "Rate Leader Elections",
+      "title": "Total Leader Elections Per Day",
       "tooltip": {
         "msResolution": false,
         "shared": true,
@@ -1009,4 +1012,4 @@
   "version": 215,
   "links": [],
   "gnetId": null
-}
+}
@@ -1,6 +1,49 @@
 # Monitoring etcd
 
-Each etcd server exports metrics under the `/metrics` path on its client port.
+Each etcd server provides local monitoring information on its client port through http endpoints. The monitoring data is useful for both system health checking and cluster debugging.
+
+## Debug endpoint
+
+If `--debug` is set, the etcd server exports debugging information on its client port under the `/debug` path. Take care when setting `--debug`, since there will be degraded performance and verbose logging.
+
+The `/debug/pprof` endpoint is the standard go runtime profiling endpoint. This can be used to profile CPU, heap, mutex, and goroutine utilization. For example, here `go tool pprof` gets the top 10 functions where etcd spends its time:
+
+```sh
+$ go tool pprof http://localhost:2379/debug/pprof/profile
+Fetching profile from http://localhost:2379/debug/pprof/profile
+Please wait... (30s)
+Saved profile in /home/etcd/pprof/pprof.etcd.localhost:2379.samples.cpu.001.pb.gz
+Entering interactive mode (type "help" for commands)
+(pprof) top10
+310ms of 480ms total (64.58%)
+Showing top 10 nodes out of 157 (cum >= 10ms)
+    flat  flat%   sum%        cum   cum%
+   130ms 27.08% 27.08%      130ms 27.08%  runtime.futex
+    70ms 14.58% 41.67%       70ms 14.58%  syscall.Syscall
+    20ms  4.17% 45.83%       20ms  4.17%  github.com/coreos/etcd/cmd/vendor/golang.org/x/net/http2/hpack.huffmanDecode
+    20ms  4.17% 50.00%       30ms  6.25%  runtime.pcvalue
+    20ms  4.17% 54.17%       50ms 10.42%  runtime.schedule
+    10ms  2.08% 56.25%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/etcdserver.(*EtcdServer).AuthInfoFromCtx
+    10ms  2.08% 58.33%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/etcdserver.(*EtcdServer).Lead
+    10ms  2.08% 60.42%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/coreos/etcd/pkg/wait.(*timeList).Trigger
+    10ms  2.08% 62.50%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/github.com/prometheus/client_golang/prometheus.(*MetricVec).hashLabelValues
+    10ms  2.08% 64.58%       10ms  2.08%  github.com/coreos/etcd/cmd/vendor/golang.org/x/net/http2.(*Framer).WriteHeaders
+```
+
+The `/debug/requests` endpoint gives gRPC traces and performance statistics through a web browser. For example, here is a `Range` request for the key `abc`:
+
+```
+When                                   Elapsed (s)
+2017/08/18 17:34:51.999317   0.000244  /etcdserverpb.KV/Range
+17:34:51.999382    .    65  ... RPC: from 127.0.0.1:47204 deadline:4.999377747s
+17:34:51.999395    .    13  ... recv: key:"abc"
+17:34:51.999499    .   104  ... OK
+17:34:51.999535    .    36  ... sent: header:<cluster_id:14841639068965178418 member_id:10276657743932975437 revision:15 raft_term:17 > kvs:<key:"abc" create_revision:6 mod_revision:14 version:9 value:"asda" > count:1
+```
+
+## Metrics endpoint
+
+Each etcd server exports metrics under the `/metrics` path on its client port and optionally on interfaces given by `--listen-metrics-urls`.
 
 The metrics can be fetched with `curl`:
 
@@ -75,8 +118,6 @@ Access: proxy
 
 Then import the default [etcd dashboard template][template] and customize. For instance, if Prometheus data source name is `my-etcd`, the `datasource` field values in JSON also need to be `my-etcd`.
 
-See the [demo][demo].
-
 Sample dashboard:
 
 ![](etcd-sample-grafana.png)
@@ -85,4 +126,3 @@ Sample dashboard:
 [prometheus]: https://prometheus.io/
 [grafana]: http://grafana.org/
 [template]: ./grafana.json
-[demo]: http://dash.etcd.io/dashboard/db/test-etcd
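The first hunk above ends just before the `curl` example it introduces; a minimal fetch of the metrics endpoint would be (assuming a local listener on the default client port):

```sh
curl http://localhost:2379/metrics
# prints Prometheus-format metric families, one HELP/TYPE block per metric
```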
@@ -17,58 +17,54 @@ For some baseline performance numbers, we consider a three member etcd cluster w
 - Google Cloud Compute Engine
 - 3 machines of 8 vCPUs + 16GB Memory + 50GB SSD
 - 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
-- Ubuntu 15.10
-- etcd v3 master branch (commit SHA d8f325d), Go 1.6.2
+- Ubuntu 17.04
+- etcd 3.2.0, go 1.8.3
 
 With this configuration, etcd can approximately write:
 
-| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Memory |
-|----------------|-------------------|---------------------|-----------------------|-------------------|--------------------|-------------------|-----------------------------|--------|
-| 10,000 | 8 | 256 | 1 | 1 | leader only | 525 | 2ms | 35 MB |
-| 100,000 | 8 | 256 | 100 | 1000 | leader only | 25,000 | 30ms | 35 MB |
-| 100,000 | 8 | 256 | 100 | 1000 | all members | 33,000 | 25ms | 35 MB |
+| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Average server RSS |
+|---------------:|------------------:|--------------------:|----------------------:|------------------:|--------------------|------------------:|----------------------------:|-------------------:|
+| 10,000 | 8 | 256 | 1 | 1 | leader only | 583 | 1.6ms | 48 MB |
+| 100,000 | 8 | 256 | 100 | 1000 | leader only | 44,341 | 22ms | 124MB |
+| 100,000 | 8 | 256 | 100 | 1000 | all members | 50,104 | 20ms | 126MB |
 
 Sample commands are:
 
-```
-# assuming IP_1 is leader, write requests to the leader
-benchmark --endpoints={IP_1} --conns=1 --clients=1 \
+```sh
+# write to leader
+benchmark --endpoints=${HOST_1} --target-leader --conns=1 --clients=1 \
     put --key-size=8 --sequential-keys --total=10000 --val-size=256
-benchmark --endpoints={IP_1} --conns=100 --clients=1000 \
+benchmark --endpoints=${HOST_1} --target-leader --conns=100 --clients=1000 \
     put --key-size=8 --sequential-keys --total=100000 --val-size=256
 
 # write to all members
-benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \
     put --key-size=8 --sequential-keys --total=100000 --val-size=256
 ```
 
 Linearizable read requests go through a quorum of cluster members for consensus to fetch the most recent data. Serializable read requests are cheaper than linearizable reads since they are served by any single etcd member, instead of a quorum of members, in exchange for possibly serving stale data. etcd can read:
 
-| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average latency per request | Average read QPS |
-|--------------------|-------------------|---------------------|-----------------------|-------------------|-------------|-----------------------------|------------------|
-| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 2ms | 560 |
-| 10,000 | 8 | 256 | 1 | 1 | Serializable | 0.4ms | 7,500 |
-| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 15ms | 43,000 |
-| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 9ms | 93,000 |
+| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average read QPS | Average latency per request |
+|-------------------:|------------------:|--------------------:|----------------------:|------------------:|-------------|-----------------:|----------------------------:|
+| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 1,353 | 0.7ms |
+| 10,000 | 8 | 256 | 1 | 1 | Serializable | 2,909 | 0.3ms |
+| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 141,578 | 5.5ms |
+| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 185,758 | 2.2ms |
 
 Sample commands are:
 
-```
-# Linearizable read requests
-benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=1 --clients=1 \
+```sh
+# Single connection read requests
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=1 --clients=1 \
     range YOUR_KEY --consistency=l --total=10000
-benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \
-    range YOUR_KEY --consistency=l --total=100000
-
-# Serializable read requests for each member and sum up the numbers
-for endpoint in {IP_1} {IP_2} {IP_3}; do
-    benchmark --endpoints=$endpoint --conns=1 --clients=1 \
-        range YOUR_KEY --consistency=s --total=10000
-done
-for endpoint in {IP_1} {IP_2} {IP_3}; do
-    benchmark --endpoints=$endpoint --conns=100 --clients=1000 \
-        range YOUR_KEY --consistency=s --total=100000
-done
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=1 --clients=1 \
+    range YOUR_KEY --consistency=s --total=10000
+
+# Many concurrent read requests
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \
+    range YOUR_KEY --consistency=l --total=100000
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \
+    range YOUR_KEY --consistency=s --total=100000
 ```
 
 We encourage running the benchmark test when setting up an etcd cluster for the first time in a new environment to ensure the cluster achieves adequate performance; cluster latency and throughput can be sensitive to minor environment differences.
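The `benchmark` tool used in these samples ships in the etcd repository under `tools/benchmark`; one way to install it in a GOPATH-era setup (an assumption, not shown in the diff):

```sh
go get -v github.com/coreos/etcd/tools/benchmark
benchmark --help
```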
@ -16,7 +16,7 @@ etcd takes several certificate related configuration options, either through com
|
||||
|
||||
`--key-file=<path>`: Key for the certificate. Must be unencrypted.
|
||||
|
||||
`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail.
|
||||
`--client-cert-auth`: When this is set, etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA; requests that don't supply a valid client certificate will fail. If [authentication][auth] is enabled, the certificate provides credentials for the user name given by the Common Name field.

`--trusted-ca-file=<path>`: Trusted certificate authority.
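
Putting these flags together, a minimal sketch of a TLS-enabled member with client certificate checking might look like this; the paths, member name, and URLs are illustrative placeholders:

```sh
etcd --name infra0 \
  --cert-file=/etc/ssl/etcd/server.crt \
  --key-file=/etc/ssl/etcd/server.key \
  --client-cert-auth \
  --trusted-ca-file=/etc/ssl/etcd/ca.crt \
  --advertise-client-urls=https://127.0.0.1:2379 \
  --listen-client-urls=https://127.0.0.1:2379
```

With this configuration, a client that cannot present a certificate signed by the CA in `ca.crt` is rejected during the TLS handshake.
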

@ -222,3 +222,4 @@ The certificate needs to be signed for the member's FQDN in its Subject Name, us

[tls-setup]: ../../hack/tls-setup
[tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
[alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
[auth]: authentication.md


@ -6,7 +6,7 @@ This guide assumes operational knowledge of Amazon Web Services (AWS), specifica

As a critical building block for distributed systems, it is crucial to perform adequate capacity planning in order to support the intended cluster workload. As a highly available and strongly consistent data store, increasing the number of nodes in an etcd cluster will generally affect performance adversely. This makes sense intuitively, as more nodes means more members for the leader to coordinate state across. The most direct way to increase throughput and decrease latency of an etcd cluster is to allocate more disk I/O, network I/O, CPU, and memory to cluster members. In the event it is impossible to temporarily divert incoming requests to the cluster, scaling the EC2 instances which comprise the etcd cluster members one at a time may improve performance. It is, however, best to avoid bottlenecks through capacity planning.

The etcd team has produced a [hardware recommendation guide](../op-guide/hardware.md) which is very useful for “ballparking” how many nodes and what instance type are necessary for a cluster.

AWS provides a service for creating groups of EC2 instances which are dynamically sized to match load on the instances. Using an Auto Scaling Group ([ASG](http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroup.html)) to dynamically scale an etcd cluster is not recommended for several reasons including:

@ -10,7 +10,7 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this

#### Upgrade requirements

To upgrade an existing etcd deployment to 3.0, the running cluster must be 2.3 or greater. If it's before 2.3, please upgrade to [2.3](https://github.com/coreos/etcd/releases/tag/v2.3.8) before upgrading to 3.0.

Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl cluster-health` command before proceeding.

@ -52,7 +52,7 @@ member 8211f1d0f64f3269 is healthy: got healthy result from http://localhost:123

cluster is healthy

$ curl http://localhost:2379/version
{"etcdserver":"2.3.x","etcdcluster":"2.3.8"}
```

#### 2. Stop the existing etcd process

@ -10,7 +10,7 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this

#### Upgrade requirements

To upgrade an existing etcd deployment to 3.1, the running cluster must be 3.0 or greater. If it's before 3.0, please [upgrade to 3.0](upgrade_3_0.md) before upgrading to 3.1.

Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.
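
For example, a quick pre-upgrade health check could be run as follows (the endpoint address is a placeholder):

```sh
ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 endpoint health
```
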

@ -30,11 +30,27 @@ resp.TTL == -1

err == nil
```

`clientv3.NewFromConfigFile` is moved to `yaml.NewConfig`.

Before

```go
import "github.com/coreos/etcd/clientv3"
clientv3.NewFromConfigFile
```

After

```go
import clientv3yaml "github.com/coreos/etcd/clientv3/yaml"
clientv3yaml.NewConfig
```
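
As a usage sketch (not part of the original upgrade note), the new constructor pairs with `clientv3.New`; the YAML file path below is a placeholder:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	clientv3yaml "github.com/coreos/etcd/clientv3/yaml"
)

func main() {
	// Load client settings (endpoints, TLS files, etc.) from a YAML file.
	cfg, err := clientv3yaml.NewConfig("etcd-client.yaml")
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```
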

### Server upgrade checklists

#### Upgrade requirements

To upgrade an existing etcd deployment to 3.2, the running cluster must be 3.1 or greater. If it's before 3.1, please [upgrade to 3.1](upgrade_3_1.md) before upgrading to 3.2.

Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.

19
Documentation/upgrades/upgrading-etcd.md
Normal file
@ -0,0 +1,19 @@

# Upgrading etcd clusters and applications

This section contains documents specific to upgrading etcd clusters and applications.

## Moving from etcd API v2 to API v3

* [Migrate applications from using API v2 to API v3][migrate-apps]

## Upgrading an etcd v3.x cluster

* [Upgrade etcd from 3.0 to 3.1][upgrade-3-1]
* [Upgrade etcd from 3.1 to 3.2][upgrade-3-2]

## Upgrading from etcd v2.3

* [Upgrade a v2.3 cluster to v3.0][upgrade-cluster]

[migrate-apps]: ../op-guide/v2-migration.md
[upgrade-cluster]: upgrade_3_0.md
[upgrade-3-1]: upgrade_3_1.md
[upgrade-3-2]: upgrade_3_2.md

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Snapshot Migration

You can migrate a snapshot of your data from a v0.4.9+ cluster into a new etcd 2.2 cluster using a snapshot migration. After snapshot migration, the etcd indexes of your data will change. Many etcd applications rely on these indexes to behave correctly. This operation should only be done while all etcd applications are stopped.

@ -1,165 +1,85 @@

# Documentation

etcd is a distributed key-value store designed to reliably and quickly preserve and provide access to critical data. It enables reliable distributed coordination through distributed locking, leader elections, and write barriers. An etcd cluster is intended for high availability and permanent data storage and retrieval.

This is the etcd v2 documentation set. For more recent versions, please see the [etcd v3 guides][etcd-v3].

## Communicating with etcd v2

Reading and writing into the etcd keyspace is done via a simple, RESTful HTTP API, or using language-specific libraries that wrap the HTTP API with higher level primitives.

### Reading and Writing

- [Client API Documentation][api]
- [Libraries, Tools, and Language Bindings][libraries]
- [Admin API Documentation][admin-api]
- [Members API][members-api]

### Security, Auth, Access control

- [Security Model][security]
- [Auth and Security][auth_api]
- [Authentication Guide][authentication]

## etcd v2 Cluster Administration

Configuration values are distributed within the cluster for your applications to read. Values can be changed programmatically and smart applications can reconfigure automatically. You'll never again have to run a configuration management tool on every machine in order to change a single config value.

### General Info

- [etcd Proxies][proxy]
- [Production Users][production-users]
- [Admin Guide][admin_guide]
- [Configuration Flags][configuration]
- [Frequently Asked Questions][faq]

### Initial Setup

- [Tuning etcd Clusters][tuning]
- [Discovery Service Protocol][discovery_protocol]
- [Running etcd under Docker][docker_guide]

### Live Reconfiguration

- [Runtime Configuration][runtime-configuration]

### Debugging etcd

- [Metrics Collection][metrics]
- [Error Code][errorcode]
- [Reporting Bugs][reporting_bugs]

### Migration

- [Upgrade etcd to 2.3][upgrade_2_3]
- [Upgrade etcd to 2.2][upgrade_2_2]
- [Upgrade to etcd 2.1][upgrade_2_1]
- [Snapshot Migration (0.4.x to 2.x)][04_to_2_snapshot_migration]
- [Backward Compatibility][backward_compatibility]

[etcd-v3]: ../docs.md
[api]: api.md
[libraries]: libraries-and-tools.md
[admin-api]: other_apis.md
[members-api]: members_api.md
[security]: security.md
[auth_api]: auth_api.md
[authentication]: authentication.md
[proxy]: proxy.md
[production-users]: production-users.md
[admin_guide]: admin_guide.md
[configuration]: configuration.md
[faq]: faq.md
[tuning]: tuning.md
[discovery_protocol]: discovery_protocol.md
[docker_guide]: docker_guide.md
[runtime-configuration]: runtime-configuration.md
[metrics]: metrics.md
[errorcode]: errorcode.md
[reporting_bugs]: reporting_bugs.md
[upgrade_2_3]: upgrade_2_3.md
[upgrade_2_2]: upgrade_2_2.md
[upgrade_2_1]: upgrade_2_1.md
[04_to_2_snapshot_migration]: 04_to_2_snapshot_migration.md
[backward_compatibility]: backward_compatibility.md

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Administration

## Data Directory

@ -8,7 +13,7 @@ When first started, etcd stores its configuration into a data directory specifie

Configuration is stored in the write ahead log and includes: the local member ID, cluster ID, and initial cluster configuration.
The write ahead log and snapshot files are used during member operation and to recover after a restart.

Having a dedicated disk to store wal files can improve the throughput and stabilize the cluster.
It is highly recommended to dedicate a wal disk and set `--wal-dir` to point to a directory on that device for a production cluster deployment.

If a member’s data directory is ever lost or corrupted then the user should [remove][remove-a-member] the etcd member from the cluster using the `etcdctl` tool.

@ -51,7 +56,7 @@ $ curl -L http://127.0.0.1:2379/health

You can also use etcdctl to check the cluster-wide health information. It will contact all the members of the cluster and collect the health information for you.

```
$ ./etcdctl cluster-health
member 8211f1d0f64f3269 is healthy: got healthy result from http://127.0.0.1:12379
member 91bc3c398fb3c146 is healthy: got healthy result from http://127.0.0.1:22379
member fd422379fda50e48 is healthy: got healthy result from http://127.0.0.1:32379

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# etcd API

## Running a Single Machine Cluster

@ -318,7 +323,7 @@ The first terminal should get the notification and return with the same response

However, the watch command can do more than this.
Using the index, we can watch for commands that have happened in the past.
This is useful for ensuring you don't miss events between watch commands.
Typically, we watch again from the `modifiedIndex` + 1 of the node we got.

Let's try to watch for the set command of index 7 again:

@ -338,13 +343,13 @@ curl 'http://127.0.0.1:2379/v2/keys/foo?wait=true&waitIndex=8'

Then even if etcd is on index 9 or 800, the first event to occur to the `/foo`
key between 8 and the current index will be returned.

**Note**: etcd only keeps the responses of the most recent 1000 events across all etcd keys.
It is recommended to send the response to another thread to process immediately
instead of blocking the watch while processing the result.

#### Watch from cleared event index

If we miss all the 1000 events, we need to recover the current state of the
watching key space through a get and then start to watch from the
`X-Etcd-Index` + 1.
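
A sketch of this recovery pattern, assuming the returned index was captured in the shell variable `ETCD_INDEX` (key name illustrative):

```sh
# Fetch the current state; the response carries an X-Etcd-Index header.
curl -i 'http://127.0.0.1:2379/v2/keys/foo'

# Resume watching from the index just after the one returned above.
curl "http://127.0.0.1:2379/v2/keys/foo?wait=true&waitIndex=$(($ETCD_INDEX + 1))"
```
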
@ -366,7 +371,7 @@ To start watch, first we need to fetch the current state of key `/foo`:

curl 'http://127.0.0.1:2379/v2/keys/foo' -vv
```

```
< HTTP/1.1 200 OK
< Content-Type: application/json
< X-Etcd-Cluster-Id: 7e27652122e8b2ae

@ -375,7 +380,7 @@ curl 'http://127.0.0.1:2379/v2/keys/foo' -vv

< X-Raft-Term: 2
< Date: Mon, 05 Jan 2015 18:54:43 GMT
< Transfer-Encoding: chunked
<
{"action":"get","node":{"key":"/foo","value":"bar","modifiedIndex":7,"createdIndex":7}}
```

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# etcd3 API

TODO: API doc

@ -1,13 +1,18 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# v2 Auth and Security

## etcd Resources

There are three types of resources in etcd:

1. permission resources: users and roles in the user store
2. key-value resources: key-value pairs in the key-value store
3. settings resources: security settings, auth settings, and dynamic etcd cluster settings (election/heartbeat)

### Permission Resources

#### Users

A user is an identity to be authenticated. Each user can have multiple roles. The user has a capability (such as reading or writing) on the resource if one of the roles has that capability.
@ -15,7 +20,7 @@ A user is an identity to be authenticated. Each user can have multiple roles. Th

A user named `root` is required before authentication can be enabled, and it always has the ROOT role. The ROOT role can be granted to multiple users, but `root` is required for recovery purposes.

#### Roles

Each role has exactly one associated Permission List. A permission list exists for each permission on key-value resources.

The special static ROOT (named `root`) role has full permissions on all key-value resources, the permission to manage user resources, and the permission to modify settings resources. Only the ROOT role has the permission to manage user resources and modify settings resources. The ROOT role is built-in and does not need to be created.

@ -30,8 +35,8 @@ A Permission List is a list of allowed patterns for that particular permission (

### Key-Value Resources

A key-value resource is a key-value pair in the store. Given a list of matching patterns, permission for any given key in a request is granted if any of the patterns in the list match.

Only prefixes or exact keys are supported. A prefix permission string ends in `*`.
A permission on `/foo` is for that exact key or directory, not its children or recursively. `/foo*` is a prefix that matches `/foo` recursively, and all keys thereunder, and keys with that prefix (eg. `/foobar`. Contrast to the prefix `/foo/*`). `*` alone is permission on the full keyspace.
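
To make the pattern rules concrete, here is an illustrative sketch of granting a prefix permission through the role endpoint covered later in this document; the role name, password, and paths are placeholders, and the grant JSON is an assumption following the grant/revoke update structure described below:

```sh
# Grant the "rkt" role read access to /rkt and every key under it.
curl -u root:ROOT_PASSWORD -X PUT http://127.0.0.1:2379/v2/auth/roles/rkt \
  -H "Content-Type: application/json" \
  -d '{"role":"rkt","grant":{"kv":{"read":["/rkt*"]}}}'
```
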
### Settings Resources
@ -66,7 +71,7 @@ An Error JSON corresponds to:

}

#### Enable and Disable Authentication

**Get auth status**

GET /v2/auth/enable
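
For instance, the current status can be checked with a plain `curl` (endpoint illustrative):

```sh
curl http://127.0.0.1:2379/v2/auth/enable
```
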
@ -215,8 +220,8 @@ PUT /v2/auth/users/charlie

Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
JSON struct, above, matching the appropriate name
* Starting password and roles when creating.
* Grant/Revoke/Password filled in when updating (to grant roles, revoke roles, or change the password).
Possible Status Codes:
200 OK

@ -345,7 +350,7 @@ PUT /v2/auth/roles/rkt

401 Unauthorized
404 Not Found (update non-existent roles)
409 Conflict (when granting duplicated permission or revoking non-existent permission)
200 Body:
JSON state of the role

**Remove A Role**

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Authentication Guide

## Overview

@ -14,7 +19,7 @@ There is one special user, `root`, and there are two special roles, `root` and `

### User `root`

User `root` must be created before security can be activated. It has the `root` role and allows for the changing of anything inside etcd. The idea behind the `root` user is for recovery purposes -- a password is generated and stored somewhere -- and the root role is granted to the administrator accounts on the system. In the future, for troubleshooting and recovery, we will need to assume some access to the system, and future documentation will assume this root user (though anyone with the role will suffice).

### Role `root`

@ -104,7 +109,7 @@ $ etcdctl role grant myrolename -path '/foo/bar' -write

$ etcdctl role grant myrolename -path '/pub/*' -readwrite
```

Beware that

```
# Give full access to keys under /pub??

@ -133,12 +138,12 @@ $ etcdctl role remove myrolename

## Enabling authentication

The minimal steps to enabling auth are as follows. The administrator can set up users and roles before or after enabling authentication, as a matter of preference.

Make sure the root user is created:

```
$ etcdctl user add root
New password:
```
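
The natural next step in this flow is switching authentication on; a minimal sketch with the v2 `etcdctl`:

```sh
$ etcdctl auth enable
```
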
@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Backward Compatibility

The main goal of the etcd 2.0 release is to improve cluster safety around bootstrapping and dynamic reconfiguration. To do this, we deprecated the old error-prone APIs and provided a new set of APIs.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# Benchmarks

etcd benchmarks will be published regularly and tracked for each release below:

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

## Physical machines

GCE n1-highcpu-2 machine type

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# Benchmarking etcd v2.2.0

## Physical Machines

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

## Physical machines

GCE n1-highcpu-2 machine type

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

## Physical machine

GCE n1-standard-2 machine type

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

## Physical machines

GCE n1-highcpu-2 machine type

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# Watch Memory Usage Benchmark

*NOTE*: The watch features are under active development, and their memory usage may change as that development progresses. We do not expect it to significantly increase beyond the figures stated below.

@ -5,10 +10,10 @@

A primary goal of etcd is supporting a very large number of watchers doing a massively large amount of watching. etcd aims to support O(10k) clients, O(100K) watch streams (O(10) streams per client) and O(10M) total watchings (O(100) watching per stream). The memory consumed by each individual watching accounts for the largest portion of etcd's overall usage, and is therefore the focus of current and future optimizations.

Three related components of etcd watch consume physical memory: each `grpc.Conn`, each watch stream, and each instance of the watching activity. `grpc.Conn` maintains the actual TCP connection and other gRPC connection state. Each `grpc.Conn` consumes O(10kb) of memory, and might have multiple watch streams attached.

Each watch stream is an independent HTTP2 connection which consumes another O(10kb) of memory.
Multiple watchings might share one watch stream.

Watching is the actual struct that tracks the changes on the key-value store. Each watching should only consume < O(1kb).

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# Storage Memory Usage Benchmark

<!---todo: link storage to storage design doc-->

@ -60,7 +65,7 @@ GCE n1-standard-2 machine type

In this test, we only benchmark the memory usage of the in-memory index. The goal is to find `c1` and `c2` mentioned above and to understand the hard limit of memory consumption of the storage.

We measure memory usage via the Go runtime.ReadMemStats, taking the difference in total allocated bytes before and after creating the index. This cannot perfectly reflect the memory usage of the in-memory index itself, but it can show the rough consumption pattern.
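
A minimal sketch of that measurement technique, using a plain map as a stand-in for etcd's actual in-memory index (key count and structure are illustrative):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)

	// Stand-in for building the in-memory index: N keys, one version each.
	index := make(map[string][]int64)
	for i := 0; i < 100000; i++ {
		index[fmt.Sprintf("key-%d", i)] = []int64{1}
	}

	runtime.ReadMemStats(&after)
	fmt.Printf("indexed %d keys, approx. bytes allocated: %d\n",
		len(index), after.TotalAlloc-before.TotalAlloc)
}
```
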
| N | versions | key size | memory usage |
|------|----------|----------|--------------|
@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Branch Management

## Guide

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Clustering Guide

## Overview

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Configuration Flags

etcd is configurable through command-line flags and environment variables. Options set on the command line take precedence over those from the environment.

@ -1,8 +1,13 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# etcd release guide

This guide describes how to release a new version of etcd.

The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date if you want to make changes to the release process.

## Prepare Release

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Discovery Service Protocol

The discovery service protocol helps new etcd members discover all the other members during the cluster bootstrap phase, using a shared discovery URL.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Running etcd under Docker

The following guide will show you how to run etcd under Docker using the [static bootstrap process](clustering.md#static).
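
As a flavor of what this looks like, a single-member sketch might be the following; the image tag, member name, and URLs are illustrative placeholders, and the guide covers clustered setups in detail:

```sh
docker run -d -p 2379:2379 -p 2380:2380 --name etcd \
  quay.io/coreos/etcd:v2.3.8 \
  --name etcd0 \
  --advertise-client-urls http://127.0.0.1:2379 \
  --listen-client-urls http://0.0.0.0:2379
```
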
@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Error Code

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# FAQ

## 1) Why can an etcd client read an old version of data when a majority of the etcd cluster members are down?

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Glossary

This document defines the various terms used in etcd documentation, command line and source code.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# FAQ

## Initial Bootstrapping UX

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Versioning

Goal: We want to be able to upgrade an individual peer in an etcd cluster to a newer version of etcd.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Libraries and Tools

**Tools**

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Members API

* [List members](#list-members)

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Metrics

etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset.

@ -14,9 +19,9 @@ The metrics under the `etcd` prefix are for monitoring and alerting. They are st

### http requests

These metrics describe the serving of requests (non-watch events) served by etcd members in non-proxy mode: total
incoming requests, request failures and processing latency (inc. raft rounds for storage). They are useful for tracking
user-generated traffic hitting the etcd cluster.

All these metrics are prefixed with `etcd_http_`

@ -28,20 +33,20 @@ All these metrics are prefixed with `etcd_http_`

Example Prometheus queries that may be useful from these metrics (across all etcd members):

* `sum(rate(etcd_http_failed_total{job="etcd"}[1m])) by (method) / sum(rate(etcd_http_received_total{job="etcd"}[1m])) by (method)`

    Shows the fraction of events that failed by HTTP method across all members, across a time window of `1m`.

* `sum(rate(etcd_http_received_total{job="etcd",method="GET"}[1m])) by (method)`
  `sum(rate(etcd_http_received_total{job="etcd",method!="GET"}[1m])) by (method)`

    Shows the rate of successful readonly/write queries across all servers, across a time window of `1m`.

* `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method="GET"}[5m])) by (le))`
  `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method!="GET"}[5m])) by (le))`

    Show the 0.90-tile latency (in seconds) of read/write (respectively) event handling across all members, with a window of `5m`.

### proxy
@ -56,21 +61,21 @@ All these metrics are prefixed with `etcd_proxy_`

| requests_total | Total number of requests by this proxy instance. | Counter(method) |
| handled_total | Total number of fully handled requests, with responses from etcd members. | Counter(method) |
| dropped_total | Total number of dropped requests due to forwarding errors to etcd members. | Counter(method,error) |
| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. | Histogram(method) |

Example Prometheus queries that may be useful from these metrics (across all etcd servers):

* `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)`

    Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`.

* `histogram_quantile(0.9, sum(rate(handling_duration_seconds{job="etcd",method="GET"}[5m])) by (le))`
  `histogram_quantile(0.9, sum(rate(handling_duration_seconds{job="etcd",method!="GET"}[5m])) by (le))`

    Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`.

* `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)`

    Number of failed requests on the proxy. This should be 0; spikes here indicate connectivity issues to the etcd cluster.

## etcd_debugging namespace metrics

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Miscellaneous APIs

* [Getting the etcd version](#getting-the-etcd-version)

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# FreeBSD

Starting with version 0.1.2 both etcd and etcdctl have been ported to FreeBSD and can

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Production Users

This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check-in on your experience and update this list.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Proxy

etcd can run as a transparent proxy. Doing so allows for easy discovery of etcd within your infrastructure, since it can run on each machine as a local service. In this mode, etcd acts as a reverse proxy and forwards client requests to an active etcd cluster. The etcd proxy does not participate in the consensus replication of the etcd cluster, thus it neither increases the resilience nor decreases the write performance of the etcd cluster.
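
For illustration, a local proxy pointed at an existing cluster could be started like this sketch (member names and URLs are placeholders):

```sh
etcd --proxy on \
  --listen-client-urls http://127.0.0.1:2379 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380
```
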
@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Reporting Bugs

If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../../docs.md#documentation

# Overview

The etcd v3 API is designed to give users a more efficient and cleaner abstraction compared to etcd v2. There are a number of semantic and protocol changes in this new API. For an overview [see Xiang Li's video](https://youtu.be/J5AioGtEPeQ?t=211).

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Runtime Reconfiguration

etcd comes with support for incremental runtime reconfiguration, which allows users to update the membership of the cluster at run time.

@ -61,9 +66,9 @@ A wrongly updated client URL will not affect the health of the etcd cluster.

#### Update advertise peer URLs

If you would like to update the advertise peer URLs of a member, you have to first update
it explicitly via member command and then restart the member. The additional action is required
since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster.

To update the peer URLs, first, we need to find the target member's ID. You can list all members with `etcdctl`:
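
A sketch of the flow (the member ID and new peer URL are illustrative):

```sh
# Find the target member's ID.
etcdctl member list

# Point its advertised peer URL at the new address, then restart the member.
etcdctl member update a8266ecf031671f3 http://10.0.1.10:2380
```
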
@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Design of Runtime Reconfiguration

Runtime reconfiguration is one of the hardest and most error-prone features in a distributed system, especially in a consensus-based system like etcd.

@ -26,21 +31,21 @@ We think runtime reconfiguration should be a low frequent operation. We made the

If a cluster permanently loses a majority of its members, a new cluster will need to be started from an old data directory to recover the previous state.

It is entirely possible to force-remove the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead, or if you force-remove different members through different members in the same cluster, you will end up with diverged clusters with the same clusterID. This is very dangerous and hard to debug/fix afterwards.

If you have a correct deployment, the possibility of permanent majority loss is very low. But it is a severe enough problem that it is worth special care. We strongly suggest reading the [disaster recovery documentation][disaster-recovery] and preparing for permanent majority loss before you put etcd into production.

## Do Not Use Public Discovery Service For Runtime Reconfiguration

The public discovery service should only be used for bootstrapping a cluster. To join a member into an existing cluster, you should use the runtime reconfiguration API.

The discovery service is designed for bootstrapping an etcd cluster in a cloud environment, when you do not know the IP addresses of all the members beforehand. After you successfully bootstrap a cluster, the IP addresses of all the members are known. Technically, you should not need the discovery service any more.

It may seem that the public discovery service is a convenient way to do runtime reconfiguration, since the discovery service already has all the cluster configuration information. However, relying on the public discovery service brings trouble:

1. it introduces external dependencies for the entire life-cycle of your cluster, not just bootstrap time. If there is a network issue between your cluster and the public discovery service, your cluster will suffer from it.

2. the public discovery service must reflect the correct runtime configuration of your cluster during its life-cycle. It has to provide security mechanisms to avoid bad actions, and that is hard.

3. the public discovery service has to keep tens of thousands of cluster configurations. Our public discovery service backend is not ready for that workload.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Security Model

etcd supports SSL/TLS as well as authentication through client certificates, both for clients to server as well as peer (server to server / cluster) communication.

@ -16,7 +21,7 @@ etcd takes several certificate related configuration options, either through com

`--key-file=<path>`: Key for the certificate. Must be unencrypted.

`--client-cert-auth`: When this is set, etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA; requests that don't supply a valid client certificate will fail. If [authentication][auth] is enabled, the certificate provides credentials for the user name given by the Common Name field.

`--trusted-ca-file=<path>`: Trusted certificate authority.

@ -191,3 +196,4 @@ If you need your certificate to be signed for your member's FQDN in its Subject

[tls-setup]: ../../hack/tls-setup
[tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
[alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
[auth]: authentication.md

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Tuning

The default settings in etcd should work well for installations on a local network where the average network latency is low.

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Upgrade etcd to 2.1

In the general case, upgrading from etcd 2.0 to 2.1 can be a zero-downtime, rolling upgrade:

@ -12,11 +17,11 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this

To upgrade an existing etcd deployment to 2.1, you must be running 2.0. If you’re running a version of etcd before 2.0, you must upgrade to [2.0][v2.0] before upgrading to 2.1.

Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.

### Preparedness

Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.

You might also want to [backup your data directory][backup-datastore] for a potential [downgrade](#downgrade).

@ -38,7 +43,7 @@ If you have even more data, this might take more time. If you have a data size l

### Downgrade

If all members have been upgraded to v2.1, the cluster will be upgraded to v2.1, and downgrade is **not possible**. If any member is still v2.0, the cluster will remain in v2.0, and you can go back to using the v2.0 binary.

Please [backup the data directories][backup-datastore] of all etcd members if you want to downgrade the cluster, even if it is upgraded.

@ -96,7 +101,7 @@ member 924e2e83e93f2560 is healthy

member a8266ecf031671f3 is healthy
```

#### 4. Repeat step 2 to step 3 for all other members

#### 5. Finish

@ -1,3 +1,8 @@

**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**

[v3-docs]: ../docs.md#documentation

# Upgrade etcd from 2.1 to 2.2

In the general case, upgrading from etcd 2.1 to 2.2 can be a zero-downtime, rolling upgrade:

@ -13,11 +18,11 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this

To upgrade an existing etcd deployment to 2.2, you must be running 2.1. If you’re running a version of etcd before 2.1, you must upgrade to [2.1][v2.1] before upgrading to 2.2.

Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.

### Preparedness

Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.

You might also want to [backup the data directory][backup-datastore] for a potential [downgrade].

@ -31,11 +36,11 @@ Internally, etcd members negotiate with each other to determine the overall etcd

If you have a data size larger than 100MB you should contact us before upgrading, so we can make sure the upgrades work smoothly.

Every etcd 2.2 member will do health checking across the cluster periodically. etcd 2.1 members do not support health checking. During the upgrade, etcd 2.2 members will log warnings about the unhealthy state of etcd 2.1 members. You can ignore the warnings.

### Downgrade

If all members have been upgraded to v2.2, the cluster will be upgraded to v2.2, and downgrade is **not possible**. If any member is still v2.1, the cluster will remain in v2.1, and you can go back to using the v2.1 binary.

Please [backup the data directories][backup-datastore] of all etcd members if you want to downgrade the cluster, even if it is upgraded.

@ -112,7 +117,7 @@ member a8266ecf031671f3 is healthy: got healthy result from http://localhost:123
|
||||
cluster is healthy
|
||||
```
|
||||
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
|
@ -1,3 +1,8 @@
|
||||
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
|
||||
|
||||
[v3-docs]: ../docs.md#documentation
|
||||
|
||||
|
||||
## Upgrade etcd from 2.2 to 2.3
|
||||
|
||||
In the general case, upgrading from etcd 2.2 to 2.3 can be a zero-downtime, rolling upgrade:
|
||||
|
@ -992,7 +992,7 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
}

func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
	md, ok := metadata.FromContext(ctx)
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil, nil
	}
@ -453,7 +453,8 @@ func TestAuthInfoFromCtx(t *testing.T) {
		t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
	}

	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
	// as if it came from RPC
	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"}))
	ai, err = as.AuthInfoFromCtx(ctx)
	if err != nil && ai != nil {
		t.Errorf("expected (nil, nil), got (%v, %v)", ai, err)
@ -465,19 +466,19 @@ func TestAuthInfoFromCtx(t *testing.T) {
		t.Error(err)
	}

	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"}))
	_, err = as.AuthInfoFromCtx(ctx)
	if err != ErrInvalidAuthToken {
		t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
	}

	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"}))
	_, err = as.AuthInfoFromCtx(ctx)
	if err != ErrInvalidAuthToken {
		t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err)
	}

	ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
	ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": resp.Token}))
	ai, err = as.AuthInfoFromCtx(ctx)
	if err != nil {
		t.Error(err)
@ -521,7 +522,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) {
	donec := make(chan struct{})
	go func() {
		defer close(donec)
		ctx := metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
		ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "test"}))
		as.AuthInfoFromCtx(ctx)
	}()
	as.UserAdd(&pb.AuthUserAddRequest{Name: "test"})
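The `metadata.FromContext` to `metadata.FromIncomingContext` (and `NewContext` to `NewIncomingContext`) moves above track grpc-go's split of metadata into incoming and outgoing directions. A minimal sketch of the round-trip the updated tests rely on; the `token` key mirrors the tests, everything else is illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach metadata the way a server-side handler would receive it.
	md := metadata.New(map[string]string{"token": "dummy"})
	ctx := metadata.NewIncomingContext(context.Background(), md)

	// Server-side code reads it back with FromIncomingContext; the old
	// FromContext no longer sees incoming metadata on new grpc-go.
	if got, ok := metadata.FromIncomingContext(ctx); ok {
		fmt.Println(got["token"]) // [dummy]
	}
}
```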
@ -1,212 +1,388 @@
[
  {"project": "bitbucket.org/ww/goautoneg", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 1, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 1}]},
  {"project": "github.com/beorn7/perks/quantile", "license": "MIT License", "confidence": 0.989, "licenses": [{"type": "MIT License", "confidence": 0.9891304347826086}]},
  {"project": "github.com/bgentry/speakeasy", "license": "MIT License", "confidence": 0.944},
  {"project": "github.com/boltdb/bolt", "license": "MIT License", "confidence": 1, "licenses": [{"type": "MIT License", "confidence": 0.9441624365482234}]},
  {"project": "github.com/cockroachdb/cmux", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/coreos/bbolt", "licenses": [{"type": "MIT License", "confidence": 1}]},
  {"project": "github.com/coreos/etcd", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/coreos/go-semver/semver", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/coreos/go-systemd", "license": "Apache License 2.0", "confidence": 0.997, "licenses": [{"type": "Apache License 2.0", "confidence": 0.9966703662597114}]},
  {"project": "github.com/coreos/pkg", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/cpuguy83/go-md2man/md2man", "license": "MIT License", "confidence": 1, "licenses": [{"type": "MIT License", "confidence": 1}]},
  {"project": "github.com/dgrijalva/jwt-go", "license": "MIT License", "confidence": 0.989, "licenses": [{"type": "MIT License", "confidence": 0.9891304347826086}]},
  {"project": "github.com/dustin/go-humanize", "license": "MIT License", "confidence": 0.969, "licenses": [{"type": "MIT License", "confidence": 0.96875}]},
  {"project": "github.com/ghodss/yaml", "license": "MIT License and BSD 3-clause \"New\" or \"Revised\" License", "confidence": 1, "licenses": [{"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License", "confidence": 1}]},
  {"project": "github.com/gogo/protobuf/proto", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.909, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9090909090909091}]},
  {"project": "github.com/golang/groupcache/lru", "license": "Apache License 2.0", "confidence": 0.997, "licenses": [{"type": "Apache License 2.0", "confidence": 0.9966703662597114}]},
  {"project": "github.com/golang/protobuf", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.92, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.92}]},
  {"project": "github.com/google/btree", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/grpc-ecosystem/go-grpc-prometheus", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/grpc-ecosystem/grpc-gateway", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.979, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.979253112033195}]},
  {"project": "github.com/inconshreveable/mousetrap", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License", "confidence": 1}, {"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/jonboulle/clockwork", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/mattn/go-runewidth", "license": "MIT License", "confidence": 1, "licenses": [{"type": "MIT License", "confidence": 1}]},
  {"project": "github.com/matttproud/golang_protobuf_extensions/pbutil", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/olekukonko/tablewriter", "license": "MIT License", "confidence": 0.989, "licenses": [{"type": "MIT License", "confidence": 0.9891304347826086}]},
  {"project": "github.com/prometheus/client_golang/prometheus", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/prometheus/client_model/go", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/prometheus/common", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/prometheus/procfs", "license": "Apache License 2.0", "confidence": 1, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "github.com/russross/blackfriday", "license": "BSD 2-clause \"Simplified\" License", "confidence": 0.963},
  {"project": "github.com/shurcooL/sanitized_anchor_name", "license": "MIT License", "confidence": 1, "licenses": [{"type": "BSD 2-clause \"Simplified\" License", "confidence": 0.9626168224299065}]},
  {"project": "github.com/spf13/cobra", "license": "Apache License 2.0", "confidence": 0.957, "licenses": [{"type": "Apache License 2.0", "confidence": 0.9573241061130334}]},
  {"project": "github.com/spf13/pflag", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.966, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9663865546218487}]},
  {"project": "github.com/ugorji/go/codec", "license": "MIT License", "confidence": 0.995, "licenses": [{"type": "MIT License", "confidence": 0.9946524064171123}]},
  {"project": "github.com/urfave/cli", "license": "MIT License", "confidence": 1, "licenses": [{"type": "MIT License", "confidence": 1}]},
  {"project": "github.com/xiang90/probing", "license": "MIT License", "confidence": 1, "licenses": [{"type": "MIT License", "confidence": 1}]},
  {"project": "golang.org/x/crypto", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.966, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9663865546218487}]},
  {"project": "golang.org/x/net", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.966, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9663865546218487}]},
  {"project": "golang.org/x/text", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.966, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9663865546218487}]},
  {"project": "golang.org/x/time/rate", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.966, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9663865546218487}]},
  {"project": "google.golang.org/genproto/googleapis", "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "google.golang.org/grpc", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.979, "licenses": [{"type": "Apache License 2.0", "confidence": 1}]},
  {"project": "gopkg.in/cheggaaa/pb.v1", "license": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.992, "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License", "confidence": 0.9916666666666667}]},
  {"project": "gopkg.in/yaml.v2", "license": "Apache License 2.0 and MIT License", "confidence": 1, "licenses": [{"type": "The Unlicense", "confidence": 0.35294117647058826}, {"type": "MIT License", "confidence": 0.8975609756097561}]}
]
@ -1,18 +1,26 @@
[
  {"project": "bitbucket.org/ww/goautoneg", "license": "BSD 3-clause \"New\" or \"Revised\" License", "licenses": [{"type": "BSD 3-clause \"New\" or \"Revised\" License"}]},
  {"project": "github.com/ghodss/yaml", "license": "MIT License and BSD 3-clause \"New\" or \"Revised\" License", "licenses": [{"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License"}]},
  {"project": "github.com/inconshreveable/mousetrap", "license": "Apache License 2.0"},
  {"project": "gopkg.in/yaml.v2", "license": "Apache License 2.0 and MIT License", "licenses": [{"type": "Apache License 2.0"}]}
]
build
@ -3,9 +3,7 @@
# set some environment variables
ORG_PATH="github.com/coreos"
REPO_PATH="${ORG_PATH}/etcd"
export GO15VENDOREXPERIMENT="1"

eval $(go env)
GIT_SHA=`git rev-parse --short HEAD || echo "GitNotFound"`
if [ ! -z "$FAILPOINTS" ]; then
	GIT_SHA="$GIT_SHA"-FAILPOINTS
@ -17,11 +15,7 @@ GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=$
# enable/disable failpoints
toggle_failpoints() {
	FAILPKGS="etcdserver/ mvcc/backend/"

	mode="disable"
	if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
	if [ ! -z "$1" ]; then mode="$1"; fi
	mode="$1"
	if which gofail >/dev/null 2>&1; then
		gofail "$mode" $FAILPKGS
	elif [ "$mode" != "disable" ]; then
@ -30,19 +24,26 @@ toggle_failpoints() {
	fi
}

toggle_failpoints_default() {
	mode="disable"
	if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
	toggle_failpoints "$mode"
}

etcd_build() {
	out="bin"
	if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
	toggle_failpoints
	toggle_failpoints_default
	# Static compilation is useful when etcd is run in a container
	CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcd ${REPO_PATH}/cmd/etcd || return
	CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl || return
}

etcd_setup_gopath() {
	CDIR=$(cd `dirname "$0"` && pwd)
	d=$(dirname "$0")
	CDIR=$(cd "$d" && pwd)
	cd "$CDIR"
	etcdGOPATH=${CDIR}/gopath
	etcdGOPATH="${CDIR}/gopath"
	# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
	if [ -n "$GOPATH" ]; then
		GOPATH=":$GOPATH"
@ -53,7 +54,7 @@ etcd_setup_gopath() {
	ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src
}

toggle_failpoints
toggle_failpoints_default

# only build when called directly, not sourced
if echo "$0" | grep "build$" >/dev/null; then
@ -372,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
			if err == context.Canceled || err == context.DeadlineExceeded {
				return nil, nil, err
			}
			if isOneShot {
				return nil, nil, err
			}
			continue
		}
		if resp.StatusCode/100 == 5 {
		} else if resp.StatusCode/100 == 5 {
			switch resp.StatusCode {
			case http.StatusInternalServerError, http.StatusServiceUnavailable:
				// TODO: make sure this is a no leader response
@ -385,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
			default:
				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
			}
			if isOneShot {
				return nil, nil, cerr.Errors[0]
				err = cerr.Errors[0]
			}
		if err != nil {
			if !isOneShot {
				continue
			}
			continue
			c.Lock()
			c.pinned = (k + 1) % leps
			c.Unlock()
			return nil, nil, err
		}
		if k != pinned {
			c.Lock()
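The net effect of the change above is that a one-shot request no longer aborts on the first 5xx response before advancing the pin: it records the error, moves the pinned endpoint past the failing member, and only then returns. A self-contained sketch of that selection rule; all names here are illustrative, not the package's API:

```go
package main

import (
	"errors"
	"fmt"
)

// tryEndpoints mimics the fail-over rule above: a normal request keeps
// trying the next endpoint after a server error, while a one-shot request
// returns the first server error but still advances the pin past the
// failing endpoint.
func tryEndpoints(eps []string, pinned int, isOneShot bool, call func(ep string) error) (int, error) {
	for i := 0; i < len(eps); i++ {
		k := (pinned + i) % len(eps)
		if err := call(eps[k]); err == nil {
			return k, nil // pin the endpoint that served the request
		} else if isOneShot {
			return (k + 1) % len(eps), err
		}
		// otherwise fall through to the next endpoint
	}
	return pinned, errors.New("client: no available endpoints")
}

func main() {
	eps := []string{"http://a:2379", "http://b:2379"}
	pin, err := tryEndpoints(eps, 0, true, func(ep string) error {
		return fmt.Errorf("%s: 502 Bad Gateway", ep)
	})
	fmt.Println(pin, err) // 1 http://a:2379: 502 Bad Gateway
}
```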
@ -16,6 +16,7 @@ package client

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
@ -304,7 +305,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
	fakeErr := errors.New("fake!")
	fakeURL := url.URL{}
	tests := []struct {
		client *httpClusterClient
		client *httpClusterClient
		ctx    context.Context

		wantCode   int
		wantErr    error
		wantPinned int
@ -395,10 +398,30 @@ func TestHTTPClusterClientDo(t *testing.T) {
			wantCode:   http.StatusTeapot,
			wantPinned: 1,
		},

		// 500-level errors cause one shot Do to fallthrough to next endpoint
		{
			client: &httpClusterClient{
				endpoints: []url.URL{fakeURL, fakeURL},
				clientFactory: newStaticHTTPClientFactory(
					[]staticHTTPResponse{
						{resp: http.Response{StatusCode: http.StatusBadGateway}},
						{resp: http.Response{StatusCode: http.StatusTeapot}},
					},
				),
				rand: rand.New(rand.NewSource(0)),
			},
			ctx:        context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
			wantErr:    fmt.Errorf("client: etcd member returns server error [Bad Gateway]"),
			wantPinned: 1,
		},
	}

	for i, tt := range tests {
		resp, _, err := tt.client.Do(context.Background(), nil)
		if tt.ctx == nil {
			tt.ctx = context.Background()
		}
		resp, _, err := tt.client.Do(tt.ctx, nil)
		if !reflect.DeepEqual(tt.wantErr, err) {
			t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
			continue
@ -407,11 +430,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
		if resp == nil {
			if tt.wantCode != 0 {
				t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
				continue
			}
			continue
		}

		if resp.StatusCode != tt.wantCode {
		} else if resp.StatusCode != tt.wantCode {
			t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
			continue
		}
@ -1,6 +1,6 @@
# etcd/clientv3

[](https://godoc.org/github.com/coreos/etcd/clientv3)

`etcd/clientv3` is the official Go etcd client for v3.
@ -20,6 +20,7 @@ import (

	"github.com/coreos/etcd/auth/authpb"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)
@ -104,16 +105,16 @@ type auth struct {
}

func NewAuth(c *Client) Auth {
	return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
	return &auth{remote: RetryAuthClient(c)}
}

func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
	return (*AuthEnableResponse)(resp), toErr(ctx, err)
}

func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
	return (*AuthDisableResponse)(resp), toErr(ctx, err)
}

@ -138,12 +139,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
}

func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
	return (*AuthUserListResponse)(resp), toErr(ctx, err)
}

@ -168,12 +169,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
}

func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}

@ -201,7 +202,7 @@ type authenticator struct {
}

func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
	return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
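With `RetryAuthClient` in place, auth RPCs no longer pass `grpc.FailFast(false)` per call; transient endpoint failures are handled by the retry wrapper instead. Caller-side usage is unchanged; a sketch assuming a reachable cluster (endpoint and credentials are placeholders):

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Auth calls now go through the retrying stub, so a single flaky
	// endpoint no longer surfaces immediately to the caller.
	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}
```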
@ -1,356 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"net/url"
	"strings"
	"sync"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")

// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
	// addrs are the client's endpoints for grpc
	addrs []grpc.Address
	// notifyCh notifies grpc of the set of addresses for connecting
	notifyCh chan []grpc.Address

	// readyc closes once the first connection is up
	readyc    chan struct{}
	readyOnce sync.Once

	// mu protects upEps, pinAddr, and connectingAddr
	mu sync.RWMutex

	// upc closes when upEps transitions from empty to non-zero or the balancer closes.
	upc chan struct{}

	// downc closes when grpc calls down() on pinAddr
	downc chan struct{}

	// stopc is closed to signal updateNotifyLoop should stop.
	stopc chan struct{}

	// donec closes when all goroutines are exited
	donec chan struct{}

	// updateAddrsC notifies updateNotifyLoop to update addrs.
	updateAddrsC chan struct{}

	// grpc issues TLS cert checks using the string passed into dial so
	// that string must be the host. To recover the full scheme://host URL,
	// have a map from hosts to the original endpoint.
	host2ep map[string]string

	// pinAddr is the currently pinned address; set to the empty string on
	// intialization and shutdown.
	pinAddr string

	closed bool
}

func newSimpleBalancer(eps []string) *simpleBalancer {
	notifyCh := make(chan []grpc.Address, 1)
	addrs := make([]grpc.Address, len(eps))
	for i := range eps {
		addrs[i].Addr = getHost(eps[i])
	}
	sb := &simpleBalancer{
		addrs:        addrs,
		notifyCh:     notifyCh,
		readyc:       make(chan struct{}),
		upc:          make(chan struct{}),
		stopc:        make(chan struct{}),
		downc:        make(chan struct{}),
		donec:        make(chan struct{}),
		updateAddrsC: make(chan struct{}, 1),
		host2ep:      getHost2ep(eps),
	}
	close(sb.downc)
	go sb.updateNotifyLoop()
	return sb
}

func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }

func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.upc
}

func (b *simpleBalancer) getEndpoint(host string) string {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.host2ep[host]
}

func getHost2ep(eps []string) map[string]string {
	hm := make(map[string]string, len(eps))
	for i := range eps {
		_, host, _ := parseEndpoint(eps[i])
		hm[host] = eps[i]
	}
	return hm
}

func (b *simpleBalancer) updateAddrs(eps []string) {
	np := getHost2ep(eps)

	b.mu.Lock()

	match := len(np) == len(b.host2ep)
	for k, v := range np {
		if b.host2ep[k] != v {
			match = false
			break
		}
	}
	if match {
		// same endpoints, so no need to update address
		b.mu.Unlock()
		return
	}

	b.host2ep = np

	addrs := make([]grpc.Address, 0, len(eps))
	for i := range eps {
		addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
	}
	b.addrs = addrs

	// updating notifyCh can trigger new connections,
	// only update addrs if all connections are down
	// or addrs does not include pinAddr.
	update := !hasAddr(addrs, b.pinAddr)
	b.mu.Unlock()

	if update {
		select {
		case b.updateAddrsC <- struct{}{}:
		case <-b.stopc:
		}
	}
}

func hasAddr(addrs []grpc.Address, targetAddr string) bool {
	for _, addr := range addrs {
		if targetAddr == addr.Addr {
			return true
		}
	}
	return false
}

func (b *simpleBalancer) updateNotifyLoop() {
	defer close(b.donec)

	for {
		b.mu.RLock()
		upc, downc, addr := b.upc, b.downc, b.pinAddr
		b.mu.RUnlock()
		// downc or upc should be closed
		select {
		case <-downc:
			downc = nil
		default:
		}
		select {
		case <-upc:
			upc = nil
		default:
		}
		switch {
		case downc == nil && upc == nil:
			// stale
			select {
			case <-b.stopc:
				return
			default:
			}
		case downc == nil:
			b.notifyAddrs()
			select {
			case <-upc:
			case <-b.updateAddrsC:
				b.notifyAddrs()
			case <-b.stopc:
				return
			}
		case upc == nil:
			select {
			// close connections that are not the pinned address
			case b.notifyCh <- []grpc.Address{{Addr: addr}}:
			case <-downc:
			case <-b.stopc:
				return
			}
			select {
			case <-downc:
			case <-b.updateAddrsC:
			case <-b.stopc:
				return
			}
			b.notifyAddrs()
		}
	}
}

func (b *simpleBalancer) notifyAddrs() {
	b.mu.RLock()
	addrs := b.addrs
	b.mu.RUnlock()
	select {
	case b.notifyCh <- addrs:
	case <-b.stopc:
	}
}

func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
	b.mu.Lock()
	defer b.mu.Unlock()

	// gRPC might call Up after it called Close. We add this check
	// to "fix" it up at application layer. Or our simplerBalancer
	// might panic since b.upc is closed.
	if b.closed {
		return func(err error) {}
	}
	// gRPC might call Up on a stale address.
	// Prevent updating pinAddr with a stale address.
	if !hasAddr(b.addrs, addr.Addr) {
		return func(err error) {}
	}
	if b.pinAddr != "" {
		return func(err error) {}
	}
	// notify waiting Get()s and pin first connected address
	close(b.upc)
	b.downc = make(chan struct{})
	b.pinAddr = addr.Addr
	// notify client that a connection is up
	b.readyOnce.Do(func() { close(b.readyc) })
	return func(err error) {
		b.mu.Lock()
		b.upc = make(chan struct{})
		close(b.downc)
		b.pinAddr = ""
		b.mu.Unlock()
	}
}

func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
	var (
		addr   string
		closed bool
	)

	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
	// an address it has notified via Notify immediately instead of blocking.
	if !opts.BlockingWait {
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr == "" {
			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
		}
		return grpc.Address{Addr: addr}, func() {}, nil
	}

	for {
		b.mu.RLock()
		ch := b.upc
		b.mu.RUnlock()
		select {
		case <-ch:
		case <-b.donec:
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		case <-ctx.Done():
			return grpc.Address{Addr: ""}, nil, ctx.Err()
		}
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr != "" {
			break
		}
	}
	return grpc.Address{Addr: addr}, func() {}, nil
}

func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }

func (b *simpleBalancer) Close() error {
	b.mu.Lock()
	// In case gRPC calls close twice. TODO: remove the checking
	// when we are sure that gRPC wont call close twice.
	if b.closed {
		b.mu.Unlock()
		<-b.donec
		return nil
	}
	b.closed = true
	close(b.stopc)
	b.pinAddr = ""

	// In the case of following scenario:
	// 1. upc is not closed; no pinned address
	// 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks
	// 3. clientconn.Close() calls balancer.Close(); closed = true
	// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
	// we must close upc so Get() exits from blocking on upc
	select {
	case <-b.upc:
	default:
		// terminate all waiting Get()s
		close(b.upc)
	}

	b.mu.Unlock()

	// wait for updateNotifyLoop to finish
	<-b.donec
	close(b.notifyCh)

	return nil
}

func getHost(ep string) string {
	url, uerr := url.Parse(ep)
	if uerr != nil || !strings.Contains(ep, "://") {
		return ep
	}
	return url.Host
}
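Since `newSimpleBalancer` is unexported, wiring it up only happens inside this package; a minimal sketch mirroring the dial in `TestBalancerDoNotBlockOnClose` below (endpoints are placeholders, and `grpc.WithBalancer` is the old balancer API this file was written against):

```go
// The balancer pins the first address that comes up and feeds the full
// endpoint list to grpc through Notify(); Close() must be called to stop
// its notify loop.
sb := newSimpleBalancer([]string{"localhost:2379", "localhost:22379"})
conn, err := grpc.Dial("", grpc.WithInsecure(), grpc.WithBalancer(sb))
if err != nil {
	log.Fatal(err)
}
defer conn.Close()
defer sb.Close() // stops updateNotifyLoop and closes notifyCh
```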
@ -1,239 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"errors"
	"net"
	"sync"
	"testing"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/testutil"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

var (
	endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
)

func TestBalancerGetUnblocking(t *testing.T) {
	sb := newSimpleBalancer(endpoints)
	defer sb.Close()
	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
		t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't")
	}
	unblockingOpts := grpc.BalancerGetOptions{BlockingWait: false}

	_, _, err := sb.Get(context.Background(), unblockingOpts)
	if err != ErrNoAddrAvilable {
		t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
	}

	down1 := sb.Up(grpc.Address{Addr: endpoints[1]})
	if addrs := <-sb.Notify(); len(addrs) != 1 {
		t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed")
	}
	down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
	addrFirst, putFun, err := sb.Get(context.Background(), unblockingOpts)
	if err != nil {
		t.Errorf("Get() with up endpoints should success, got %v", err)
	}
	if addrFirst.Addr != endpoints[1] {
		t.Errorf("Get() didn't return expected address, got %v", addrFirst)
	}
	if putFun == nil {
		t.Errorf("Get() returned unexpected nil put function")
	}
	addrSecond, _, _ := sb.Get(context.Background(), unblockingOpts)
	if addrFirst.Addr != addrSecond.Addr {
		t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
	}

	down1(errors.New("error"))
	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
		t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection")
	}
	down2(errors.New("error"))
	_, _, err = sb.Get(context.Background(), unblockingOpts)
	if err != ErrNoAddrAvilable {
		t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
	}
}

func TestBalancerGetBlocking(t *testing.T) {
	sb := newSimpleBalancer(endpoints)
	defer sb.Close()
	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
		t.Errorf("Initialize newSimpleBalancer should have triggered Notify() chan, but it didn't")
	}
	blockingOpts := grpc.BalancerGetOptions{BlockingWait: true}

	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
	_, _, err := sb.Get(ctx, blockingOpts)
	if err != context.DeadlineExceeded {
		t.Errorf("Get() with no up endpoints should timeout, got %v", err)
	}

	downC := make(chan func(error), 1)

	go func() {
		// ensure sb.Up() will be called after sb.Get() to see if Up() releases blocking Get()
		time.Sleep(time.Millisecond * 100)
		f := sb.Up(grpc.Address{Addr: endpoints[1]})
		if addrs := <-sb.Notify(); len(addrs) != 1 {
			t.Errorf("first Up() should have triggered balancer to send the first connected address via Notify chan so that other connections can be closed")
		}
		downC <- f
	}()
	addrFirst, putFun, err := sb.Get(context.Background(), blockingOpts)
	if err != nil {
		t.Errorf("Get() with up endpoints should success, got %v", err)
	}
	if addrFirst.Addr != endpoints[1] {
		t.Errorf("Get() didn't return expected address, got %v", addrFirst)
	}
	if putFun == nil {
		t.Errorf("Get() returned unexpected nil put function")
	}
	down1 := <-downC

	down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
	addrSecond, _, _ := sb.Get(context.Background(), blockingOpts)
	if addrFirst.Addr != addrSecond.Addr {
		t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
	}

	down1(errors.New("error"))
	if addrs := <-sb.Notify(); len(addrs) != len(endpoints) {
		t.Errorf("closing the only connection should triggered balancer to send the all endpoints via Notify chan so that we can establish a connection")
	}
	down2(errors.New("error"))
	ctx, _ = context.WithTimeout(context.Background(), time.Millisecond*100)
	_, _, err = sb.Get(ctx, blockingOpts)
	if err != context.DeadlineExceeded {
		t.Errorf("Get() with no up endpoints should timeout, got %v", err)
	}
}

// TestBalancerDoNotBlockOnClose ensures that balancer and grpc don't deadlock each other
// due to rapid open/close conn. The deadlock causes balancer.Close() to block forever.
// See issue: https://github.com/coreos/etcd/issues/7283 for more detail.
func TestBalancerDoNotBlockOnClose(t *testing.T) {
	defer testutil.AfterTest(t)

	kcl := newKillConnListener(t, 3)
	defer kcl.close()

	for i := 0; i < 5; i++ {
		sb := newSimpleBalancer(kcl.endpoints())
		conn, err := grpc.Dial("", grpc.WithInsecure(), grpc.WithBalancer(sb))
		if err != nil {
			t.Fatal(err)
		}
		kvc := pb.NewKVClient(conn)
		<-sb.readyc

		var wg sync.WaitGroup
		wg.Add(100)
		cctx, cancel := context.WithCancel(context.TODO())
		for j := 0; j < 100; j++ {
			go func() {
				defer wg.Done()
				kvc.Range(cctx, &pb.RangeRequest{}, grpc.FailFast(false))
			}()
		}
		// balancer.Close() might block
		// if balancer and grpc deadlock each other.
		bclosec, cclosec := make(chan struct{}), make(chan struct{})
		go func() {
			defer close(bclosec)
			sb.Close()
		}()
		go func() {
			defer close(cclosec)
			conn.Close()
		}()
		select {
		case <-bclosec:
		case <-time.After(3 * time.Second):
			testutil.FatalStack(t, "balancer close timeout")
		}
		select {
		case <-cclosec:
		case <-time.After(3 * time.Second):
			t.Fatal("grpc conn close timeout")
		}

		cancel()
		wg.Wait()
	}
}

// killConnListener listens incoming conn and kills it immediately.
type killConnListener struct {
	wg    sync.WaitGroup
	eps   []string
	stopc chan struct{}
	t     *testing.T
}

func newKillConnListener(t *testing.T, size int) *killConnListener {
	kcl := &killConnListener{stopc: make(chan struct{}), t: t}

	for i := 0; i < size; i++ {
		ln, err := net.Listen("tcp", ":0")
		if err != nil {
			t.Fatal(err)
		}
		kcl.eps = append(kcl.eps, ln.Addr().String())
		kcl.wg.Add(1)
		go kcl.listen(ln)
	}
	return kcl
}

func (kcl *killConnListener) endpoints() []string {
	return kcl.eps
}

func (kcl *killConnListener) listen(l net.Listener) {
	go func() {
		defer kcl.wg.Done()
		for {
			conn, err := l.Accept()
			select {
			case <-kcl.stopc:
				return
			default:
			}
			if err != nil {
				kcl.t.Fatal(err)
			}
			time.Sleep(1 * time.Millisecond)
			conn.Close()
		}
	}()
	<-kcl.stopc
	l.Close()
}

func (kcl *killConnListener) close() {
	close(kcl.stopc)
	kcl.wg.Wait()
}
@ -31,7 +31,9 @@ import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

var (
@ -51,18 +53,17 @@ type Client struct {
	conn     *grpc.ClientConn
	dialerrc chan error

	cfg              Config
	creds            *credentials.TransportCredentials
	balancer         *simpleBalancer
	retryWrapper     retryRpcFunc
	retryAuthWrapper retryRpcFunc
	cfg      Config
	creds    *credentials.TransportCredentials
	balancer *healthBalancer
	mu       sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// Username is a username for authentication
	// Username is a user name for authentication.
	Username string
	// Password is a password for authentication
	// Password is a password for authentication.
	Password string
	// tokenCred is an instance of WithPerRPCCredentials()'s argument
	tokenCred *authTokenCredential
@ -116,8 +117,23 @@ func (c *Client) Endpoints() (eps []string) {

// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
	c.mu.Lock()
	c.cfg.Endpoints = eps
	c.balancer.updateAddrs(eps)
	c.mu.Unlock()
	c.balancer.updateAddrs(eps...)

	// updating notifyCh can trigger new connections,
	// need update addrs if all connections are down
	// or addrs does not include pinAddr.
	c.balancer.mu.RLock()
	update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
	c.balancer.mu.RUnlock()
	if update {
		select {
		case c.balancer.updateAddrsC <- notifyNext:
		case <-c.balancer.stopc:
		}
	}
}

// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
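For callers, endpoint rotation stays a single method call; a short sketch assuming an existing `cli *clientv3.Client` (the URLs are placeholders):

```go
import "github.com/coreos/etcd/clientv3"

// rotateEndpoints swaps a live client onto a new member list; per the
// logic above, the balancer only forces a reconnect when its pinned
// address is no longer in the list.
func rotateEndpoints(cli *clientv3.Client) {
	cli.SetEndpoints("http://10.0.1.10:2379", "http://10.0.1.11:2379")
}
```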
@ -144,8 +160,10 @@ func (c *Client) autoSync() {
		case <-c.ctx.Done():
			return
		case <-time.After(c.cfg.AutoSyncInterval):
			ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
			if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
			ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
			err := c.Sync(ctx)
			cancel()
			if err != nil && err != c.ctx.Err() {
				logger.Println("Auto sync endpoints failed:", err)
			}
		}
@ -174,7 +192,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	host = endpoint
	url, uerr := url.Parse(endpoint)
	if uerr != nil || !strings.Contains(endpoint, "://") {
		return
		return proto, host, scheme
	}
	scheme = url.Scheme

@ -182,13 +200,13 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	host = url.Host
	switch url.Scheme {
	case "http", "https":
	case "unix":
	case "unix", "unixs":
		proto = "unix"
		host = url.Host + url.Path
	default:
		proto, host = "", ""
	}
	return
	return proto, host, scheme
}

func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
@ -197,7 +215,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
	case "unix":
	case "http":
		creds = nil
	case "https":
	case "https", "unixs":
		if creds != nil {
			break
		}
@ -207,7 +225,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
	default:
		creds = nil
	}
	return
	return creds
}

// dialSetupOpts gives the dial opts prior to any authentication
@ -215,10 +233,17 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
	if c.cfg.DialTimeout > 0 {
		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
	}
	if c.cfg.DialKeepAliveTime > 0 {
		params := keepalive.ClientParameters{
			Time:    c.cfg.DialKeepAliveTime,
			Timeout: c.cfg.DialKeepAliveTimeout,
		}
		opts = append(opts, grpc.WithKeepaliveParams(params))
	}
	opts = append(opts, dopts...)

	f := func(host string, t time.Duration) (net.Conn, error) {
		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
		proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
		if host == "" && endpoint != "" {
			// dialing an endpoint not in the balancer; use
			// endpoint passed into dial
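The new keepalive block is driven by two `clientv3.Config` fields; a minimal sketch of turning it on (values are illustrative):

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// DialKeepAliveTime/DialKeepAliveTimeout feed the
	// keepalive.ClientParameters block added above.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"},
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    10 * time.Second, // ping the endpoint every 10s
		DialKeepAliveTimeout: 3 * time.Second,  // drop it if no ack within 3s
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```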
@ -311,7 +336,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
	if err != nil {
		if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
			if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
				err = grpc.ErrClientConnTimeout
				err = context.DeadlineExceeded
			}
			return nil, err
		}
@ -322,7 +347,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo

	opts = append(opts, c.cfg.DialOptions...)

	conn, err := grpc.Dial(host, opts...)
	conn, err := grpc.DialContext(c.ctx, host, opts...)
	if err != nil {
		return nil, err
	}
@ -333,7 +358,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	return metadata.NewContext(ctx, md)
	return metadata.NewOutgoingContext(ctx, md)
}
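`WithRequireLeader` is applied per request context; a short usage sketch assuming an existing `cli *clientv3.Client` (the key is a placeholder):

```go
import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// watchWithLeader makes the watch fail with a leader-loss error instead
// of silently blocking while the cluster is leaderless.
func watchWithLeader(cli *clientv3.Client, key string) clientv3.WatchChan {
	ctx := clientv3.WithRequireLeader(context.Background())
	return cli.Watch(ctx, key)
}
```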
func newClient(cfg *Config) (*Client, error) {
@ -366,29 +391,32 @@ func newClient(cfg *Config) (*Client, error) {
		client.Password = cfg.Password
	}

	client.balancer = newSimpleBalancer(cfg.Endpoints)
	conn, err := client.dial("", grpc.WithBalancer(client.balancer))
	client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
		return grpcHealthCheck(client, ep)
	})

	// use Endpoints[0] so that for https:// without any tls config given, then
	// grpc will assume the certificate server name is the endpoint host.
	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
	if err != nil {
		client.cancel()
		client.balancer.Close()
		return nil, err
	}
	client.conn = conn
	client.retryWrapper = client.newRetryWrapper()
	client.retryAuthWrapper = client.newAuthRetryWrapper()

	// wait for a connection
	if cfg.DialTimeout > 0 {
		hasConn := false
		waitc := time.After(cfg.DialTimeout)
		select {
		case <-client.balancer.readyc:
		case <-client.balancer.ready():
			hasConn = true
		case <-ctx.Done():
		case <-waitc:
		}
		if !hasConn {
			err := grpc.ErrClientConnTimeout
			err := context.DeadlineExceeded
			select {
			case err = <-client.dialerrc:
			default:
@ -423,7 +451,7 @@ func (c *Client) checkVersion() (err error) {
	errc := make(chan error, len(c.cfg.Endpoints))
	ctx, cancel := context.WithCancel(c.ctx)
	if c.cfg.DialTimeout > 0 {
		ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
		ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
	}
	wg.Add(len(c.cfg.Endpoints))
	for _, ep := range c.cfg.Endpoints {
@ -438,7 +466,7 @@ func (c *Client) checkVersion() (err error) {
		vs := strings.Split(resp.Version, ".")
		maj, min := 0, 0
		if len(vs) >= 2 {
			maj, rerr = strconv.Atoi(vs[0])
			maj, _ = strconv.Atoi(vs[0])
			min, rerr = strconv.Atoi(vs[1])
		}
		if maj < 3 || (maj == 3 && min < 2) {
@ -470,14 +498,14 @@ func isHaltErr(ctx context.Context, err error) bool {
	if err == nil {
		return false
	}
	code := grpc.Code(err)
	ev, _ := status.FromError(err)
	// Unavailable codes mean the system will be right back.
	// (e.g., can't connect, lost leader)
	// Treat Internal codes as if something failed, leaving the
	// system in an inconsistent state, but retrying could make progress.
	// (e.g., failed in middle of send, corrupted frame)
	// TODO: are permanent Internal errors possible from grpc?
	return code != codes.Unavailable && code != codes.Internal
	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}

func toErr(ctx context.Context, err error) error {
@ -488,7 +516,8 @@ func toErr(ctx context.Context, err error) error {
	if _, ok := err.(rpctypes.EtcdError); ok {
		return err
	}
	code := grpc.Code(err)
	ev, _ := status.FromError(err)
	code := ev.Code()
	switch code {
	case codes.DeadlineExceeded:
		fallthrough
@ -497,7 +526,6 @@ func toErr(ctx context.Context, err error) error {
			err = ctx.Err()
		}
	case codes.Unavailable:
		err = ErrNoAvailableEndpoints
	case codes.FailedPrecondition:
		err = grpc.ErrClientConnClosing
	}
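`grpc.Code` was superseded by the `status` package; a round-trip sketch of the new error inspection used above:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.Unavailable, "etcdserver: leader changed")

	// status.FromError replaces the old grpc.Code/grpc.ErrorDesc helpers;
	// ok reports whether err carried a grpc status at all.
	ev, ok := status.FromError(err)
	fmt.Println(ok, ev.Code() == codes.Unavailable, ev.Message())
}
```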
@ -22,8 +22,8 @@ import (

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/pkg/testutil"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func TestDialCancel(t *testing.T) {
@ -45,7 +45,7 @@ func TestDialCancel(t *testing.T) {
		t.Fatal(err)
	}

	// connect to ipv4 blackhole so dial blocks
	// connect to ipv4 black hole so dial blocks
	c.SetEndpoints("http://254.0.0.1:12345")

	// issue Get to force redial attempts
@ -97,7 +97,7 @@ func TestDialTimeout(t *testing.T) {
	for i, cfg := range testCfgs {
		donec := make(chan error)
		go func() {
			// without timeout, dial continues forever on ipv4 blackhole
			// without timeout, dial continues forever on ipv4 black hole
			c, err := New(cfg)
			if c != nil || err == nil {
				t.Errorf("#%d: new client should fail", i)
@ -117,8 +117,8 @@ func TestDialTimeout(t *testing.T) {
		case <-time.After(5 * time.Second):
			t.Errorf("#%d: failed to timeout dial on time", i)
		case err := <-donec:
			if err != grpc.ErrClientConnTimeout {
				t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
			if err != context.DeadlineExceeded {
				t.Errorf("#%d: unexpected error %v, want %v", i, err, context.DeadlineExceeded)
			}
		}
	}
@ -15,11 +15,12 @@
package clientv3util_test

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/clientv3util"

	"golang.org/x/net/context"
)

func ExampleKeyExists_put() {
@ -33,7 +34,7 @@ func ExampleKeyExists_put() {
	kvc := clientv3.NewKV(cli)

	// perform a put only if key is missing
	// It is useful to do the check (transactionally) to avoid overwriting
	// It is useful to do the check atomically to avoid overwriting
	// the existing key which would generate potentially unwanted events,
	// unless of course you wanted to do an overwrite no matter what.
	_, err = kvc.Txn(context.Background()).
@ -16,8 +16,8 @@ package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -74,27 +74,19 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes

 func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
 	// it is safe to retry on update.
-	for {
-		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
-		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
-		if err == nil {
-			return (*MemberUpdateResponse)(resp), nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
+	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+	resp, err := c.remote.MemberUpdate(ctx, r)
+	if err == nil {
+		return (*MemberUpdateResponse)(resp), nil
 	}
+	return nil, toErr(ctx, err)
 }

 func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
 	// it is safe to retry on list.
-	for {
-		resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
-		if err == nil {
-			return (*MemberListResponse)(resp), nil
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
+	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{})
+	if err == nil {
+		return (*MemberListResponse)(resp), nil
 	}
+	return nil, toErr(ctx, err)
 }
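With the per-method retry loops above removed, callers simply issue the RPC and let the client's shared retry logic handle transient failures. A minimal sketch, assuming a reachable endpoint, of listing members through the public API:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// MemberList is idempotent, so it is safe for the client to retry it.
	resp, err := cli.MemberList(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Println(m.Name, m.PeerURLs)
	}
}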
@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
 	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
 }

-// WithCompactPhysical makes compact RPC call wait until
-// the compaction is physically applied to the local database
-// such that compacted entries are totally removed from the
-// backend database.
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
 func WithCompactPhysical() CompactOption {
 	return func(op *CompactOp) { op.physical = true }
 }
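A short usage sketch, not from the diff, of the option documented above: compact history up to a revision and block until the compacted entries are physically purged from storage. The endpoint and key name are hypothetical:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Get(context.Background(), "example-key")
	if err != nil {
		log.Fatal(err)
	}
	// Compact up to the current store revision; WithCompactPhysical makes
	// the call wait for the physical removal, not just the logical mark.
	_, err = cli.Compact(context.Background(), resp.Header.Revision, clientv3.WithCompactPhysical())
	if err != nil {
		log.Fatal(err)
	}
}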
@ -99,6 +99,7 @@ func (cmp *Cmp) ValueBytes() []byte {
 // WithValueBytes sets the byte slice for the comparison's value.
 func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }

+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
 func mustInt64(val interface{}) int64 {
 	if v, ok := val.(int64); ok {
 		return v

@ -108,3 +109,12 @@ func mustInt64(val interface{}) int64 {
 	}
 	panic("bad value")
 }
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val interface{}) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
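mustInt64orLeaseID exists so a comparison value can be a LeaseID directly. A hedged sketch, assuming the LeaseValue comparison target this helper supports, guarding a write on the lease that owns a key (endpoint and key are hypothetical):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	lease, err := cli.Grant(context.Background(), 5)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = cli.Put(context.Background(), "k", "v", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}
	// The LeaseID passed here is what mustInt64orLeaseID converts to int64.
	_, err = cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.LeaseValue("k"), "=", lease.ID)).
		Then(clientv3.OpPut("k", "v2", clientv3.WithLease(lease.ID))).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
}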
@ -21,6 +21,7 @@ import (
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
+
 	"golang.org/x/net/context"
 )
@ -185,12 +186,12 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
 			cancel()
 			return
 		}
-		// only accept PUTs; a DELETE will make observe() spin
+		// only accept puts; a delete will make observe() spin
 		for _, ev := range wr.Events {
 			if ev.Type == mvccpb.PUT {
 				hdr, kv = &wr.Header, ev.Kv
 				// may have multiple revs; hdr.rev = the last rev
-				// set to kv's rev in case batch has multiple PUTs
+				// set to kv's rev in case batch has multiple Puts
 				hdr.Revision = kv.ModRevision
 				break
 			}
@ -213,6 +214,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
 	for !keyDeleted {
 		wr, ok := <-wch
 		if !ok {
+			cancel()
 			return
 		}
 		for _, ev := range wr.Events {
@ -225,6 +227,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
 		select {
 		case ch <- *resp:
 		case <-cctx.Done():
+			cancel()
 			return
 		}
 	}
@ -240,4 +243,4 @@ func (e *Election) Key() string { return e.leaderKey }
 func (e *Election) Rev() int64 { return e.leaderRev }

 // Header is the response header from the last successful election proposal.
-func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
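The observe loop above feeds Election.Observe, and Rev/Header expose the winning proposal. A minimal campaign sketch, with a hypothetical endpoint and election prefix:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election/") // hypothetical prefix
	if err = e.Campaign(context.Background(), "candidate-1"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("elected at rev", e.Rev(), "header rev", e.Header().Revision)
}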
@ -20,6 +20,7 @@ import (
 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
+
 	"golang.org/x/net/context"
 )
@ -20,6 +20,7 @@ import (

 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
 	"golang.org/x/net/context"
 )
@ -49,7 +50,9 @@ func (m *Mutex) Lock(ctx context.Context) error {
 	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
 	// reuse key in case this session already holds the lock
 	get := v3.OpGet(m.myKey)
-	resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
+	// fetch current holder to complete uncontended path with only one RPC
+	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
 	if err != nil {
 		return err
 	}

@ -57,6 +60,12 @@ func (m *Mutex) Lock(ctx context.Context) error {
 	if !resp.Succeeded {
 		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
 	}
+	// if no key on prefix / the minimum rev is key, already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}

 	// wait for deletion revisions prior to myKey
 	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
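With the getOwner read folded into the transaction, an uncontended Lock completes in a single RPC. A usage sketch under the same assumptions (hypothetical endpoint and lock prefix):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/") // hypothetical prefix
	// Lock issues the single txn described above; with no other holder
	// under the prefix it returns without a second round trip.
	if err := m.Lock(context.Background()); err != nil {
		log.Fatal(err)
	}
	fmt.Println("holding lock", m.Key())
	if err := m.Unlock(context.Background()); err != nil {
		log.Fatal(err)
	}
}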
@ -18,6 +18,7 @@ import (
 	"time"

 	v3 "github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
@ -53,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
 	ctx, cancel := context.WithCancel(ops.ctx)
 	keepAlive, err := client.KeepAlive(ctx, id)
 	if err != nil || keepAlive == nil {
+		cancel()
 		return nil, err
 	}
@ -18,6 +18,7 @@ import (
 	"math"

 	v3 "github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
@ -46,7 +47,7 @@ const (
 	// SerializableSnapshot provides serializable isolation and also checks
 	// for write conflicts.
 	SerializableSnapshot Isolation = iota
-	// Serializable reads within the same transactiona attempt return data
+	// Serializable reads within the same transaction attempt return data
 	// from the at the revision of the first read.
 	Serializable
 	// RepeatableReads reads within the same transaction attempt always
@ -85,7 +86,7 @@ func WithPrefetch(keys ...string) stmOption {
 	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
 }

-// NewSTM initiates a new STM instance, using snapshot isolation by default.
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
 func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
 	opts := &stmOptions{ctx: c.Ctx()}
 	for _, f := range so {
@ -193,11 +194,12 @@ func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
 	}
 }

+// first returns the store revision from the first fetch
 func (rs readSet) first() int64 {
 	ret := int64(math.MaxInt64 - 1)
 	for _, resp := range rs {
-		if len(resp.Kvs) > 0 && resp.Kvs[0].ModRevision < ret {
-			ret = resp.Kvs[0].ModRevision
+		if rev := resp.Header.Revision; rev < ret {
+			ret = rev
 		}
 	}
 	return ret
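NewSTM retries the apply function until it commits without a conflicting write, under serializable snapshot isolation by default. A minimal sketch, with hypothetical endpoint and keys, swapping two values atomically:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Swap two keys; NewSTM re-runs swap() until the txn commits cleanly.
	swap := func(stm concurrency.STM) error {
		a := stm.Get("accounts/alice") // hypothetical keys
		b := stm.Get("accounts/bob")
		stm.Put("accounts/alice", b)
		stm.Put("accounts/bob", a)
		return nil
	}
	if _, err := concurrency.NewSTM(cli, swap); err != nil {
		log.Fatal(err)
	}
}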
@ -33,10 +33,18 @@ type Config struct {
 	// DialTimeout is the timeout for failing to establish a connection.
 	DialTimeout time.Duration `json:"dial-timeout"`

+	// DialKeepAliveTime is the time in seconds after which client pings the server to see if
+	// transport is alive.
+	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
+
+	// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
+	// keep-alive probe. If the response is not received in this time, the connection is closed.
+	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
+
 	// TLS holds the client secure credentials, if any.
 	TLS *tls.Config

-	// Username is a username for authentication.
+	// Username is a user name for authentication.
 	Username string `json:"username"`

 	// Password is a password for authentication.
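A short configuration sketch, not from the diff, exercising the new keep-alive fields. Note that the fields are time.Duration values even though the comments above say "in seconds"; the numbers here are arbitrary and the endpoint is hypothetical:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    30 * time.Second, // ping the server every 30s
		DialKeepAliveTimeout: 10 * time.Second, // close if no pong within 10s
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}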
@ -28,7 +28,7 @@
 // Make sure to close the client after using it. If the client is not closed, the
 // connection will have leaky goroutines.
 //
-// To specify client request timeout, pass context.WithTimeout to APIs:
+// To specify a client request timeout, wrap the context with context.WithTimeout:
 //
 //	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 //	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
@ -1,113 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3_test
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/coreos/etcd/clientv3"
-	"golang.org/x/net/context"
-)
-
-func ExampleAuth() {
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints:   endpoints,
-		DialTimeout: dialTimeout,
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil {
-		log.Fatal(err)
-	}
-	if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil {
-		log.Fatal(err)
-	}
-	if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil {
-		log.Fatal(err)
-	}
-
-	if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil {
-		log.Fatal(err)
-	}
-
-	if _, err = cli.RoleGrantPermission(
-		context.TODO(),
-		"r",   // role name
-		"foo", // key
-		"zoo", // range end
-		clientv3.PermissionType(clientv3.PermReadWrite),
-	); err != nil {
-		log.Fatal(err)
-	}
-	if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil {
-		log.Fatal(err)
-	}
-	if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil {
-		log.Fatal(err)
-	}
-	if _, err = cli.AuthEnable(context.TODO()); err != nil {
-		log.Fatal(err)
-	}
-
-	cliAuth, err := clientv3.New(clientv3.Config{
-		Endpoints:   endpoints,
-		DialTimeout: dialTimeout,
-		Username:    "u",
-		Password:    "123",
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cliAuth.Close()
-
-	if _, err = cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil {
-		log.Fatal(err)
-	}
-
-	_, err = cliAuth.Txn(context.TODO()).
-		If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
-		Then(clientv3.OpPut("zoo1", "XYZ")).
-		Else(clientv3.OpPut("zoo1", "ABC")).
-		Commit()
-	fmt.Println(err)
-
-	// now check the permission with the root account
-	rootCli, err := clientv3.New(clientv3.Config{
-		Endpoints:   endpoints,
-		DialTimeout: dialTimeout,
-		Username:    "root",
-		Password:    "123",
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer rootCli.Close()
-
-	resp, err := rootCli.RoleGet(context.TODO(), "r")
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
-
-	if _, err = rootCli.AuthDisable(context.TODO()); err != nil {
-		log.Fatal(err)
-	}
-	// Output: etcdserver: permission denied
-	// user u permission: key "foo", range end "zoo"
-}
@ -19,6 +19,7 @@ import (
 	"log"

 	"github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
@ -20,6 +20,7 @@ import (

 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+
 	"golang.org/x/net/context"
 )
@ -236,8 +237,11 @@ func ExampleKV_txn() {

 	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
 	_, err = kvc.Txn(ctx).
-		If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical
-		Then(clientv3.OpPut("key", "XYZ")). // this runs, since 'xyz' > 'abc'
+		// txn value comparisons are lexical
+		If(clientv3.Compare(clientv3.Value("key"), ">", "abc")).
+		// the "Then" runs, since "xyz" > "abc"
+		Then(clientv3.OpPut("key", "XYZ")).
+		// the "Else" does not run
 		Else(clientv3.OpPut("key", "ABC")).
 		Commit()
 	cancel()
@ -19,6 +19,7 @@ import (
 	"log"

 	"github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
@ -18,9 +18,8 @@ import (
 	"fmt"
 	"log"

-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/clientv3"
+	"golang.org/x/net/context"
 )

 func ExampleMaintenance_status() {
@ -34,20 +33,15 @@ func ExampleMaintenance_status() {
 		}
 		defer cli.Close()

-		// resp, err := cli.Status(context.Background(), ep)
-		//
-		// or
-		//
-		mapi := clientv3.NewMaintenance(cli)
-		resp, err := mapi.Status(context.Background(), ep)
+		resp, err := cli.Status(context.Background(), ep)
 		if err != nil {
 			log.Fatal(err)
 		}
-		fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader)
+		fmt.Printf("endpoint: %s / Leader: %v\n", ep, resp.Header.MemberId == resp.Leader)
 	}
-	// endpoint: localhost:2379 / IsLeader: false
-	// endpoint: localhost:22379 / IsLeader: false
-	// endpoint: localhost:32379 / IsLeader: true
+	// endpoint: localhost:2379 / Leader: false
+	// endpoint: localhost:22379 / Leader: false
+	// endpoint: localhost:32379 / Leader: true
 }

 func ExampleMaintenance_defragment() {
@ -30,7 +30,7 @@ import (
 	"google.golang.org/grpc"
 )

-func ExampleMetrics_range() {
+func ExampleClient_metrics() {
 	cli, err := clientv3.New(clientv3.Config{
 		Endpoints: endpoints,
 		DialOptions: []grpc.DialOption{
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// get a key so it shows up in the metrics as a range rpc
|
||||
// get a key so it shows up in the metrics as a range RPC
|
||||
cli.Get(context.TODO(), "test_key")
|
||||
|
||||
// listen for all prometheus metrics
|
||||
// listen for all Prometheus metrics
|
||||
ln, err := net.Listen("tcp", ":0")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@ -61,7 +61,7 @@ func ExampleMetrics_range() {
 		<-donec
 	}()

-	// make an http request to fetch all prometheus metrics
+	// make an http request to fetch all Prometheus metrics
 	url := "http://" + ln.Addr().String() + "/metrics"
 	resp, err := http.Get(url)
 	if err != nil {
@ -80,5 +80,6 @@ func ExampleMetrics_range() {
 			break
 		}
 	}
-	// Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
+	// Output:
+	// grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
 }
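The metrics example works by registering gRPC client interceptors through Config.DialOptions. A hedged sketch, assuming the go-grpc-prometheus interceptors that the example's output implies (the endpoint is hypothetical):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	// Wire the Prometheus client interceptors into the dial options so
	// every etcd RPC is counted in grpc_client_started_total and friends.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // hypothetical endpoint
		DialTimeout: 5 * time.Second,
		DialOptions: []grpc.DialOption{
			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}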
@ -16,12 +16,14 @@ package clientv3_test

 import (
 	"log"
+	"os"
 	"time"

 	"github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/pkg/transport"
-	"github.com/coreos/pkg/capnslog"
+
 	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
 )

 var (
@ -31,8 +33,7 @@
 )

 func Example() {
-	var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
-	clientv3.SetLogger(plog)
+	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))

 	cli, err := clientv3.New(clientv3.Config{
 		Endpoints: endpoints,
@ -19,6 +19,7 @@ import (
 	"log"

 	"github.com/coreos/etcd/clientv3"
+
 	"golang.org/x/net/context"
 )
Some files were not shown because too many files have changed in this diff.