Compare commits

..

11 Commits

| SHA1 | Message | Date |
| ---- | ------- | ---- |
| 6f48bda7ac | version: bump to v3.0.0 | 2016-06-30 10:04:59 -07:00 |
| 316534e09e | *: remove beta from docs | 2016-06-30 10:04:34 -07:00 |
| 3cecbdb464 | hack: install goreman in tls-setup example | 2016-06-30 09:33:19 -07:00 |
| 62f11e43ee | hack: add tls-setup example generated certs to gitignore | 2016-06-30 09:33:12 -07:00 |
| 064c1585ee | Merge pull request #5822 from raoofm/patch-9 (Doc: fix typo in dev-guide.md) | 2016-06-30 09:06:32 -07:00 |
| 15300a1eb8 | Doc: fix typo in dev-guide.md | 2016-06-30 10:36:50 -04:00 |
| 58dd047ee4 | ctlv3: make flags, commands formats consistent (1. Capitalize first letter; 2. Remove period at the end; followed the pattern in linux coreutil man page) | 2016-06-29 16:16:56 -07:00 |
| 4b42ea6cd7 | clientv3: only use closeErr on watch when donec is closed (Fixes #5800) | 2016-06-28 17:48:44 -07:00 |
| 53c27ae621 | benchmark: fix Compact request | 2016-06-28 14:15:32 -07:00 |
| 269de67bde | mvcc: do not hash consistent index | 2016-06-28 12:29:36 -07:00 |
| 8bbccf1047 | clientv3, ctl3, clientv3/integration: add compact response to compact | 2016-06-28 12:29:32 -07:00 |
978 changed files with 36983 additions and 84576 deletions


@ -5,4 +5,4 @@ A good bug report has some very specific qualities, so please read over our shor
To ask a question, go ahead and ignore this.
[report_bugs]: https://github.com/coreos/etcd/blob/master/Documentation/reporting_bugs.md
[report_bugs]: ../Documentation/reporting_bugs.md


@ -2,4 +2,4 @@
Please read our [contribution workflow][contributing] before submitting a pull request.
[contributing]: https://github.com/coreos/etcd/blob/master/CONTRIBUTING.md#contribution-flow
[contributing]: ../CONTRIBUTING.md#contribution-flow

.gitignore (vendored): 1 change

@ -1,6 +1,5 @@
/coverage
/gopath
/gopath.proto
/go-bindata
/machine*
/bin


@ -4,18 +4,17 @@ go_import_path: github.com/coreos/etcd
sudo: false
go:
- 1.7.5
notifications:
on_success: never
on_failure: never
- 1.5
- 1.6
- tip
env:
global:
- GO15VENDOREXPERIMENT=1
matrix:
- TARGET=amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
- TARGET=ppc64le
matrix:
@ -23,12 +22,16 @@ matrix:
allow_failures:
- go: tip
exclude:
- go: 1.5
env: TARGET=arm
- go: 1.5
env: TARGET=ppc64le
- go: 1.6
env: TARGET=arm64
- go: tip
env: TARGET=arm
- go: tip
env: TARGET=arm64
- go: tip
env: TARGET=386
- go: tip
env: TARGET=ppc64le
@ -46,19 +49,12 @@ before_install:
# disable godep restore override
install:
- pushd cmd/etcd && go get -t -v ./... && popd
- pushd cmd/ && go get -t -v ./... && popd
script:
- >
case "${TARGET}" in
amd64)
GOARCH=amd64 ./test
;;
386)
GOARCH=386 PASSES="build unit" ./test
;;
*)
# test building out of gopath
GO_BUILD_FLAGS="-a -v" GOPATH="" GOARCH="${TARGET}" ./build
;;
esac
if [ "${TARGET}" == "amd64" ]; then
GOARCH="${TARGET}" ./test;
else
GOARCH="${TARGET}" ./build;
fi


@ -1,9 +1,8 @@
FROM alpine:latest
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/
ADD bin/etcd /usr/local/bin/
ADD bin/etcdctl /usr/local/bin/
RUN mkdir -p /var/etcd/
RUN mkdir -p /var/lib/etcd/
EXPOSE 2379 2380


@ -1 +0,0 @@
docs.md


@ -14,7 +14,7 @@ GCE n1-highcpu-2 machine type
## Testing
Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
## Performance
@ -48,5 +48,5 @@ Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send req
| 256 | 64 | all servers | 1033 | 121.5 |
| 256 | 256 | all servers | 3061 | 119.3 |
[hey]: https://github.com/rakyll/hey
[boom]: https://github.com/rakyll/boom
[hack-benchmark]: /hack/benchmark/


@ -24,7 +24,7 @@ Go OS/Arch: linux/amd64
## Testing
Bootstrap another machine, outside of the etcd cluster, and run the [`hey` HTTP benchmark tool](https://github.com/rakyll/hey) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool](https://github.com/rakyll/boom) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
The performance is calculated from the results of 100 benchmark rounds.
@ -66,4 +66,4 @@ The performance is calulated through results of 100 benchmark rounds.
- Write QPS to cluster leaders seems to be increased by a small margin. This is because the main loop and entry apply loops were decoupled in the etcd raft logic, eliminating several blocks between them.
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.


@ -24,7 +24,7 @@ Also, we use 3 etcd 2.1.0 alpha-stage members to form cluster to get base perfor
## Testing
Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
## Performance
@ -66,7 +66,7 @@ Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send req
- write QPS to all servers is increased by 30~80% because follower could receive latest commit index earlier and commit proposals faster.
[hey]: https://github.com/rakyll/hey
[boom]: https://github.com/rakyll/boom
[c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
[etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
[hack-benchmark]: /hack/benchmark/


@ -8,8 +8,6 @@ etcd v3 uses [gRPC][grpc] for its messaging protocol. The etcd project includes
The gateway accepts a [JSON mapping][json-mapping] for etcd's [protocol buffer][api-ref] message definitions. Note that `key` and `value` fields are defined as byte arrays and therefore must be base64 encoded in JSON.
Use `curl` to put and get a key:
```bash
<<COMMENT
https://www.base64encode.org/
@ -19,34 +17,21 @@ COMMENT
curl -L http://localhost:2379/v3alpha/kv/put \
-X POST -d '{"key": "Zm9v", "value": "YmFy"}'
# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"}}
curl -L http://localhost:2379/v3alpha/kv/range \
-X POST -d '{"key": "Zm9v"}'
# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"},"kvs":[{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}],"count":"1"}
```
Use `curl` to watch a key:
```bash
curl http://localhost:2379/v3alpha/watch \
-X POST -d '{"create_request": {"key":"Zm9v"} }' &
# {"result":{"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"1","raft_term":"2"},"created":true}}
curl -L http://localhost:2379/v3alpha/kv/put \
-X POST -d '{"key": "Zm9v", "value": "YmFy"}' >/dev/null 2>&1
# {"result":{"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"2"},"events":[{"kv":{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}}]}}
```
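The gateway can be driven from any HTTP client, not just `curl`. Below is a minimal Go sketch of the put shown above, assuming an etcd member serving the gateway on `localhost:2379`; note how the key and value are base64-encoded before being placed in the JSON body.

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// The gateway expects `key` and `value` as base64-encoded strings in JSON.
	body, err := json.Marshal(map[string]string{
		"key":   base64.StdEncoding.EncodeToString([]byte("foo")),
		"value": base64.StdEncoding.EncodeToString([]byte("bar")),
	})
	if err != nil {
		log.Fatal(err)
	}

	resp, err := http.Post("http://localhost:2379/v3alpha/kv/put", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response carries the cluster ID, member ID, and new revision,
	// just like the curl examples above.
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}
```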
## Swagger
Generated [Swagger][swagger] API definitions can be found at [rpc.swagger.json][swagger-doc].
Generated [Swapper][swagger] API definitions can be found at [rpc.swagger.json][swagger-doc].
[api-ref]: ./api_reference_v3.md
[go-client]: https://github.com/coreos/etcd/tree/master/clientv3
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
[grpc]: http://www.grpc.io/
[grpc-gateway]: https://github.com/grpc-ecosystem/grpc-gateway
[grpc-gateway]: https://github.com/gengo/grpc-gateway
[json-mapping]: https://developers.google.com/protocol-buffers/docs/proto3#json
[swagger]: http://swagger.io/
[swagger-doc]: apispec/swagger/rpc.swagger.json


@ -59,7 +59,6 @@ for grpc-gateway
| LeaseGrant | LeaseGrantRequest | LeaseGrantResponse | LeaseGrant creates a lease which expires if the server does not receive a keepAlive within a given time to live period. All keys attached to the lease will be expired and deleted if the lease expires. Each expired key generates a delete event in the event history. |
| LeaseRevoke | LeaseRevokeRequest | LeaseRevokeResponse | LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. |
| LeaseKeepAlive | LeaseKeepAliveRequest | LeaseKeepAliveResponse | LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client to the server and streaming keep alive responses from the server to the client. |
| LeaseTimeToLive | LeaseTimeToLiveRequest | LeaseTimeToLiveResponse | LeaseTimeToLive retrieves lease information. |
@ -427,8 +426,7 @@ Empty field.
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the first key to delete in the range. | bytes |
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is one bit larger than the given key, then the range is all the all keys with the prefix (the given key). If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
| prev_kv | If prev_kv is set, etcd gets the previous key-value pairs before deleting them. The previous key-value pairs will be returned in the delete response. | bool |
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
@ -438,7 +436,6 @@ Empty field.
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| deleted | deleted is the number of keys deleted by the delete range request. | int64 |
| prev_kvs | if prev_kv is set in the request, the previous key-value pairs will be returned. | (slice of) mvccpb.KeyValue |
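For reference, a rough Go sketch of a delete that requests the previous key-value pairs through the clientv3 API; the `prev_kv` field corresponds to the `clientv3.WithPrevKV()` option, and the removed pairs come back in `PrevKvs`. The endpoint `localhost:2379` is an assumption.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Delete every key prefixed with "foo" and ask for the pairs that were removed.
	resp, err := cli.Delete(context.TODO(), "foo", clientv3.WithPrefix(), clientv3.WithPrevKV())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", resp.Deleted)
	for _, kv := range resp.PrevKvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```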
@ -511,27 +508,6 @@ Empty field.
##### message `LeaseTimeToLiveRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the lease ID for the lease. | int64 |
| keys | keys is true to query all the keys attached to this lease. | bool |
##### message `LeaseTimeToLiveResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| ID | ID is the lease ID from the keep alive request. | int64 |
| TTL | TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. | int64 |
| grantedTTL | GrantedTTL is the initial granted time in seconds upon lease creation/renewal. | int64 |
| keys | Keys is the list of keys attached to this lease. | (slice of) bytes |
##### message `Member` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
@ -615,7 +591,6 @@ Empty field.
| key | key is the key, in bytes, to put into the key-value store. | bytes |
| value | value is the value, in bytes, to associate with the key in the key-value store. | bytes |
| lease | lease is the lease ID to associate with the key in the key-value store. A lease value of 0 indicates no lease. | int64 |
| prev_kv | If prev_kv is set, etcd gets the previous key-value pair before changing it. The previous key-value pair will be returned in the put response. | bool |
@ -624,7 +599,6 @@ Empty field.
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| prev_kv | if prev_kv is set in the request, the previous key-value pair will be returned. | mvccpb.KeyValue |
@ -639,12 +613,6 @@ Empty field.
| sort_order | sort_order is the order for returned sorted results. | SortOrder |
| sort_target | sort_target is the key-value field to use for sorting. | SortTarget |
| serializable | serializable sets the range request to use serializable member-local reads. Range requests are linearizable by default; linearizable requests have higher latency and lower throughput than serializable requests but reflect the current consensus of the cluster. For better performance, in exchange for possible stale reads, a serializable range request is served locally without needing to reach consensus with other nodes in the cluster. | bool |
| keys_only | keys_only when set returns only the keys and not the values. | bool |
| count_only | count_only when set returns only the count of the keys in the range. | bool |
| min_mod_revision | min_mod_revision is the lower bound for returned key mod revisions; all keys with lesser mod revisions will be filtered away. | int64 |
| max_mod_revision | max_mod_revision is the upper bound for returned key mod revisions; all keys with greater mod revisions will be filtered away. | int64 |
| min_create_revision | min_create_revision is the lower bound for returned key create revisions; all keys with lesser create revisions will be filtered away. | int64 |
| max_create_revision | max_create_revision is the upper bound for returned key create revisions; all keys with greater create revisions will be filtered away. | int64 |
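A hedged clientv3 sketch exercising a few of these range options from Go (serializable, keys-only, sorted); the endpoint `localhost:2379` is an assumption.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Serializable (member-local) read of keys under "foo", keys only, sorted by key.
	resp, err := cli.Get(context.TODO(), "foo",
		clientv3.WithPrefix(),
		clientv3.WithSerializable(),
		clientv3.WithKeysOnly(),
		clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend))
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s\n", kv.Key) // values are empty because of keys_only
	}
	fmt.Println("count:", resp.Count, "more:", resp.More)
}
```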
@ -653,9 +621,8 @@ Empty field.
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| kvs | kvs is the list of key-value pairs matched by the range request. kvs is empty when count is requested. | (slice of) mvccpb.KeyValue |
| kvs | kvs is the list of key-value pairs matched by the range request. | (slice of) mvccpb.KeyValue |
| more | more indicates if there are more keys to return in the requested range. | bool |
| count | count is set to the number of keys within the range when requested. | int64 |
@ -762,11 +729,9 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key to register for watching. | bytes |
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. If the range_end is one bit larger than the given key, then all keys with the prefix (the given key) will be watched. | bytes |
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. | bytes |
| start_revision | start_revision is an optional revision to watch from (inclusive). No start_revision is "now". | int64 |
| progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. The etcd server may decide how often it will send notifications based on current load. | bool |
| filters | filter out put event. filter out delete event. filters filter the events at server side before it sends back to the watcher. | (slice of) FilterType |
| prev_kv | If prev_kv is set, created watcher gets the previous KV before the event happens. If the previous KV is already compacted, nothing will be returned. | bool |
@ -799,7 +764,6 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
| ----- | ----------- | ---- |
| type | type is the kind of event. If type is a PUT, it indicates new data has been stored to the key. If type is a DELETE, it indicates the key was deleted. | EventType |
| kv | kv holds the KeyValue for the event. A PUT event contains current kv pair. A PUT event with kv.Version=1 indicates the creation of a key. A DELETE/EXPIRE event contains the deleted key with its modification revision set to the revision of deletion. | KeyValue |
| prev_kv | prev_kv holds the key-value pair before the event happens. | KeyValue |
@ -825,22 +789,6 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
##### message `LeaseInternalRequest` (lease/leasepb/lease.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| LeaseTimeToLiveRequest | | etcdserverpb.LeaseTimeToLiveRequest |
##### message `LeaseInternalResponse` (lease/leasepb/lease.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| LeaseTimeToLiveResponse | | etcdserverpb.LeaseTimeToLiveResponse |
##### message `Permission` (auth/authpb/auth.proto)
Permission is a single entity

File diff suppressed because it is too large


@ -1,11 +1,8 @@
# Experimental APIs and features
For the most part, the etcd project is stable, but we are still moving fast! We believe in the release fast philosophy. We want to get early feedback on features still in development and stabilizing. Thus, there are, and will be more, experimental features and APIs. We plan to improve these features based on the early feedback from the community, or abandon them if there is little interest, in the next few releases. Please do not rely on any experimental features or APIs in production environment.
For the most part, the etcd project is stable, but we are still moving fast! We believe in the release fast philosophy. We want to get early feedback on features still in development and stabilizing. Thus, there are, and will be more, experimental features and APIs. We plan to improve these features based on the early feedback from the community, or abandon them if there is little interest, in the next few releases. If you are running a production system, please do not rely on any experimental features or APIs.
## The current experimental API/features are:
- [gateway][gateway]: beta, to be stable in 3.2 release
- [gRPC proxy][grpc-proxy]: alpha, to be stable in 3.2 release
[gateway]: ../op-guide/gateway.md
[grpc-proxy]: ../op-guide/grpc_proxy.md
- v3 auth API: expect to be stable in 3.1 release
- etcd gateway: expect to be stable in 3.1 release


@ -1,65 +0,0 @@
# gRPC naming and discovery
etcd provides a gRPC resolver to support an alternative name system that fetches endpoints from etcd for discovering gRPC services. The underlying mechanism is based on watching updates to keys prefixed with the service name.
## Using etcd discovery with go-grpc
The etcd client provides a gRPC resolver for resolving gRPC endpoints with an etcd backend. The resolver is initialized with an etcd client and given a target for resolution:
```go
import (
"github.com/coreos/etcd/clientv3"
etcdnaming "github.com/coreos/etcd/clientv3/naming"
"google.golang.org/grpc"
)
...
cli, cerr := clientv3.NewFromURL("http://localhost:2379")
r := &etcdnaming.GRPCResolver{Client: cli}
b := grpc.RoundRobin(r)
conn, gerr := grpc.Dial("my-service", grpc.WithBalancer(b))
```
## Managing service endpoints
The etcd resolver treats all keys under the prefix of the resolution target following a "/" (e.g., "my-service/") with JSON-encoded go-grpc `naming.Update` values as potential service endpoints. Endpoints are added to the service by creating new keys and removed from the service by deleting keys.
### Adding an endpoint
New endpoints can be added to the service through `etcdctl`:
```sh
ETCDCTL_API=3 etcdctl put my-service/1.2.3.4 '{"Addr":"1.2.3.4","Metadata":"..."}'
```
The etcd client's `GRPCResolver.Update` method can also register new endpoints with a key matching the `Addr`:
```go
r.Update(context.TODO(), "my-service", naming.Update{Op: naming.Add, Addr: "1.2.3.4", Metadata: "..."})
```
### Deleting an endpoint
Hosts can be deleted from the service through `etcdctl`:
```sh
ETCDCTL_API=3 etcdctl del my-service/1.2.3.4
```
The etcd client's `GRPCResolver.Update` method also supports deleting endpoints:
```go
r.Update(context.TODO(), "my-service", naming.Update{Op: naming.Delete, Addr: "1.2.3.4"})
```
### Registering an endpoint with a lease
Registering an endpoint with a lease ensures that if the host can't maintain a keepalive heartbeat (e.g., its machine fails), it will be removed from the service:
```sh
lease=`ETCDCTL_API=3 etcdctl lease grant 5 | cut -f2 -d' '`
ETCDCTL_API=3 etcdctl put --lease=$lease my-service/1.2.3.4 '{"Addr":"1.2.3.4","Metadata":"..."}'
ETCDCTL_API=3 etcdctl lease keep-alive $lease
```
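The lease-based registration can also be done from Go. The sketch below assumes an etcd member on `localhost:2379` and that `GRPCResolver.Update` accepts clientv3 options such as `WithLease`; it grants a 5-second lease, keeps it alive, and registers the endpoint under that lease.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	etcdnaming "github.com/coreos/etcd/clientv3/naming"
	"google.golang.org/grpc/naming"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a 5-second lease and keep it alive for as long as this process runs.
	lease, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}
	ka, err := cli.KeepAlive(context.TODO(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for range ka {
			// drain keep-alive responses so the lease stays refreshed
		}
	}()

	// Register the endpoint under the lease; it disappears when the lease expires.
	r := &etcdnaming.GRPCResolver{Client: cli}
	err = r.Update(context.TODO(), "my-service",
		naming.Update{Op: naming.Add, Addr: "1.2.3.4", Metadata: "..."},
		clientv3.WithLease(lease.ID))
	if err != nil {
		log.Fatal(err)
	}

	// Block so the registration (and its lease) stays alive.
	select {}
}
```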


@ -4,54 +4,30 @@ Users mostly interact with etcd by putting or getting the value of a key. This s
By default, etcdctl talks to the etcd server with the v2 API for backward compatibility. For etcdctl to speak to etcd using the v3 API, the API version must be set to version 3 via the `ETCDCTL_API` environment variable.
```bash
``` bash
export ETCDCTL_API=3
```
## Find versions
The etcdctl version and the server API version can be useful in finding the appropriate commands to use for performing various operations on etcd.
Here is the command to find the versions:
```bash
$ etcdctl version
etcdctl version: 3.1.0-alpha.0+git
API version: 3.1
```
## Write a key
Applications store keys into the etcd cluster by writing to keys. Every stored key is replicated to all etcd cluster members through the Raft protocol to achieve consistency and reliability.
Here is the command to set the value of key `foo` to `bar`:
```bash
``` bash
$ etcdctl put foo bar
OK
```
A key can also be set for a specified interval of time by attaching a lease to it.
Here is the command to set the value of key `foo1` to `bar1` for 10s.
```bash
$ etcdctl put foo1 bar1 --lease=1234abcd
OK
```
Note: The lease ID `1234abcd` in the above command refers to the ID returned when creating the 10s lease. This ID can then be attached to the key.
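The same writes can be issued programmatically through the Go clientv3 API. This is a minimal sketch, assuming an etcd member on `localhost:2379`; it grants a fresh 10-second lease instead of reusing the `1234abcd` ID from the example above.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Plain write: foo = bar.
	if _, err := cli.Put(context.TODO(), "foo", "bar"); err != nil {
		log.Fatal(err)
	}

	// Write with a 10-second lease attached: foo1 expires when the lease does.
	lease, err := cli.Grant(context.TODO(), 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(context.TODO(), "foo1", "bar1", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}
}
```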
## Read keys
Applications can read values of keys from an etcd cluster. Queries may read a single key, or a range of keys.
Applications can read values of keys from an etcd cluster. Queries may read a single key, or a range of keys.
Suppose the etcd cluster has stored the following keys:
```bash
```
foo = bar
foo1 = bar1
foo2 = bar2
foo3 = bar3
```
@ -63,59 +39,18 @@ foo
bar
```
Here is the command to read the value of key `foo` in hex format:
Here is the command to range over the keys from `foo` to `foo9`:
```bash
$ etcdctl get foo --hex
\x66\x6f\x6f # Key
\x62\x61\x72 # Value
```
Here is the command to read only the value of key `foo`:
```bash
$ etcdctl get foo --print-value-only
bar
```
Here is the command to range over the keys from `foo` to `foo3`:
```bash
$ etcdctl get foo foo3
$ etcdctl get foo foo9
foo
bar
foo1
bar1
foo2
bar2
```
Note that `foo3` is excluded since the range is over the half-open interval `[foo, foo3)`, excluding `foo3`.
Here is the command to range over all keys prefixed with `foo`:
```bash
$ etcdctl get --prefix foo
foo
bar
foo1
bar1
foo2
bar2
foo3
bar3
```
Here is the command to range over all keys prefixed with `foo`, limiting the number of results to 2:
```bash
$ etcdctl get --prefix --limit=2 foo
foo
bar
foo1
bar1
```
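For comparison, a rough Go clientv3 equivalent of the prefix and limit queries above (endpoint `localhost:2379` assumed):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// All keys prefixed with "foo", at most two results.
	resp, err := cli.Get(context.TODO(), "foo", clientv3.WithPrefix(), clientv3.WithLimit(2))
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s\n%s\n", kv.Key, kv.Value)
	}
}
```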
## Read past version of keys
Applications may want to read superseded versions of a key. For example, an application may wish to roll back to an old configuration by accessing an earlier version of a key. Alternatively, an application may want a consistent view over multiple keys through multiple requests by accessing key history.
@ -123,81 +58,45 @@ Since every modification to the etcd cluster key-value store increments the glob
Suppose an etcd cluster already has the following keys:
```bash
foo = bar # revision = 2
foo1 = bar1 # revision = 3
foo = bar_new # revision = 4
foo1 = bar1_new # revision = 5
``` bash
$ etcdctl put foo bar # revision = 2
$ etcdctl put foo1 bar1 # revision = 3
$ etcdctl put foo bar_new # revision = 4
$ etcdctl put foo1 bar1_new # revision = 5
```
Here is an example of accessing past versions of keys:
```bash
$ etcdctl get --prefix foo # access the most recent versions of keys
$ etcdctl get foo foo9 # access the most recent versions of keys
foo
bar_new
foo1
bar1_new
$ etcdctl get --prefix --rev=4 foo # access the versions of keys at revision 4
$ etcdctl get --rev=4 foo foo9 # access the versions of keys at revision 4
foo
bar_new
foo1
bar1
$ etcdctl get --prefix --rev=3 foo # access the versions of keys at revision 3
$ etcdctl get --rev=3 foo foo9 # access the versions of keys at revision 3
foo
bar
foo1
bar1
$ etcdctl get --prefix --rev=2 foo # access the versions of keys at revision 2
$ etcdctl get --rev=2 foo foo9 # access the versions of keys at revision 2
foo
bar
$ etcdctl get --prefix --rev=1 foo # access the versions of keys at revision 1
```
## Read keys which are greater than or equal to the byte value of the specified key
Applications may want to read keys which are greater than or equal to the byte value of the specified key.
Suppose an etcd cluster already has the following keys:
```bash
a = 123
b = 456
z = 789
```
Here is the command to read keys which are greater than or equal to the byte value of key `b` :
```bash
$ etcdctl get --from-key b
b
456
z
789
$ etcdctl get --rev=1 foo foo9 # access the versions of keys at revision 1
```
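A hedged Go sketch of the same historical and by-byte-value reads via clientv3; the revision number follows the example above and will differ on a real cluster, and the endpoint `localhost:2379` is an assumption.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Keys prefixed with "foo" as they looked at revision 3.
	old, err := cli.Get(context.TODO(), "foo", clientv3.WithPrefix(), clientv3.WithRev(3))
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range old.Kvs {
		fmt.Printf("%s = %s (mod_revision %d)\n", kv.Key, kv.Value, kv.ModRevision)
	}

	// Keys greater than or equal to "b" in byte order.
	from, err := cli.Get(context.TODO(), "b", clientv3.WithFromKey())
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range from.Kvs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
}
```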
## Delete keys
Applications can delete a key or a range of keys from an etcd cluster.
Suppose an etcd cluster already has the following keys:
```bash
foo = bar
foo1 = bar1
foo3 = bar3
zoo = val
zoo1 = val1
zoo2 = val2
a = 123
b = 456
z = 789
```
Here is the command to delete key `foo`:
```bash
@ -212,29 +111,6 @@ $ etcdctl del foo foo9
2 # two keys are deleted
```
Here is the command to delete key `zoo` with the deleted key value pair returned:
```bash
$ etcdctl del --prev-kv zoo
1 # one key is deleted
zoo # deleted key
val # the value of the deleted key
```
Here is the command to delete keys having prefix as `zoo`:
```bash
$ etcdctl del --prefix zoo
2 # two keys are deleted
```
Here is the command to delete keys which are greater than or equal to the byte value of key `b` :
```bash
$ etcdctl del --from-key b
2 # two keys are deleted
```
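The equivalent range and from-key deletes in Go via clientv3 (endpoint assumed to be `localhost:2379`); `Deleted` reports how many keys were removed.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Delete the range [foo, foo9).
	rangeResp, err := cli.Delete(context.TODO(), "foo", clientv3.WithRange("foo9"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", rangeResp.Deleted)

	// Delete every key greater than or equal to "b".
	fromResp, err := cli.Delete(context.TODO(), "b", clientv3.WithFromKey())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", fromResp.Deleted)
}
```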
## Watch key changes
Applications can watch on a key or a range of keys to monitor for any updates.
@ -242,86 +118,38 @@ Applications can watch on a key or a range of keys to monitor for any updates.
Here is the command to watch on key `foo`:
```bash
$ etcdctl watch foo
$ etcdctl watch foo
# in another terminal: etcdctl put foo bar
PUT
foo
bar
```
Here is the command to watch on key `foo` in hex format:
```bash
$ etcdctl watch foo --hex
# in another terminal: etcdctl put foo bar
PUT
\x66\x6f\x6f # Key
\x62\x61\x72 # Value
```
Here is the command to watch on a range key from `foo` to `foo9`:
```bash
$ etcdctl watch foo foo9
# in another terminal: etcdctl put foo bar
PUT
foo
bar
# in another terminal: etcdctl put foo1 bar1
PUT
foo1
bar1
```
Here is the command to watch on keys having prefix `foo`:
```bash
$ etcdctl watch --prefix foo
# in another terminal: etcdctl put foo bar
PUT
foo
bar
# in another terminal: etcdctl put fooz1 barz1
PUT
fooz1
barz1
```
Here is the command to watch on multiple keys `foo` and `zoo`:
```bash
$ etcdctl watch -i
$ watch foo
$ watch zoo
# in another terminal: etcdctl put foo bar
PUT
foo
bar
# in another terminal: etcdctl put zoo val
PUT
zoo
val
```
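Watching from Go mirrors the `etcdctl watch` commands. A minimal clientv3 sketch that streams events for the `foo` prefix, assuming an etcd member on `localhost:2379`:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Stream events for every key prefixed with "foo" until the program exits.
	for wresp := range cli.Watch(context.Background(), "foo", clientv3.WithPrefix()) {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %s %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```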
## Watch historical changes of keys
Applications may want to watch for historical changes of keys in etcd. For example, an application may wish to receive all the modifications of a key; if the application stays connected to etcd, then `watch` is good enough. However, if the application or etcd fails, a change may happen during the failure, and the application will not receive the update in real time. To guarantee the update is delivered, the application must be able to watch for historical changes to keys. To do this, an application can specify a historical revision on a watch, just like reading past version of keys.
Suppose we finished the following sequence of operations:
```bash
$ etcdctl put foo bar # revision = 2
OK
$ etcdctl put foo1 bar1 # revision = 3
OK
$ etcdctl put foo bar_new # revision = 4
OK
$ etcdctl put foo1 bar1_new # revision = 5
OK
``` bash
etcdctl put foo bar # revision = 2
etcdctl put foo1 bar1 # revision = 3
etcdctl put foo bar_new # revision = 4
etcdctl put foo1 bar1_new # revision = 5
```
Here is an example to watch the historical changes:
```bash
# watch for changes on key `foo` since revision 2
$ etcdctl watch --rev=2 foo
@ -331,9 +159,7 @@ bar
PUT
foo
bar_new
```
```bash
# watch for changes on key `foo` since revision 3
$ etcdctl watch --rev=3 foo
PUT
@ -341,19 +167,6 @@ foo
bar_new
```
Here is an example to watch only from the last historical change:
```bash
# watch for changes on key `foo` and return last revision value along with modified value
$ etcdctl watch --prev-kv foo
# in another terminal: etcdctl put foo bar_latest
PUT
foo # key
bar_new # last value of foo key before modification
foo # key
bar_latest # value of foo key after modification
```
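The historical and `--prev-kv` watches map to the `WithRev` and `WithPrevKV` options in clientv3; a sketch, assuming the revision numbers from the example above and an etcd member on `localhost:2379`:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Replay all changes to "foo" starting at revision 2, and include the prior
	// key-value pair with each event.
	wch := cli.Watch(context.Background(), "foo", clientv3.WithRev(2), clientv3.WithPrevKV())
	for wresp := range wch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %s = %s", ev.Type, ev.Kv.Key, ev.Kv.Value)
			if ev.PrevKv != nil {
				fmt.Printf(" (was %s)", ev.PrevKv.Value)
			}
			fmt.Println()
		}
	}
}
```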
## Compacted revisions
As we mentioned, etcd keeps revisions so that applications can read past versions of keys. However, to avoid accumulating an unbounded amount of history, it is important to compact past revisions. After compacting, etcd removes historical revisions, releasing resources for future use. All superseded data with revisions before the compacted revision will be unavailable.
@ -369,20 +182,13 @@ $ etcdctl get --rev=4 foo
Error: rpc error: code = 11 desc = etcdserver: mvcc: required revision has been compacted
```
Note: The current revision of the etcd server can be found by using the get command on any key (existent or non-existent) in JSON format. The example below uses mykey, which does not exist in the etcd server:
```bash
$ etcdctl get mykey -w=json
{"header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":15,"raft_term":4}}
```
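Compaction can also be requested from Go. A clientv3 sketch that compacts up to revision 4 and then shows the error returned when reading an older revision (endpoint `localhost:2379` assumed):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Discard history before revision 4.
	if _, err := cli.Compact(context.TODO(), 4); err != nil {
		log.Fatal(err)
	}

	// Reading at a compacted revision now fails with an mvcc
	// "required revision has been compacted" error.
	if _, err := cli.Get(context.TODO(), "foo", clientv3.WithRev(3)); err != nil {
		fmt.Println("expected error:", err)
	}
}
```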
## Grant leases
Applications can grant leases for keys from an etcd cluster. When a key is attached to a lease, its lifetime is bound to the lease's lifetime which in turn is governed by a time-to-live (TTL). Each lease has a minimum time-to-live (TTL) value specified by the application at grant time. The lease's actual TTL value is at least the minimum TTL and is chosen by the etcd cluster. Once a lease's TTL elapses, the lease expires and all attached keys are deleted.
Here is the command to grant a lease:
```bash
```
# grant a lease with 10 second TTL
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
@ -398,7 +204,7 @@ Applications revoke leases by lease ID. Revoking a lease deletes all of its atta
Suppose we finished the following sequence of operations:
```bash
```
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
$ etcdctl put --lease=32695410dcc0ca06 foo bar
@ -407,7 +213,7 @@ OK
Here is the command to revoke the same lease:
```bash
```
$ etcdctl lease revoke 32695410dcc0ca06
lease 32695410dcc0ca06 revoked
@ -421,55 +227,17 @@ Applications can keep a lease alive by refreshing its TTL so it does not expire.
Suppose we finished the following sequence of operations:
```bash
```
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
```
Here is the command to keep the same lease alive:
```bash
$ etcdctl lease keep-alive 32695410dcc0ca06
lease 32695410dcc0ca06 keepalived with TTL(100)
lease 32695410dcc0ca06 keepalived with TTL(100)
lease 32695410dcc0ca06 keepalived with TTL(100)
```
$ etcdctl lease keep-alive 32695410dcc0ca0
lease 32695410dcc0ca0 keepalived with TTL(100)
lease 32695410dcc0ca0 keepalived with TTL(100)
lease 32695410dcc0ca0 keepalived with TTL(100)
...
```
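In Go, a lease is refreshed with clientv3's `KeepAlive`, which returns a channel of keep-alive responses; each response carries the renewed TTL. A rough sketch, assuming an etcd member on `localhost:2379`:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	lease, err := cli.Grant(context.TODO(), 10)
	if err != nil {
		log.Fatal(err)
	}

	// Refresh the lease until the program exits; each response carries the renewed TTL.
	ch, err := cli.KeepAlive(context.TODO(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	for ka := range ch {
		fmt.Printf("lease %x kept alive, TTL=%d\n", ka.ID, ka.TTL)
	}
}
```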
## Get lease information
Applications may want to know lease information so that they can renew the lease, or check whether the lease still exists or has expired. Applications may also want to know the keys to which a particular lease is attached.
Suppose we finished the following sequence of operations:
```bash
# grant a lease with 500 second TTL
$ etcdctl lease grant 500
lease 694d5765fc71500b granted with TTL(500s)
# attach key zoo1 to lease 694d5765fc71500b
$ etcdctl put zoo1 val1 --lease=694d5765fc71500b
OK
# attach key zoo2 to lease 694d5765fc71500b
$ etcdctl put zoo2 val2 --lease=694d5765fc71500b
OK
```
Here is the command to get information about the lease:
```bash
$ etcdctl lease timetolive 694d5765fc71500b
lease 694d5765fc71500b granted with TTL(500s), remaining(258s)
```
Here is the command to get information about the lease along with the keys attached with the lease:
```bash
$ etcdctl lease timetolive --keys 694d5765fc71500b
lease 694d5765fc71500b granted with TTL(500s), remaining(132s), attached keys([zoo2 zoo1])
# if the lease has expired or does not exist it will give the below response:
Error: etcdserver: requested lease not found
```
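The same lease information is available in Go through clientv3's `TimeToLive`; the `--keys` flag corresponds to the `clientv3.WithAttachedKeys()` option. A sketch, granting its own lease rather than reusing the IDs above, with `localhost:2379` assumed as the endpoint:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	lease, err := cli.Grant(context.TODO(), 500)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(context.TODO(), "zoo1", "val1", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}

	// Query the lease's remaining TTL and the keys attached to it.
	ttl, err := cli.TimeToLive(context.TODO(), lease.ID, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %x granted TTL=%ds, remaining=%ds, keys=%q\n",
		ttl.ID, ttl.GrantedTTL, ttl.TTL, ttl.Keys)
}
```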


@ -1,10 +0,0 @@
# System limits
## Request size limit
etcd is designed to handle small key-value pairs typical for metadata. Larger requests will work, but may increase the latency of other requests. For the time being, etcd guarantees to support RPC requests with up to 1MB of data. In the future, the size limit may be loosened or made configurable.
## Storage size limit
The default storage size limit is 2GB, configurable with the `--quota-backend-bytes` flag; it supports up to 8GB.


@ -28,7 +28,7 @@ bar
## Local multi-member cluster
A `Procfile` at the base of this git repo is provided to easily set up a local multi-member cluster. To start a multi-member cluster go to the root of an etcd source tree and run:
A Procfile is provided to easily set up a local multi-member cluster. Start a multi-member cluster with a few commands:
```
# install goreman program to control Profile-based applications.
@ -37,7 +37,7 @@ $ goreman -f Procfile start
...
```
The started members listen on `localhost:2379`, `localhost:22379`, and `localhost:32379` for client requests respectively.
The started members listen on `localhost:12379`, `localhost:22379`, and `localhost:32379` for client requests respectively.
To interact with the started cluster by using etcdctl:
@ -45,16 +45,16 @@ To interact with the started cluster by using etcdctl:
# use API version 3
$ export ETCDCTL_API=3
$ etcdctl --write-out=table --endpoints=localhost:2379 member list
$ etcdctl --write-out=table --endpoints=localhost:12379 member list
+------------------+---------+--------+------------------------+------------------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS |
+------------------+---------+--------+------------------------+------------------------+
| 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:2380 | http://127.0.0.1:2379 |
| 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:12380 | http://127.0.0.1:12379 |
| 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 |
| fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 |
+------------------+---------+--------+------------------------+------------------------+
$ etcdctl put foo bar
$ etcdctl --endpoints=localhost:12379 put foo bar
OK
```
@ -64,10 +64,10 @@ To exercise etcd's fault tolerance, kill a member:
# kill etcd2
$ goreman run stop etcd2
$ etcdctl put key hello
$ etcdctl --endpoints=localhost:12379 put key hello
OK
$ etcdctl get key
$ etcdctl --endpoints=localhost:12379 get key
hello
# try to get key from the killed member


@ -31,8 +31,8 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
## Write release note
- Write an introduction for the new release. For example, what major bugs we fixed, what new features we introduced, or what performance improvements we made.
- Write changelog for the last release. ChangeLog should be straightforward and easy to understand for the end-user.
- Put `[GH XXXX]` at the head of change line to reference Pull Request that introduces the change. Moreover, add a link on it to jump to the Pull Request.
- Find PRs with `release-note` label and explain them in `NEWS` file, as a straightforward summary of changes for end-users.
## Tag version
@ -47,7 +47,7 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
## Build release binaries and images
- Ensure `acbuild` is available.
- Ensure `actool` is available, or install it through `go get github.com/appc/spec/actool`.
- Ensure `docker` is available.
Run release script in root directory:


@ -10,44 +10,27 @@ The easiest way to get etcd is to use one of the pre-built release binaries whic
## Build the latest version
For those wanting to try the very latest version, build etcd from the `master` branch. [Go](https://golang.org/) version 1.7+ is required to build the latest version of etcd. To ensure etcd is built against well-tested libraries, etcd vendors its dependencies for official release binaries. However, etcd's vendoring is also optional to avoid potential import conflicts when embedding the etcd server or using the etcd client.
For those wanting to try the very latest version, build etcd from the `master` branch.
[Go](https://golang.org/) version 1.5+ is required to build the latest version of etcd.
First, confirm go 1.7+ is installed:
```sh
# go is required
$ go version
go version go1.7.3 darwin/amd64
Here are the commands to build an etcd binary from the `master` branch:
```
# go is required
$ go version
go version go1.6 darwin/amd64
To build `etcd` from the `master` branch without a `GOPATH` using the official `build` script:
# GOPATH should be set correctly
$ echo $GOPATH
/Users/example/go
```sh
$ git clone https://github.com/coreos/etcd.git
$ mkdir -p $GOPATH/src/github.com/coreos
$ cd $GOPATH/src/github.com/coreos
$ git clone github.com:coreos/etcd.git
$ cd etcd
$ ./build
$ ./bin/etcd
```
To build a vendored `etcd` from the `master` branch via `go get`:
```sh
# GOPATH should be set
$ echo $GOPATH
/Users/example/go
$ go get github.com/coreos/etcd/cmd/etcd
$ $GOPATH/bin/etcd
```
To build `etcd` from the `master` branch without vendoring (may not build due to upstream conflicts):
```sh
# GOPATH should be set
$ echo $GOPATH
/Users/example/go
$ go get github.com/coreos/etcd
$ $GOPATH/bin/etcd
...
```
## Test the installation
@ -71,6 +54,3 @@ If OK is printed, then etcd is working!
[github-release]: https://github.com/coreos/etcd/releases/
[go]: https://golang.org/doc/install
[build-script]: ../build
[cmd-directory]: ../cmd


@ -14,23 +14,17 @@ The easiest way to get started using etcd as a distributed key-value store is to
- [Interacting with etcd][interacting]
- [API references][api_ref]
- [gRPC gateway][api_grpc_gateway]
- [gRPC naming and discovery][grpc_naming]
- [Embedding etcd][embed_etcd]
- [Experimental features and APIs][experimental]
- [System limits][system-limit]
## Operating etcd clusters
Administrators who need to create reliable and scalable key-value stores for the developers they support should begin with a [cluster on multiple machines][clustering].
- [Setting up etcd clusters][clustering]
- [Setting up etcd gateways][gateway]
- [Setting up etcd gRPC proxy (pre-alpha)][grpc_proxy]
- [Setting up clusters][clustering]
- [Run etcd clusters inside containers][container]
- [Hardware recommendations][hardware]
- [Configuration][conf]
- [Security][security]
- [Monitoring][monitoring]
- Monitoring
- [Maintenance][maintenance]
- [Understand failures][failures]
- [Disaster recovery][recovery]
@ -42,7 +36,7 @@ Administrators who need to create reliable and scalable key-value stores for the
To learn more about the concepts and internals behind etcd, read the following pages:
- [Why etcd][why] (TODO)
- Why etcd (TODO)
- [Understand data model][data_model]
- [Understand APIs][understand_apis]
- [Glossary][glossary]
@ -52,36 +46,24 @@ To learn more about the concepts and internals behind etcd, read the following p
- [Migrate applications from using API v2 to API v3][v2_migration]
- [Updating v2.3 to v3.0][v3_upgrade]
- [Updating v3.0 to v3.1][v31_upgrade]
## Frequently Asked Questions (FAQ)
Answers to [common questions] about etcd.
## Troubleshooting
[api_ref]: dev-guide/api_reference_v3.md
[api_grpc_gateway]: dev-guide/api_grpc_gateway.md
[clustering]: op-guide/clustering.md
[conf]: op-guide/configuration.md
[system-limit]: dev-guide/limit.md
[common questions]: faq.md
[why]: learning/why.md
[data_model]: learning/data_model.md
[demo]: demo.md
[download_build]: dl_build.md
[embed_etcd]: https://godoc.org/github.com/coreos/etcd/embed
[grpc_naming]: dev-guide/grpc_naming.md
[failures]: op-guide/failures.md
[gateway]: op-guide/gateway.md
[glossary]: learning/glossary.md
[grpc_proxy]: op-guide/grpc_proxy.md
[hardware]: op-guide/hardware.md
[interacting]: dev-guide/interacting_v3.md
[local_cluster]: dev-guide/local_cluster.md
[performance]: op-guide/performance.md
[recovery]: op-guide/recovery.md
[maintenance]: op-guide/maintenance.md
[security]: op-guide/security.md
[monitoring]: op-guide/monitoring.md
[v2_migration]: op-guide/v2-migration.md
[container]: op-guide/container.md
[understand_apis]: learning/api.md
@ -89,4 +71,3 @@ Answers to [common questions] about etcd.
[supported_platform]: op-guide/supported-platform.md
[experimental]: dev-guide/experimental_apis.md
[v3_upgrade]: upgrades/upgrade_3_0.md
[v31_upgrade]: upgrades/upgrade_3_1.md


@ -1,128 +0,0 @@
## Frequently Asked Questions (FAQ)
### etcd, general
#### Do clients have to send requests to the etcd leader?
[Raft][raft] is leader-based; the leader handles all client requests which need cluster consensus. However, the client does not need to know which node is the leader. Any request that requires consensus sent to a follower is automatically forwarded to the leader. Requests that do not require consensus (e.g., serialized reads) can be processed by any cluster member.
### Configuration
#### What is the difference between advertise-urls and listen-urls?
`listen-urls` specifies the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address.
`advertise-urls` specifies the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines.
### Deployment
#### System requirements
Since etcd writes data to disk, SSD is highly recommended. To prevent performance degradation or unintentionally overloading the key-value store, etcd enforces a 2GB default storage size quota, configurable up to 8GB. To avoid swapping or running out of memory, the machine should have at least as much RAM as the storage quota. At CoreOS, an etcd cluster is usually deployed on dedicated CoreOS Container Linux machines with dual-core processors, 2GB of RAM, and 80GB of SSD *at the very least*. **Note that performance is intrinsically workload dependent; please test before production deployment**. See [hardware][hardware-setup] for more recommendations.
The most stable production environment is the Linux operating system on the amd64 architecture; see [supported platform][supported-platform] for more.
#### Why an odd number of cluster members?
An etcd cluster needs a majority of nodes, a quorum, to agree on updates to the cluster state. For a cluster with n members, quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for quorum. Although adding a node to an odd-sized cluster appears better since there are more machines, the fault tolerance is worse since exactly the same number of nodes may fail without losing quorum but there are more nodes that can fail. If the cluster is in a state where it can't tolerate any more failures, adding a node before removing nodes is dangerous because if the new node fails to register with the cluster (e.g., the address is misconfigured), quorum will be permanently lost.
#### What is the maximum cluster size?
Theoretically, there is no hard limit. However, an etcd cluster probably should have no more than seven nodes. [Google Chubby lock service][chubby], similar to etcd and widely deployed within Google for many years, suggests running five nodes. A 5-member etcd cluster can tolerate two member failures, which is enough in most cases. Although larger clusters provide better fault tolerance, the write performance suffers because data must be replicated across more machines.
#### What is failure tolerance?
An etcd cluster operates so long as a member quorum can be established. If quorum is lost through transient network failures (e.g., partitions), etcd automatically and safely resumes once the network recovers and restores quorum; Raft enforces cluster consistency. For power loss, etcd persists the Raft log to disk; etcd replays the log to the point of failure and resumes cluster participation. For permanent hardware failure, the node may be removed from the cluster through [runtime reconfiguration][runtime reconfiguration].
It is recommended to have an odd number of members in a cluster. An odd-size cluster tolerates the same number of failures as an even-size cluster but with fewer nodes. The difference can be seen by comparing even and odd sized clusters:
| Cluster Size | Majority | Failure Tolerance |
|:-:|:-:|:-:|
| 1 | 1 | 0 |
| 2 | 2 | 0 |
| 3 | 2 | 1 |
| 4 | 3 | 1 |
| 5 | 3 | 2 |
| 6 | 4 | 2 |
| 7 | 4 | 3 |
| 8 | 5 | 3 |
| 9 | 5 | 4 |
Adding a member to bring the size of cluster up to an even number doesn't buy additional fault tolerance. Likewise, during a network partition, an odd number of members guarantees that there will always be a majority partition that can continue to operate and be the source of truth when the partition ends.
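The majority and failure-tolerance columns follow directly from quorum = (n/2)+1; a tiny Go sketch reproduces the table:

```go
package main

import "fmt"

func main() {
	for n := 1; n <= 9; n++ {
		quorum := n/2 + 1       // members needed to agree on a proposal
		tolerance := n - quorum // members that can fail without losing quorum
		fmt.Printf("cluster=%d majority=%d failure tolerance=%d\n", n, quorum, tolerance)
	}
}
```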
#### Does etcd work in cross-region or cross data center deployments?
Deploying etcd across regions improves etcd's fault tolerance since members are in separate failure domains. The cost is higher consensus request latency from crossing data center boundaries. Since etcd relies on a member quorum for consensus, the latency from crossing data centers will be somewhat pronounced because at least a majority of cluster members must respond to consensus requests. Additionally, cluster data must be replicated across all peers, so there will be bandwidth cost as well.
With longer latencies, the default etcd configuration may cause frequent elections or heartbeat timeouts. See [tuning] for adjusting timeouts for high latency deployments.
### Operation
#### How to back up an etcd cluster?
etcdctl provides a `snapshot` command to create backups. See [backup][backup] for more details.
#### Should I add a member before removing an unhealthy member?
When replacing an etcd node, it's important to remove the member first and then add its replacement.
etcd employs distributed consensus based on a quorum model; (n/2)+1 members, a majority, must agree on a proposal before it can be committed to the cluster. These proposals include key-value updates and membership changes. This model totally avoids any possibility of split brain inconsistency. The downside is that permanent quorum loss is catastrophic.
How this applies to membership: If a 3-member cluster has 1 downed member, it can still make forward progress because the quorum is 2 and 2 members are still live. However, adding a new member to a 3-member cluster will increase the quorum to 3 because 3 votes are required for a majority of 4 members. Since the quorum increased, this extra member buys nothing in terms of fault tolerance; the cluster is still one node failure away from being unrecoverable.
Additionally, that new member is risky because it may turn out to be misconfigured or incapable of joining the cluster. In that case, there's no way to recover quorum because the cluster has two members down and two members up, but needs three votes to change membership to undo the botched membership addition. etcd will by default reject member add attempts that could take down the cluster in this manner.
On the other hand, if the downed member is removed from cluster membership first, the number of members becomes 2 and the quorum remains at 2. Following that removal by adding a new member will also keep the quorum steady at 2. So, even if the new node can't be brought up, it's still possible to remove the new member through quorum on the remaining live members.
#### Why won't etcd accept my membership changes?
etcd sets `strict-reconfig-check` in order to reject reconfiguration requests that would cause quorum loss. Abandoning quorum is really risky (especially when the cluster is already unhealthy). Although it may be tempting to disable quorum checking if there's quorum loss to add a new member, this could lead to full fledged cluster inconsistency. For many applications, this will make the problem even worse ("disk geometry corruption" being a candidate for most terrifying).
### Performance
#### How should I benchmark etcd?
Try the [benchmark] tool. Current [benchmark results][benchmark-result] are available for comparison.
#### What does the etcd warning "apply entries took too long" mean?
After a majority of etcd members agree to commit a request, each etcd server applies the request to its data store and persists the result to disk. Even with a slow mechanical disk or a virtualized network disk, such as Amazon's EBS or Google's PD, applying a request should normally take fewer than 50 milliseconds. If the average apply duration exceeds 100 milliseconds, etcd will warn that entries are taking too long to apply.
Usually this issue is caused by a slow disk. The disk could be experiencing contention among etcd and other applications, or the disk is simply too slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [backend_commit_duration_seconds][backend_commit_metrics] (p99 duration should be less than 25ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using a faster disk will typically solve the problem.
The second most common cause is CPU starvation. If monitoring of the machine's CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to a dedicated machine, increasing process resource isolation with cgroups, or renicing the etcd server process to a higher priority can usually solve the problem.
Expensive user requests which access too many keys (e.g., fetching the entire keyspace) can also cause long apply latencies. Accessing fewer than several hundred keys per request, however, should always be performant.
If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information.
#### What does the etcd warning "failed to send out heartbeat on time" mean?
etcd uses a leader-based consensus protocol for consistent data replication and log execution. Cluster members elect a single leader; all other members become followers. The elected leader must periodically send heartbeats to its followers to maintain its leadership. Followers infer leader failure if no heartbeats are received within an election interval and trigger an election. If a leader doesn't send its heartbeats in time but is still running, the election is spurious and likely caused by insufficient resources. To catch these soft failures, if the leader skips two heartbeat intervals, etcd will warn that it failed to send a heartbeat on time.
Usually this issue is caused by a slow disk. Before the leader sends heartbeats attached with metadata, it may need to persist the metadata to disk. The disk could be experiencing contention among etcd and other applications, or the disk is simply too slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [wal_fsync_duration_seconds][wal_fsync_duration_seconds] (p99 duration should be less than 10ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using a faster disk will typically solve the problem.
The second most common cause is CPU starvation. If monitoring of the machine's CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to a dedicated machine, increasing process resource isolation with cgroups, or renicing the etcd server process to a higher priority can usually solve the problem.
A slow network can also cause this issue. If network metrics among the etcd machines shows long latencies or high drop rate, there may not be enough network capacity for etcd. Moving etcd members to a less congested network will typically solve the problem. However, if the etcd cluster is deployed across data centers, long latency between members is expected. For such deployments, tune the `heartbeat-interval` configuration to roughly match the round trip time between the machines, and the `election-timeout` configuration to be at least 5 * `heartbeat-interval`. See [tuning documentation][tuning] for detailed information.
If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information.
#### What does the etcd warning "request ignored (cluster ID mismatch)" mean?
Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster.
Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint.
[hardware-setup]: ./op-guide/hardware.md
[supported-platform]: ./op-guide/supported-platform.md
[wal_fsync_duration_seconds]: ./metrics.md#disk
[tuning]: ./tuning.md
[new_issue]: https://github.com/coreos/etcd/issues/new
[backend_commit_metrics]: ./metrics.md#disk
[raft]: https://raft.github.io/raft.pdf
[backup]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/recovery.md#snapshotting-the-keyspace
[chubby]: http://static.googleusercontent.com/media/research.google.com/en//archive/chubby-osdi06.pdf
[runtime reconfiguration]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md
[benchmark]: https://github.com/coreos/etcd/tree/master/tools/benchmark
[benchmark-result]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/performance.md


@ -2,17 +2,15 @@
This document defines the various terms used in etcd documentation, command line and source code.
## Alarm
## Node
The etcd server raises an alarm whenever the cluster needs operator intervention to remain reliable.
Node is an instance of raft state machine.
## Authentication
It has a unique identification, and records other nodes' progress internally when it is the leader.
Authentication manages user access permissions for etcd resources.
## Member
## Client
A client connects to the etcd cluster to issue service requests such as fetching key-value pairs, writing data, or watching for updates.
Member is an instance of etcd. It hosts a node, and provides service to clients.
## Cluster
@ -20,42 +18,6 @@ Cluster consists of several members.
The node in each member follows the raft consensus protocol to replicate logs. The cluster receives proposals from members, commits them, and applies them to the local store.
## Compaction
Compaction discards all etcd event history and superseded keys prior to a given revision. It is used to reclaim storage space in the etcd backend database.
## Election
The etcd cluster holds elections among its members to choose a leader as part of the raft consensus protocol.
## Endpoint
A URL pointing to an etcd service or resource.
## Key
A user-defined identifier for storing and retrieving user-defined values in etcd.
## Key range
A set of keys containing either an individual key, a lexical interval for all x such that a < x <= b, or all keys greater than a given key.
## Keyspace
The set of all keys in an etcd cluster.
## Lease
A short-lived renewable contract that deletes keys associated with it on its expiry.
## Member
A logical etcd server that participates in serving an etcd cluster.
## Modification Revision
The first revision to hold the last write to a given key.
## Peer
Peer is another member of the same cluster.
@ -64,34 +26,10 @@ Peer is another member of the same cluster.
A proposal is a request (for example a write request, a configuration change request) that needs to go through raft protocol.
## Quorum
## Client
The number of active members needed for consensus to modify the cluster state. etcd requires a member majority to reach quorum.
Client is a caller of the cluster's HTTP API.
## Revision
## Machine (deprecated)
A 64-bit cluster-wide counter that is incremented each time the keyspace is modified.
## Role
A unit of permissions over a set of key ranges which may be granted to a set of users for access control.
## Snapshot
A point-in-time backup of the etcd cluster state.
## Store
The physical storage backing the cluster keyspace.
## Transaction
An atomically executed set of operations. All modified keys in a transaction share the same modification revision.
## Key Version
The number of writes to a key since it was created, starting at 1. The version of a nonexistent or deleted key is 0.
## Watcher
A client opens a watcher to observe updates on a given key range.
The alternative name for Member used in etcd before 2.0.

View File

@ -1,21 +0,0 @@
# Why etcd
The name "etcd" originated from two ideas, the unix "/etc" folder and "d"istibuted systems. The "/etc" folder is a place to store configuration data for a single system whereas etcd stores configuration information for large scale distributed systems. Hence, a "d"istributed "/etc" is "etcd".
etcd stores metadata in a consistent and fault-tolerant way. Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Common distributed patterns using etcd include leader election, [distributed locks][etcd-concurrency], and monitoring machine liveness.
## Use cases
- Container Linux by CoreOS: Applications running on [Container Linux][container-linux] get automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
- [Kubernetes][kubernetes] stores configuration data into etcd for service discovery and cluster management; etcd's consistency is crucial for correctly scheduling and operating services. The Kubernetes API server persists cluster state into etcd. It uses etcd's watch API to monitor the cluster and roll out critical configuration changes.
## Features and system comparisons
TODO
[etcd-concurrency]: https://godoc.org/github.com/coreos/etcd/clientv3/concurrency
[container-linux]: https://coreos.com/why
[locksmith]: https://github.com/coreos/locksmith
[kubernetes]: http://kubernetes.io/docs/whatisk8s

View File

@ -14,7 +14,6 @@
- [etcdtool](https://github.com/mickep76/etcdtool) - Export/Import/Edit etcd directory as JSON/YAML/TOML and Validate directory using JSON schema
- [etcd-rest](https://github.com/mickep76/etcd-rest) - Create generic REST API in Go using etcd as a backend with validation using JSON schema
- [etcdsh](https://github.com/kamilhark/etcdsh) - A command line client with support for command history and tab completion. Supports v2
- [etcdloadtest](https://github.com/sinsharat/etcdloadtest) - A command line load test client for etcd version 3.0 and above.
**Go libraries**
@ -24,7 +23,6 @@
**Java libraries**
- [coreos/jetcd](https://github.com/coreos/jetcd) - Supports v3
- [boonproject/etcd](https://github.com/boonproject/boon/blob/master/etcd/README.md) - Supports v2, Async/Sync and waits
- [justinsb/jetcd](https://github.com/justinsb/jetcd)
- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
@ -35,11 +33,9 @@
**Scala libraries**
- [maciej/etcd-client](https://github.com/maciej/etcd-client) - Supports v2. Akka HTTP-based fully async client
- [eiipii/etcdhttpclient](https://bitbucket.org/eiipii/etcdhttpclient) - Supports v2. Async HTTP client based on Netty and Scala Futures.
**Python libraries**
- [kragniz/python-etcd3](https://github.com/kragniz/python-etcd3) - Work in progress client for v3
- [jplana/python-etcd](https://github.com/jplana/python-etcd) - Supports v2
- [russellhaering/txetcd](https://github.com/russellhaering/txetcd) - a Twisted Python library
- [cholcombe973/autodock](https://github.com/cholcombe973/autodock) - A docker deployment automation tool
@ -65,8 +61,6 @@
**C++ libraries**
- [edwardcapriolo/etcdcpp](https://github.com/edwardcapriolo/etcdcpp) - Supports v2
- [suryanathan/etcdcpp](https://github.com/suryanathan/etcdcpp) - Supports v2 (with waits)
- [nokia/etcd-cpp-api](https://github.com/nokia/etcd-cpp-api) - Supports v2
- [nokia/etcd-cpp-apiv3](https://github.com/nokia/etcd-cpp-apiv3) - Supports v3
**Clojure libraries**
@ -86,7 +80,6 @@
**PHP Libraries**
- [linkorb/etcd-php](https://github.com/linkorb/etcd-php)
- [activecollab/etcd](https://github.com/activecollab/etcd)
**Haskell libraries**
@ -96,10 +89,6 @@
- [ropensci/etseed](https://github.com/ropensci/etseed)
**Nim libraries**
- [etcd_client](https://github.com/FedericoCeratto/nim-etcd-client)
**Tcl libraries**
- [efrecon/etcd-tcl](https://github.com/efrecon/etcd-tcl) - Supports v2, except wait.
@ -124,9 +113,7 @@
**Projects using etcd**
- [binocarlos/yoda](https://github.com/binocarlos/yoda) - etcd + ZeroMQ
- [blox/blox](https://github.com/blox/blox) - a collection of open source projects for container management and orchestration with AWS ECS
- [calavera/active-proxy](https://github.com/calavera/active-proxy) - HTTP Proxy configured with etcd
- [chain/chain](https://github.com/chain/chain) - software designed to operate and connect to highly scalable permissioned blockchain networks
- [derekchiang/etcdplus](https://github.com/derekchiang/etcdplus) - A set of distributed synchronization primitives built upon etcd
- [go-discover](https://github.com/flynn/go-discover) - service discovery in Go
- [gleicon/goreman](https://github.com/gleicon/goreman/tree/etcd) - Branch of the Go Foreman clone with etcd support

View File

@ -70,8 +70,6 @@ All these metrics are prefixed with `etcd_network_`
|---------------------------|--------------------------------------------------------------------|---------------|
| peer_sent_bytes_total | The total number of bytes sent to the peer with ID `To`. | Counter(To) |
| peer_received_bytes_total | The total number of bytes received from the peer with ID `From`. | Counter(From) |
| peer_sent_failures_total | The total number of send failures from the peer with ID `To`. | Counter(To) |
| peer_received_failures_total | The total number of receive failures from the peer with ID `From`. | Counter(From) |
| peer_round_trip_time_seconds | Round-Trip-Time histogram between peers. | Histogram(To) |
| client_grpc_sent_bytes_total | The total number of bytes sent to grpc clients. | Counter |
| client_grpc_received_bytes_total| The total number of bytes received to grpc clients. | Counter |
@ -82,7 +80,30 @@ All these metrics are prefixed with `etcd_network_`
### gRPC requests
These metrics are exposed via [go-grpc-prometheus][go-grpc-prometheus].
These metrics describe the requests served by a specific etcd member: total received requests, total failed requests, and processing latency. They are useful for tracking user-generated traffic hitting the etcd cluster.
All these metrics are prefixed with `etcd_grpc_`
| Name | Description | Type |
|--------------------------------|-------------------------------------------------------------------------------------|------------------------|
| requests_total | Total number of received requests | Counter(method) |
| requests_failed_total | Total number of failed requests. | Counter(method,error) |
| unary_requests_duration_seconds | Bucketed handling duration of the requests. | Histogram(method) |
Example Prometheus queries that may be useful from these metrics (across all etcd members):
* `sum(rate(etcd_grpc_requests_failed_total{job="etcd"}[1m])) by (grpc_method) / sum(rate(etcd_grpc_requests_total{job="etcd"}[1m])) by (grpc_method)`
Shows the fraction of requests that failed by gRPC method across all members, over a time window of `1m`.
* `sum(rate(etcd_grpc_requests_total{job="etcd",grpc_method="PUT"}[1m])) by (grpc_method)`
Shows the rate of PUT requests across all members, over a time window of `1m`.
* `histogram_quantile(0.9, sum(rate(etcd_grpc_unary_requests_duration_seconds_bucket{job="etcd",grpc_method="PUT"}[5m])) by (le))`
Shows the 90th percentile latency (in seconds) of PUT request handling across all members, over a window of `5m`.
## etcd_debugging namespace metrics
@ -113,4 +134,3 @@ Heavy file descriptor (`process_open_fds`) usage (i.e., near the process's file
[prometheus-getting-started]: http://prometheus.io/docs/introduction/getting_started/
[prometheus-naming]: http://prometheus.io/docs/practices/naming/
[v2-http-metrics]: v2/metrics.md#http-requests
[go-grpc-prometheus]: https://github.com/grpc-ecosystem/go-grpc-prometheus

View File

@ -83,7 +83,7 @@ A cluster using self-signed certificates both encrypts traffic and authenticates
On each machine, etcd would be started with these flags:
```
$ etcd --name infra0 --initial-advertise-peer-urls https://10.0.1.10:2380 \
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
--listen-peer-urls https://10.0.1.10:2380 \
--listen-client-urls https://10.0.1.10:2379,https://127.0.0.1:2379 \
--advertise-client-urls https://10.0.1.10:2379 \
@ -126,7 +126,7 @@ $ etcd --name infra2 --initial-advertise-peer-urls https://10.0.1.12:2380 \
If the cluster needs encrypted communication but does not require authenticated connections, etcd can be configured to automatically generate its keys. On initialization, each member creates its own set of keys based on its advertised IP addresses and hosts.
On each machine, etcd would be started with these flags:
On each machine, etcd would be started with these flag:
```
$ etcd --name infra0 --initial-advertise-peer-urls https://10.0.1.10:2380 \
@ -205,7 +205,7 @@ exit 1
## Discovery
In a number of cases, the IPs of the cluster peers may not be known ahead of time. This is common when utilizing cloud providers or when the network uses DHCP. In these cases, rather than specifying a static configuration, use an existing etcd cluster to bootstrap a new one. This process is called "discovery".
In a number of cases, the IPs of the cluster peers may not be known ahead of time. This is common when utilizing cloud providers or when the network uses DHCP. In these cases, rather than specifying a static configuration, use an existing etcd cluster to bootstrap a new one. We call this process "discovery".
There are two methods that can be used for discovery:
@ -214,17 +214,17 @@ There two methods that can be used for discovery:
### etcd discovery
To better understand the design of the discovery service protocol, we suggest reading the discovery service protocol [documentation][discovery-proto].
To better understand the design about discovery service protocol, we suggest reading the discovery service protocol [documentation][discovery-proto].
#### Lifetime of a discovery URL
A discovery URL identifies a unique etcd cluster. Instead of reusing an existing discovery URL, each etcd instance shares a new discovery URL to bootstrap the new cluster.
A discovery URL identifies a unique etcd cluster. Instead of reusing a discovery URL, always create discovery URLs for new clusters.
Moreover, discovery URLs should ONLY be used for the initial bootstrapping of a cluster. To change cluster membership after the cluster is already running, see the [runtime reconfiguration][runtime-conf] guide.
#### Custom etcd discovery service
Discovery uses an existing cluster to bootstrap itself. If using a private etcd cluster, create a URL like so:
Discovery uses an existing cluster to bootstrap itself. If using a private etcd cluster, can create a URL like so:
```
$ curl -X PUT https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83/_config/size -d value=3
@ -271,7 +271,7 @@ $ curl https://discovery.etcd.io/new?size=3
https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
This will create the cluster with an initial size of 3 members. If no size is specified, a default of 3 is used.
This will create the cluster with an initial expected size of 3 members. If no size is specified, a default of 3 is used.
```
ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
@ -281,7 +281,7 @@ ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573d
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
**Each member must have a different name flag specified or else discovery will fail due to duplicated names. `Hostname` or `machine-id` can be a good choice.**
**Each member must have a different name flag specified. `Hostname` or `machine-id` can be a good choice. Or discovery will fail due to duplicated name.**
Now we start etcd with those relevant flags for each member:
@ -357,8 +357,6 @@ To help clients discover the etcd cluster, the following DNS SRV records are loo
If `_etcd-client-ssl._tcp.example.com` is found, clients will attempt to communicate with the etcd cluster over SSL/TLS.
If etcd is using TLS without a custom certificate authority, the discovery domain (e.g., example.com) must match the SRV record domain (e.g., infra1.example.com). This is to mitigate attacks that forge SRV records to point to a different domain; the domain would have a valid certificate under PKI but be controlled by an unknown third party.
#### Create DNS SRV records
```
@ -456,10 +454,6 @@ $ etcd --name infra2 \
--listen-peer-urls http://10.0.1.12:2380
```
### Gateway
etcd gateway is a simple TCP proxy that forwards network data to the etcd cluster. Please read [gateway guide] for more information.
### Proxy
When the `--proxy` flag is set, etcd runs in [proxy mode][proxy]. This proxy mode only supports the etcd v2 API; there are no plans to support the v3 API. Instead, for v3 API support, there will be a new proxy with enhanced features following the etcd 3.0 release.
@ -476,4 +470,3 @@ To setup an etcd cluster with proxies of v2 API, please read the the [clustering
[clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md
[security-guide]: security.md
[tls-setup]: /hack/tls-setup
[gateway]: gateway.md

View File

@ -247,7 +247,7 @@ The security flags help to [build a secure etcd cluster][security].
+ env variable: ETCD_DEBUG
### --log-package-levels
+ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG`
+ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG`
+ default: none (INFO for all packages)
+ env variable: ETCD_LOG_PACKAGE_LEVELS
@ -276,13 +276,9 @@ Follow the instructions when using these flags.
## Profiling flags
### --enable-pprof
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof"
+ default: false
### --metrics
+ Set level of detail for exported metrics, specify 'extensive' to include histogram metrics.
+ default: basic
[build-cluster]: clustering.md#static
[reconfig]: runtime-configuration.md
[discovery]: clustering.md#discovery

View File

@ -2,68 +2,6 @@
The following guide shows how to run etcd with rkt and Docker using the [static bootstrap process](clustering.md#static).
## rkt
### Running a single node etcd
The following rkt run command will expose the etcd client API on port 2379 and expose the peer API on port 2380.
Use the host IP address when configuring etcd.
```
export NODE1=192.168.1.21
```
Trust the CoreOS [App Signing Key](https://coreos.com/security/app-signing-key/).
```
sudo rkt trust --prefix coreos.com/etcd
# gpg key fingerprint is: 18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E
```
Run the `v3.0.6` version of etcd or specify another release version.
```
sudo rkt run --net=default:IP=${NODE1} coreos.com/etcd:v3.0.6 -- -name=node1 -advertise-client-urls=http://${NODE1}:2379 -initial-advertise-peer-urls=http://${NODE1}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE1}:2380 -initial-cluster=node1=http://${NODE1}:2380
```
List the cluster member.
```
etcdctl --endpoints=http://192.168.1.21:2379 member list
```
### Running a 3 node etcd cluster
Set up a 3 node cluster with rkt locally, using the `-initial-cluster` flag.
```sh
export NODE1=172.16.28.21
export NODE2=172.16.28.22
export NODE3=172.16.28.23
```
```
# node 1
sudo rkt run --net=default:IP=${NODE1} coreos.com/etcd:v3.0.6 -- -name=node1 -advertise-client-urls=http://${NODE1}:2379 -initial-advertise-peer-urls=http://${NODE1}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE1}:2380 -initial-cluster=node1=http://${NODE1}:2380,node2=http://${NODE2}:2380,node3=http://${NODE3}:2380
# node 2
sudo rkt run --net=default:IP=${NODE2} coreos.com/etcd:v3.0.6 -- -name=node2 -advertise-client-urls=http://${NODE2}:2379 -initial-advertise-peer-urls=http://${NODE2}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE2}:2380 -initial-cluster=node1=http://${NODE1}:2380,node2=http://${NODE2}:2380,node3=http://${NODE3}:2380
# node 3
sudo rkt run --net=default:IP=${NODE3} coreos.com/etcd:v3.0.6 -- -name=node3 -advertise-client-urls=http://${NODE3}:2379 -initial-advertise-peer-urls=http://${NODE3}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE3}:2380 -initial-cluster=node1=http://${NODE1}:2380,node2=http://${NODE2}:2380,node3=http://${NODE3}:2380
```
Verify the cluster is healthy and can be reached.
```
ETCDCTL_API=3 etcdctl --endpoints=http://172.16.28.21:2379,http://172.16.28.22:2379,http://172.16.28.23:2379 endpoint health
```
### DNS
Production clusters which refer to peers by DNS name known to the local resolver must mount the [host's DNS configuration](https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html#customizing-rkt-options).
## Docker
In order to expose the etcd API to clients outside of the Docker host, use the host IP address of the container. Please see [`docker inspect`](https://docs.docker.com/engine/reference/commandline/inspect) for more detail on how to get the IP address. Alternatively, specify the `--net=host` flag to the `docker run` command to skip placing the container inside of a separate network stack.
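As a rough sketch, a single-node etcd container can be started with the same flags used in the rkt examples above; the image tag and in-container binary path here are assumptions, and the published ports must match the listen URLs:
```
export NODE1=192.168.1.21
docker run -d --name etcd \
  -p 2379:2379 -p 2380:2380 \
  quay.io/coreos/etcd:v3.0.6 \
  /usr/local/bin/etcd -name=node1 \
  -advertise-client-urls=http://${NODE1}:2379 -listen-client-urls=http://0.0.0.0:2379 \
  -initial-advertise-peer-urls=http://${NODE1}:2380 -listen-peer-urls=http://0.0.0.0:2380 \
  -initial-cluster=node1=http://${NODE1}:2380
```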
@ -121,7 +59,3 @@ To run `etcdctl` using API version 3:
docker exec etcd /bin/sh -c "export ETCDCTL_API=3 && /usr/local/bin/etcdctl put foo bar"
```
## Bare Metal
To provision a 3 node etcd cluster on bare-metal, you might find the examples in the [baremetal repo](https://github.com/coreos/coreos-baremetal/tree/master/examples) useful.

Binary file not shown.


View File

@ -1,66 +0,0 @@
# etcd gateway
## What is etcd gateway
etcd gateway is a simple TCP proxy that forwards network data to the etcd cluster. The gateway is stateless and transparent; it neither inspects client requests nor interferes with cluster responses.
The gateway supports multiple etcd server endpoints. When the gateway starts, it randomly picks one etcd server endpoint and forwards all requests to that endpoint. This endpoint serves all requests until the gateway detects a network failure. If the gateway detects an endpoint failure, it will switch to a different endpoint, if available, to hide failures from its clients. Other retry policies, such as weighted round-robin, may be supported in the future.
## When to use etcd gateway
Every application that accesses etcd must first have the address of an etcd cluster client endpoint. If multiple applications on the same server access the same etcd cluster, every application still needs to know the advertised client endpoints of the etcd cluster. If the etcd cluster is reconfigured to have different endpoints, every application may also need to update its endpoint list. This wide-scale reconfiguration is both tedious and error prone.
etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has
each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application.
In summary, to automatically propagate cluster endpoint changes, the etcd gateway runs on every machine serving multiple applications accessing the same etcd cluster.
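For example, with a gateway already running locally (started as shown below), an application or etcdctl on the machine only needs the stable local endpoint; `127.0.0.1:23790` is assumed here as the gateway's local listen address:
```bash
# the gateway forwards this request to one of the real cluster endpoints
$ ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:23790 endpoint health
```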
## When not to use etcd gateway
- Improving performance
The gateway is not designed for improving etcd cluster performance. It does not provide caching, watch coalescing or batching. The etcd team is developing a caching proxy designed for improving cluster scalability.
- Running on a cluster management system
Advanced cluster management systems like Kubernetes natively support service discovery. Applications can access an etcd cluster with a DNS name or a virtual IP address managed by the system. For example, kube-proxy is equivalent to etcd gateway.
## Start etcd gateway
Consider an etcd cluster with the following static endpoints:
|Name|Address|Hostname|
|------|---------|------------------|
|infra0|10.0.1.10|infra0.example.com|
|infra1|10.0.1.11|infra1.example.com|
|infra2|10.0.1.12|infra2.example.com|
Start the etcd gateway to use these static endpoints with the command:
```bash
$ etcd gateway start --endpoints=infra0.example.com,infra1.example.com,infra2.example.com
2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...]
```
Alternatively, if using DNS for service discovery, consider the DNS SRV entries:
```bash
$ dig +noall +answer SRV _etcd-client._tcp.example.com
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra0.example.com.
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra1.example.com.
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra2.example.com.
```
```bash
$ dig +noall +answer infra0.example.com infra1.example.com infra2.example.com
infra0.example.com. 300 IN A 10.0.1.10
infra1.example.com. 300 IN A 10.0.1.11
infra2.example.com. 300 IN A 10.0.1.12
```
Start the etcd gateway to fetch the endpoints from the DNS SRV entries with the command:
```bash
$ etcd gateway --discovery-srv=example.com
2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...]
```

File diff suppressed because it is too large Load Diff

View File

@ -1,77 +0,0 @@
# gRPC proxy
*This is an alpha feature, we are looking for early feedback.*
The gRPC proxy is a stateless etcd reverse proxy operating at the gRPC layer (L7). The proxy is designed to reduce the total processing load on the core etcd cluster. For horizontal scalability, it coalesces watch and lease API requests. To protect the cluster against abusive clients, it caches key range requests.
The gRPC proxy supports multiple etcd server endpoints. When the proxy starts, it randomly picks one etcd server endpoint to use. This endpoint serves all requests until the proxy detects an endpoint failure. If the gRPC proxy detects an endpoint failure, it switches to a different endpoint, if available, to hide failures from its clients. Other retry policies, such as weighted round-robin, may be supported in the future.
## Scalable watch API
The gRPC proxy coalesces multiple client watchers (`c-watchers`) on the same key or range into a single watcher (`s-watcher`) connected to an etcd server. The proxy broadcasts all events from the `s-watcher` to its `c-watchers`.
Assuming N clients watch the same key, one gRPC proxy can reduce the watch load on the etcd server from N to 1. Users can deploy multiple gRPC proxies to further distribute server load.
In the following example, three clients watch on key A. The gRPC proxy coalesces the three watchers, creating a single watcher attached to the etcd server.
```
+-------------+
| etcd server |
+------+------+
^ watch key A (s-watcher)
|
+-------+-----+
| gRPC proxy | <-------+
| | |
++-----+------+ |watch key A (c-watcher)
watch key A ^ ^ watch key A |
(c-watcher) | | (c-watcher) |
+-------+-+ ++--------+ +----+----+
| client | | client | | client |
| | | | | |
+---------+ +---------+ +---------+
```
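A minimal Go sketch of one such client watcher, using the `clientv3` package and assuming the proxy listens on `127.0.0.1:2379` (matching the start example below):
```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Connect through the local gRPC proxy endpoint (assumed 127.0.0.1:2379,
	// matching the --listen-addr used when starting the proxy below).
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// This client's c-watcher on key "A"; the proxy coalesces all such
	// watchers into a single s-watcher against the etcd server.
	for resp := range cli.Watch(context.Background(), "A") {
		for _, ev := range resp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```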
### Limitations
To effectively coalesce multiple client watchers into a single watcher, the gRPC proxy coalesces new `c-watchers` into an existing `s-watcher` when possible. This coalesced `s-watcher` may be out of sync with the etcd server due to network delays or buffered undelivered events. When the watch revision is unspecified, the gRPC proxy will not guarantee the `c-watcher` will start watching from the most recent store revision. For example, if a client watches from an etcd server with revision 1000, that watcher will begin at revision 1000. If a client watches from the gRPC proxy, it may begin watching from revision 990.
Similar limitations apply to cancellation. When the watcher is cancelled, the etcd server's revision may be greater than the cancellation response revision.
These two limitations should not cause problems for most use cases. In the future, there may be additional options to force the watcher to bypass the gRPC proxy for more accurate revision responses.
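In the meantime, clients that need an exact starting point can specify the watch revision explicitly; for example, with etcdctl:
```bash
# start watching from an explicit revision instead of "whatever is current"
$ ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 watch foo --rev=1000
```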
## Scalable lease API
TODO
## Abusive clients protection
The gRPC proxy caches responses for requests when it does not break consistency requirements. This can protect the etcd server from abusive clients in tight for loops.
## Start etcd gRPC proxy
Consider an etcd cluster with the following static endpoints:
|Name|Address|Hostname|
|------|---------|------------------|
|infra0|10.0.1.10|infra0.example.com|
|infra1|10.0.1.11|infra1.example.com|
|infra2|10.0.1.12|infra2.example.com|
Start the etcd gRPC proxy to use these static endpoints with the command:
```bash
$ etcd grpc-proxy start --endpoints=infra0.example.com,infra1.example.com,infra2.example.com --listen-addr=127.0.0.1:2379
```
The etcd gRPC proxy starts and listens on `127.0.0.1:2379` (the address given by `--listen-addr`). It forwards client requests to one of the three endpoints provided above.
Sending requests through the proxy:
```bash
$ ETCDCTL_API=3 ./etcdctl --endpoints=127.0.0.1:2379 put foo bar
OK
$ ETCDCTL_API=3 ./etcdctl --endpoints=127.0.0.1:2379 get foo
foo
bar
```

View File

@ -1,93 +0,0 @@
# Hardware recommendations
etcd usually runs well with limited resources for development or testing purposes; its common to develop with etcd on a laptop or a cheap cloud machine. However, when running etcd clusters in production, some hardware guidelines are useful for proper administration. These suggestions are not hard rules; they serve as a good starting point for a robust production deployment. As always, deployments should be tested with simulated workloads before running in production.
## CPUs
Few etcd deployments require a lot of CPU capacity. Typical clusters need two to four cores to run smoothly.
Heavily loaded etcd deployments, serving thousands of clients or tens of thousands of requests per second, tend to be CPU bound since etcd can serve requests from memory. Such heavy deployments usually need eight to sixteen dedicated cores.
## Memory
etcd has a relatively small memory footprint but its performance still depends on having enough memory. An etcd server will aggressively cache key-value data and spends most of the rest of its memory tracking watchers. Typically 8GB is enough. For heavy deployments with thousands of watchers and millions of keys, allocate 16GB to 64GB memory accordingly.
## Disks
Fast disks are the most critical factor for etcd deployment performance and stability.
A slow disk will increase etcd request latency and potentially hurt cluster stability. Since etcds consensus protocol depends on persistently storing metadata to a log, a majority of etcd cluster members must write every request down to disk. Additionally, etcd will also incrementally checkpoint its state to disk so it can truncate this log. If these writes take too long, heartbeats may time out and trigger an election, undermining the stability of the cluster.
etcd is very sensitive to disk write latency. Typically 50 sequential IOPS (e.g., a 7200 RPM disk) is required. For heavily loaded clusters, 500 sequential IOPS (e.g., a typical local SSD or a high performance virtualized block device) is recommended. Note that most cloud providers publish concurrent IOPS rather than sequential IOPS; the published concurrent IOPS can be 10x greater than the sequential IOPS. To measure actual sequential IOPS, we suggest using a disk benchmarking tool such as [diskbench][diskbench] or [fio][fio].
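As a rough, hedged illustration of measuring synchronous write behavior on the disk that will back etcd (the job parameters below are illustrative assumptions, not a calibrated benchmark):
```sh
# small sequential writes with a data sync after each write,
# approximating etcd's write-ahead-log append pattern
$ fio --name=etcd-disk-check --directory=/var/lib/etcd-bench \
      --rw=write --ioengine=sync --fdatasync=1 --bs=2k --size=32m
```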
etcd requires only modest disk bandwidth but more disk bandwidth buys faster recovery times when a failed member has to catch up with the cluster. Typically 10MB/s will recover 100MB data within 15 seconds. For large clusters, 100MB/s or higher is suggested for recovering 1GB data within 15 seconds.
When possible, back etcds storage with a SSD. A SSD usually provides lower write latencies with less variance than a spinning disk, thus improving the stability and reliability of etcd. If using a spinning disk, get the fastest disks possible (15,000 RPM). Using RAID 0 is also an effective way to increase disk speed, for both spinning disks and SSD. With at least three cluster members, mirroring and/or parity variants of RAID are unnecessary; etcd's consistent replication already provides high availability.
## Network
Multi-member etcd deployments benefit from a fast and reliable network. In order for etcd to be both consistent and partition tolerant, an unreliable network with partitioning outages will lead to poor availability. Low latency ensures etcd members can communicate fast. High bandwidth can reduce the time to recover a failed etcd member. 1GbE is sufficient for common etcd deployments. For large etcd clusters, a 10GbE network will reduce mean time to recovery.
Deploy etcd members within a single data center when possible to avoid latency overheads and lessen the possibility of partitioning events. If a failure domain in another data center is required, choose a data center closer to the existing one. Please also read the [tuning][tuning] documentation for more information on cross data center deployment.
## Example hardware configurations
Here are a few example hardware setups in AWS and GCE environments. As mentioned before, and worth stressing regardless, administrators should test an etcd deployment with a simulated workload before putting it into production.
Note that these configurations assume these machines are totally dedicated to etcd. Running other applications along with etcd on these machines may cause resource contentions and lead to cluster instability.
### Small cluster
A small cluster serves fewer than 100 clients, fewer than 200 requests per second, and stores no more than 100MB of data.
Example application workload: A 50-node Kubernetes cluster
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|----------|------|-------|--------|------|----------------|
| AWS | m4.large | 2 | 8 | 3600 | 56.25 |
| GCE | n1-standard-1 + 50GB PD SSD | 2 | 7.5 | 1500 | 25 |
### Medium cluster
A medium cluster serves fewer than 500 clients, fewer than 1,000 requests per second, and stores no more than 500MB of data.
Example application workload: A 250-node Kubernetes cluster
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|----------|------|-------|--------|------|----------------|
| AWS | m4.xlarge | 4 | 16 | 6000 | 93.75 |
| GCE | n1-standard-4 + 150GB PD SSD | 4 | 15 | 4500 | 75 |
### Large cluster
A large cluster serves fewer than 1,500 clients, fewer than 10,000 requests per second, and stores no more than 1GB of data.
Example application workload: A 1,000-node Kubernetes cluster
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|----------|------|-------|--------|------|----------------|
| AWS | m4.2xlarge | 8 | 32 | 8000 | 125 |
| GCE | n1-standard-8 + 250GB PD SSD | 8 | 30 | 7500 | 125 |
### xLarge cluster
An xLarge cluster serves more than 1,500 clients, more than 10,000 requests per second, and stores more than 1GB of data.
Example application workload: A 3,000 node Kubernetes cluster
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|----------|------|-------|--------|------|----------------|
| AWS | m4.4xlarge | 16 | 64 | 16,000 | 250 |
| GCE | n1-standard-16 + 500GB PD SSD | 16 | 60 | 15,000 | 250 |
[diskbench]: https://github.com/ongardie/diskbenchmark
[fio]: https://github.com/axboe/fio
[tuning]: ../tuning.md

View File

@ -49,50 +49,51 @@ Finished defragmenting etcd member[127.0.0.1:2379]
## Space quota
The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. Only after freeing enough space in the keyspace and defragmenting the backend database, along with clearing the space quota alarm can the cluster resume normal operation.
The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. After freeing enough space in the keyspace, the alarm can be disarmed and the cluster will resume normal operation.
By default, `etcd` sets a conservative space quota suitable for most applications, but it may be configured on the command line, in bytes:
```sh
# set a very small 16MB quota
$ etcd --quota-backend-bytes=$((16*1024*1024))
$ etcd --quota-backend-bytes=16777216
```
The space quota can be triggered with a loop:
```sh
# fill keyspace
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | ETCDCTL_API=3 etcdctl put key || break; done
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break; done
...
Error: rpc error: code = 8 desc = etcdserver: mvcc: database space exceeded
# confirm quota space is exceeded
$ ETCDCTL_API=3 etcdctl --write-out=table endpoint status
$ etcdctl --write-out=table endpoint status
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| 127.0.0.1:2379 | bf9071f4639c75cc | 2.3.0+git | 18 MB | true | 2 | 3332 |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
# confirm alarm is raised
$ ETCDCTL_API=3 etcdctl alarm list
$ etcdctl alarm list
memberID:13803658152347727308 alarm:NOSPACE
```
Removing excessive keyspace data and defragmenting the backend database will put the cluster back within the quota limits:
Removing excessive keyspace data will put the cluster back within the quota limits so the alarm can be disarmed:
```sh
# get current revision
$ rev=$(ETCDCTL_API=3 etcdctl --endpoints=:2379 endpoint status --write-out="json" | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*')
$ etcdctl --endpoints=:2379 endpoint status
[{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":8925027824743593106,"member_id":13803658152347727308,"revision":1516,"raft_term":2},"version":"2.3.0+git","dbSize":17973248,"leader":13803658152347727308,"raftIndex":6359,"raftTerm":2}}]
# compact away all old revisions
$ ETCDCTL_API=3 etcdctl compact $rev
$ etcdctl compact 1516
compacted revision 1516
# defragment away excessive space
$ ETCDCTL_API=3 etcdctl defrag
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
# disarm alarm
$ ETCDCTL_API=3 etcdctl alarm disarm
$ etcdctl alarm disarm
memberID:13803658152347727308 alarm:NOSPACE
# test puts are allowed again
$ ETCDCTL_API=3 etcdctl put newkey 123
$ etcdctl put newkey 123
OK
```

View File

@ -1,82 +0,0 @@
# Monitoring etcd
Each etcd server exports metrics under the `/metrics` path on its client port.
The metrics can be fetched with `curl`:
```sh
$ curl -L http://localhost:2379/metrics
# HELP etcd_debugging_mvcc_keys_total Total number of keys.
# TYPE etcd_debugging_mvcc_keys_total gauge
etcd_debugging_mvcc_keys_total 0
# HELP etcd_debugging_mvcc_pending_events_total Total number of pending events to be sent.
# TYPE etcd_debugging_mvcc_pending_events_total gauge
etcd_debugging_mvcc_pending_events_total 0
...
```
## Prometheus
Running a [Prometheus][prometheus] monitoring service is the easiest way to ingest and record etcd's metrics.
First, install Prometheus:
```sh
PROMETHEUS_VERSION="1.3.1"
wget https://github.com/prometheus/prometheus/releases/download/v$PROMETHEUS_VERSION/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz -O /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz
tar -xvzf /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz --directory /tmp/ --strip-components=1
/tmp/prometheus -version
```
Set Prometheus's scraper to target the etcd cluster endpoints:
```sh
cat > /tmp/test-etcd.yaml <<EOF
global:
scrape_interval: 10s
scrape_configs:
- job_name: test-etcd
static_configs:
- targets: ['10.240.0.32:2379','10.240.0.33:2379','10.240.0.34:2379']
EOF
cat /tmp/test-etcd.yaml
```
Set up the Prometheus handler:
```sh
nohup /tmp/prometheus \
-config.file /tmp/test-etcd.yaml \
-web.listen-address ":9090" \
-storage.local.path "test-etcd.data" >> /tmp/test-etcd.log 2>&1 &
```
Now Prometheus will scrape etcd metrics every 10 seconds.
## Grafana
[Grafana][grafana] has built-in Prometheus support; just add a Prometheus data source:
```
Name: test-etcd
Type: Prometheus
Url: http://localhost:9090
Access: proxy
```
Then import the default [etcd dashboard template][template] and customize. For instance, if the Prometheus data source name is `my-etcd`, the `datasource` field values in the template JSON also need to be `my-etcd`.
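As a hypothetical shortcut, assuming the template stores data source references under `"datasource"` keys, the field can be rewritten before importing:
```sh
# point every datasource reference in the downloaded template at "my-etcd"
$ sed -i 's/"datasource": *"[^"]*"/"datasource": "my-etcd"/g' grafana.json
```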
See the [demo][demo].
Sample dashboard:
![](./etcd-sample-grafana.png)
[prometheus]: https://prometheus.io/
[grafana]: http://grafana.org/
[template]: ./grafana.json
[demo]: http://dash.etcd.io/dashboard/db/test-etcd

View File

@ -11,7 +11,7 @@ To recover from disastrous failure, etcd v3 provides snapshot and restore facili
Recovering a cluster first needs a snapshot of the keyspace from an etcd member. A snapshot may either be taken from a live member with the `etcdctl snapshot save` command or by copying the `member/snap/db` file from an etcd data directory. For example, the following command snapshots the keyspace served by `$ENDPOINT` to the file `snapshot.db`:
```sh
$ ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
$ etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
```
### Restoring a cluster
@ -23,19 +23,19 @@ Snapshot integrity may be optionally verified at restore time. If the snapshot i
A restore initializes a new member of a new cluster, with a fresh cluster configuration using `etcd`'s cluster configuration flags, but preserves the contents of the etcd keyspace. Continuing from the previous example, the following creates new etcd data directories (`m1.etcd`, `m2.etcd`, `m3.etcd`) for a three member cluster:
```sh
$ ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
$ etcdctl snapshot restore snapshot.db \
--name m1 \
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster m1=http:/host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls http://host1:2380
$ ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
$ etcdctl snapshot restore snapshot.db \
--name m2 \
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster m1=http:/host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls http://host2:2380
$ ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
$ etcdctl snapshot restore snapshot.db \
--name m3 \
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster m1=http:/host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls http://host3:2380
```

View File

@ -169,7 +169,7 @@ As described in the above, the best practice of adding new members is to configu
To avoid this problem, etcd provides an option `-strict-reconfig-check`. If this option is passed to etcd, etcd rejects reconfiguration requests if the number of started members will be less than a quorum of the reconfigured cluster.
It is enabled by default.
It is recommended to enable this option. However, it is disabled by default to keep backward compatibility.
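A minimal sketch of passing the flag explicitly when starting a member (whether it defaults to on depends on the etcd version, per the lines above):
```sh
# reject reconfiguration requests that would leave fewer started members than a quorum
$ etcd -strict-reconfig-check
```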
[add member]: #add-a-new-member
[cluster-reconf]: #cluster-reconfiguration-operations

View File

@ -1,39 +1,14 @@
## Supported platforms
### Current support
The following table lists etcd support status for common architectures and operating systems:
| Architecture | Operating System | Status | Maintainers |
| ------------ | ---------------- | ------------ | ---------------- |
| amd64 | Darwin | Experimental | etcd maintainers |
| amd64 | Linux | Stable | etcd maintainers |
| amd64 | Windows | Experimental | |
| arm64 | Linux | Experimental | @glevand |
| arm | Linux | Unstable | |
| 386 | Linux | Unstable | |
* etcd-maintainers are listed in https://github.com/coreos/etcd/blob/master/MAINTAINERS.
Experimental platforms appear to work in practice and have some platform-specific code in etcd, but do not fully conform to the stable support policy. Unstable platforms have been lightly tested, though less thoroughly than experimental platforms. Unlisted architecture and operating system pairs are currently unsupported; caveat emptor.
### Supporting a new platform
For etcd to officially support a new platform as stable, a few requirements are necessary to ensure acceptable quality:
1. An "official" maintainer for the platform with clear motivation; someone must be responsible for taking care of the platform.
2. Set up CI for build; etcd must compile.
3. Set up CI for running unit tests; etcd must pass simple tests.
4. Set up CI (TravisCI, SemaphoreCI or Jenkins) for running integration tests; etcd must pass intensive tests.
5. (Optional) Set up a functional testing cluster; an etcd cluster should survive stress testing.
## Supported platform
### 32-bit and other unsupported systems
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See the [Go issue][go-issue] and [atomic package][go-atomic] for more information.
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unstable or unsupported architectures will print a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to the target architecture.
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print
a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to
the target architecture.
Currently only the amd64 architecture is officially supported by `etcd`.
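For example, to knowingly run an arm build anyway (arm is purely illustrative here), set the variable to the target architecture before starting etcd:
```
$ ETCD_UNSUPPORTED_ARCH=arm ./etcd
```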
[go-issue]: https://github.com/golang/go/issues/599
[go-atomic]: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
[358]: https://github.com/coreos/etcd/issues/358

View File

@ -71,23 +71,4 @@ $ etcd --snapshot-count=5000
$ ETCD_SNAPSHOT_COUNT=5000 etcd
```
## Network
If the etcd leader serves a large number of concurrent client requests, it may delay processing follower peer requests due to network congestion. This manifests as send buffer error messages on the follower nodes:
```
dropped MsgProp to 247ae21ff9436b2d since streamMsg's sending buffer is full
dropped MsgAppResp to 247ae21ff9436b2d since streamMsg's sending buffer is full
```
These errors may be resolved by prioritizing etcd's peer traffic over its client traffic. On Linux, peer traffic can be prioritized by using the traffic control mechanism:
```
tc qdisc add dev eth0 root handle 1: prio bands 3
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
```
[ping]: https://en.wikipedia.org/wiki/Ping_(networking_utility)

View File

@ -6,27 +6,27 @@ In the general case, upgrading from etcd 2.3 to 3.0 can be a zero-downtime, roll
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
### Upgrade checklists
### Upgrade Checklists
#### Upgrade requirements
#### Upgrade Requirements
To upgrade an existing etcd deployment to 3.0, the running cluster must be 2.3 or greater. If it's before 2.3, please upgrade to [2.3](https://github.com/coreos/etcd/releases/tag/v2.3.0) before upgrading to 3.0.
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl cluster-health` command before proceeding.
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.
#### Preparation
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
Before beginning, [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version.
Before beginning, [backup the etcd data directory](admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version.
#### Mixed versions
#### Mixed Versions
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.0. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
#### Limitations
It might take up to 2 minutes for the newly upgraded member to catch up with the existing cluster when the total data size is larger than 50MB. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
It might take up to 2 minutes for the newly upgraded member to catch up with the existing cluster when the total data size is larger than 50MB. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and well be happy to provide advice on the procedure.
@ -34,15 +34,15 @@ For a much larger total data size, 100MB or more , this one-time process might t
If all members have been upgraded to v3.0, the cluster will be upgraded to v3.0, and downgrade from this completed state is **not possible**. If any single member is still v2.3, however, the cluster and its operations remain “v2.3”, and it is possible from this mixed cluster state to return to using a v2.3 etcd binary on all members.
Please [backup the data directory](../v2/admin_guide.md#backing-up-the-datastore) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
Please [backup the data directory](admin_guide.md#backing-up-the-datastore) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
### Upgrade procedure
### Upgrade Procedure
This example details the upgrade of a three-member v2.3 etcd cluster running on a local machine.
This example details the upgrade of a three-member v2.3 etcd cluster running on a local machine.
#### 1. Check upgrade requirements.
Is the cluster healthy and running v.2.3.x?
Is the the cluster healthy and running v.2.3.x?
```
$ etcdctl cluster-health
@ -64,7 +64,7 @@ When each etcd process is stopped, expected errors will be logged by other clust
2016-06-27 15:21:48.624175 I | rafthttp: the connection with 8211f1d0f64f3269 became inactive
```
Its a good idea at this point to [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
Its a good idea at this point to [backup the etcd data directory](https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
```
$ etcdctl backup \
@ -102,7 +102,7 @@ Upgraded members will log warnings like the following until the entire cluster i
#### 5. Finish
When all members are upgraded, the cluster will report upgrading to 3.0 successfully:
When all members are upgraded, the cluster will report upgrading to 3.0 successfully:
```
2016-06-27 15:22:19.873751 N | membership: updated the cluster version from 2.3 to 3.0

View File

@ -1,123 +0,0 @@
## Upgrade etcd from 3.0 to 3.1
In the general case, upgrading from etcd 3.0 to 3.1 can be a zero-downtime, rolling upgrade:
- one by one, stop the etcd v3.0 processes and replace them with etcd v3.1 processes
- after running all v3.1 processes, new features in v3.1 are available to the cluster
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
### Upgrade checklists
#### Upgrade requirements
To upgrade an existing etcd deployment to 3.1, the running cluster must be 3.0 or greater. If it's before 3.0, please upgrade to [3.0](https://github.com/coreos/etcd/releases/tag/v3.0.16) before upgrading to 3.1.
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.
#### Preparation
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
Before beginning, [backup the etcd data](../op-guide/maintenance.md#snapshot-backup). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version. Please note that the `snapshot` command only backs up the v3 data. For v2 data, see [backing up v2 datastore](../v2/admin_guide.md#backing-up-the-datastore).
#### Mixed versions
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.1. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
#### Limitations
Note: If the cluster only has v3 data and no v2 data, it is not subject to this limitation.
If the cluster is serving a v2 data set larger than 50MB, each newly upgraded member may take up to two minutes to catch up with the existing cluster. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we'll be happy to provide advice on the procedure.
#### Downgrade
If all members have been upgraded to v3.1, the cluster will be upgraded to v3.1, and downgrade from this completed state is **not possible**. If any single member is still v3.0, however, the cluster and its operations remain "v3.0", and it is possible from this mixed cluster state to return to using a v3.0 etcd binary on all members.
Please [backup the data directory](../op-guide/maintenance.md#snapshot-backup) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
### Upgrade procedure
This example shows how to upgrade a 3-member v3.0 etcd cluster running on a local machine.
#### 1. Check upgrade requirements
Is the cluster healthy and running v3.0.x?
```
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
localhost:2379 is healthy: successfully committed proposal: took = 6.600684ms
localhost:22379 is healthy: successfully committed proposal: took = 8.540064ms
localhost:32379 is healthy: successfully committed proposal: took = 8.763432ms
$ curl http://localhost:2379/version
{"etcdserver":"3.0.16","etcdcluster":"3.0.0"}
```
#### 2. Stop the existing etcd process
When each etcd process is stopped, expected errors will be logged by other cluster members. This is normal since a cluster member connection has been (temporarily) broken:
```
2017-01-17 09:34:18.352662 I | raft: raft.node: 1640829d9eea5cfb elected leader 1640829d9eea5cfb at term 5
2017-01-17 09:34:18.359630 W | etcdserver: failed to reach the peerURL(http://localhost:2380) of member fd32987dcd0511e0 (Get http://localhost:2380/version: dial tcp 127.0.0.1:2380: getsockopt: connection refused)
2017-01-17 09:34:18.359679 W | etcdserver: cannot get the version of member fd32987dcd0511e0 (Get http://localhost:2380/version: dial tcp 127.0.0.1:2380: getsockopt: connection refused)
2017-01-17 09:34:18.548116 W | rafthttp: lost the TCP streaming connection with peer fd32987dcd0511e0 (stream Message writer)
2017-01-17 09:34:19.147816 W | rafthttp: lost the TCP streaming connection with peer fd32987dcd0511e0 (stream MsgApp v2 writer)
2017-01-17 09:34:34.364907 W | etcdserver: failed to reach the peerURL(http://localhost:2380) of member fd32987dcd0511e0 (Get http://localhost:2380/version: dial tcp 127.0.0.1:2380: getsockopt: connection refused)
```
It's a good idea at this point to [backup the etcd data](../op-guide/maintenance.md#snapshot-backup) to provide a downgrade path should any problems occur:
```
$ etcdctl snapshot save backup.db
```
#### 3. Drop-in etcd v3.1 binary and start the new etcd process
The new v3.1 etcd will publish its information to the cluster:
```
2017-01-17 09:36:00.996590 I | etcdserver: published {Name:my-etcd-1 ClientURLs:[http://localhost:2379]} to cluster 46bc3ce73049e678
```
Verify that each member, and then the entire cluster, becomes healthy with the new v3.1 etcd binary:
```
$ ETCDCTL_API=3 /etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
localhost:22379 is healthy: successfully committed proposal: took = 5.540129ms
localhost:32379 is healthy: successfully committed proposal: took = 7.321671ms
localhost:2379 is healthy: successfully committed proposal: took = 10.629901ms
```
Upgraded members will log warnings like the following until the entire cluster is upgraded. This is expected and will cease after all etcd cluster members are upgraded to v3.1:
```
2017-01-17 09:36:38.406268 W | etcdserver: the local etcd version 3.0.16 is not up-to-date
2017-01-17 09:36:38.406295 W | etcdserver: member fd32987dcd0511e0 has a higher version 3.1.0
2017-01-17 09:36:42.407695 W | etcdserver: the local etcd version 3.0.16 is not up-to-date
2017-01-17 09:36:42.407730 W | etcdserver: member fd32987dcd0511e0 has a higher version 3.1.0
```
#### 4. Repeat step 2 to step 3 for all other members
#### 5. Finish
When all members are upgraded, the cluster will report that it has successfully upgraded to 3.1:
```
2017-01-17 09:37:03.100015 I | etcdserver: updating the cluster version from 3.0 to 3.1
2017-01-17 09:37:03.104263 N | etcdserver/membership: updated the cluster version from 3.0 to 3.1
2017-01-17 09:37:03.104374 I | etcdserver/api: enabled capabilities for version 3.1
```
```
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
localhost:2379 is healthy: successfully committed proposal: took = 2.312897ms
localhost:22379 is healthy: successfully committed proposal: took = 2.553476ms
localhost:32379 is healthy: successfully committed proposal: took = 2.516902ms
```
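As a final check, the version endpoint should now report the new cluster version as well. The output below is illustrative:
```
$ curl http://localhost:2379/version
{"etcdserver":"3.1.0","etcdcluster":"3.1.0"}
```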
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev

View File

@ -216,7 +216,7 @@ To recover from such scenarios, etcd provides functionality to backup and restor
#### Backing up the datastore
**Note:** Windows users must stop etcd before running the backup command.
**NB:** Windows users must stop etcd before running the backup command.
The first step of the recovery is to backup the data directory and wal directory, if stored separately, on a functioning etcd node. To do this, use the `etcdctl backup` command, passing in the original data (and wal) directory used by etcd. For example:
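A minimal invocation might look like the following; the directory paths are placeholders:
```
$ etcdctl backup --data-dir /var/lib/etcd --backup-dir /tmp/etcd_backup
```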
@ -262,9 +262,7 @@ Once you have verified that etcd has started successfully, shut it down and move
Now that the node is running successfully, [change its advertised peer URLs][update-a-member], as the `--force-new-cluster` option has set the peer URL to the default listening on localhost.
You can then add more nodes to the cluster and restore resiliency. See the [add a new member][add-a-member] guide for more details.
**Note:** If you are trying to restore your cluster using old failed etcd nodes, please make sure you have stopped old etcd instances and removed their old data directories specified by the data-dir configuration parameter.
You can then add more nodes to the cluster and restore resiliency. See the [add a new member][add-a-member] guide for more details. **NB:** If you are trying to restore your cluster using old failed etcd nodes, please make sure you have stopped old etcd instances and removed their old data directories specified by the data-dir configuration parameter.
### Client Request Timeout

View File

@ -559,25 +559,6 @@ Let's create a key-value pair first: `foo=one`.
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=one
```
```json
{
"action":"set",
"node":{
"key":"/foo",
"value":"one",
"modifiedIndex":4,
"createdIndex":4
}
}
```
Specifying the `noValueOnSuccess` option skips returning the node as value.
```sh
curl http://127.0.0.1:2379/v2/keys/foo?noValueOnSuccess=true -XPUT -d value=one
# {"action":"set"}
```
Now let's try some invalid `CompareAndSwap` commands.
Trying to set this existing key with `prevExist=false` fails as expected:

View File

@ -266,7 +266,7 @@ Follow the instructions when using these flags.
## Profiling flags
### --enable-pprof
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof"
+ default: false
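With profiling enabled, the index of available profiles can be fetched directly from a client URL. The example below assumes the default listen address:
```
$ etcd --enable-pprof &
$ curl http://localhost:2379/debug/pprof/
```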
[build-cluster]: clustering.md#static

View File

@ -48,7 +48,7 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
## Build Release Binaries and Images
- Ensure `acbuild` is available.
- Ensure `actool` is available, or installing it through `go get github.com/appc/spec/actool`.
- Ensure `docker` is available.
Run release script in root directory:

View File

@ -105,7 +105,7 @@ ETCD_INITIAL_CLUSTER_STATE=existing
### Stop the proxy process
Stop the existing proxy so we can wipe its state on disk and reload it with the new configuration:
Stop the existing proxy so we can wipe it's state on disk and reload it with the new configuration:
``` bash
ps aux | grep etcd
@ -149,5 +149,5 @@ If an error occurs, check the [add member troubleshooting doc][runtime-configura
[discovery-service]: clustering.md#discovery
[goreman]: https://github.com/mattn/goreman
[procfile]: https://github.com/coreos/etcd/blob/master/Procfile
[procfile]: /Procfile
[runtime-configuration]: runtime-configuration.md#error-cases-when-adding-members

81
NEWS
View File

@ -1,81 +0,0 @@
etcd v3.1.0 (2017-01-20)
- faster linearizable reads (implements Raft read-index)
- automatic leadership transfer when leader steps down
- etcd uses default route IP if advertise URL is not given
- cluster rejects removing members if quorum will be lost
- SRV records (e.g., infra1.example.com) must match the discovery domain
(i.e., example.com) if no custom certificate authority is given
- TLSConfig ServerName is ignored with user-provided certificates
for backwards compatibility; to be deprecated in 3.2
- discovery now has upper limit for waiting on retries
- etcd flags
- --strict-reconfig-check flag is set by default
- add --log-output flag
- add --metrics flag
- v3 authentication API is now stable
- v3 client
- add SetEndpoints method; update endpoints at runtime
- add Sync method; auto-update endpoints at runtime
- add Lease TimeToLive API; fetch lease information
- replace Config.Logger field with global logger
- Get API responses are sorted in ascending order by default
- v3 etcdctl
- add lease timetolive command
- add --print-value-only flag to get command
- add --dest-prefix flag to make-mirror command
- command get responses are sorted in ascending order by default
- recipes now conform to sessions defined in clientv3/concurrency
- ACI has symlinks to /usr/local/bin/etcd*
- warn on binding listeners through domain names; to be deprecated in 3.2
- experimental gRPC proxy feature
etcd v3.0.16 (2017-01-13)
etcd v3.0.15 (2016-11-11)
- fix cancel watch request with wrong range end
etcd v3.0.14 (2016-11-04)
- v3 etcdctl migrate command now supports --no-ttl flag to discard keys on transform
etcd v3.0.13 (2016-10-24)
etcd v3.0.12 (2016-10-07)
etcd v3.0.11 (2016-10-07)
- server returns previous key-value (optional)
- clientv3 WithPrevKV option
- v3 etcdctl put,watch,del --prev-kv flag
etcd v3.0.10 (2016-09-23)
etcd v3.0.9 (2016-09-15)
- warn on domain names on listen URLs (v3.2 will reject domain names)
etcd v3.0.8 (2016-09-09)
- allow only IP addresses in listen URLs (domain names are rejected)
etcd v3.0.7 (2016-08-31)
- SRV records only allow A records (RFC 2052)
etcd v3.0.6 (2016-08-19)
etcd v3.0.5 (2016-08-19)
- SRV records (e.g., infra1.example.com) must match the discovery domain
(i.e., example.com) if no custom certificate authority is given
etcd v3.0.4 (2016-07-27)
- v2 auth can now use common name from TLS certificate when --client-cert-auth is enabled
- v2 etcdctl ls command now supports --output=json
- Add /var/lib/etcd directory to etcd official Docker image
etcd v3.0.3 (2016-07-15)
- Revert Dockerfile to use CMD, instead of ENTRYPOINT, to support etcdctl run
- Docker commands for v3.0.2 won't work without specifying executable binary paths
- v3 etcdctl default endpoints are now 127.0.0.1:2379
etcd v3.0.2 (2016-07-08)
- Dockerfile uses ENTRYPOINT, instead of CMD, to run etcd without binary path specified
etcd v3.0.1 (2016-07-01)
etcd v3.0.0 (2016-06-30)

View File

@ -37,17 +37,15 @@ See [etcdctl][etcdctl] for a simple command line client.
### Getting etcd
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
For those wanting to try the very latest version, you can [build the latest version of etcd][dl-build] from the `master` branch.
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.7+ is required).
For those wanting to try the very latest version, you can build the latest version of etcd from the `master` branch.
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.5+ is required).
All development occurs on `master`, including new features and bug fixes.
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
[rkt]: https://github.com/coreos/rkt/releases/
[github-release]: https://github.com/coreos/etcd/releases/
[branch-management]: ./Documentation/branch_management.md
[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
### Running etcd
@ -94,10 +92,6 @@ This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd proxy
Every cluster member and proxy accepts key value reads and key value writes.
### Running etcd on Kubernetes
If you want to run etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
### Next steps
Now it's time to dig into the full etcd API and other guides.
@ -137,4 +131,3 @@ See [reporting bugs](Documentation/reporting_bugs.md) for details about reportin
etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.

View File

@ -6,18 +6,26 @@ This document defines a high level roadmap for etcd development.
The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
etcd 3.1 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
etcd 2.3 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
### etcd 3.2 (2017-May)
- Stable scalable proxy
- Proxy-as-client interface passthrough
- Lock service
- Namespacing proxy
- TLS Command Name and JWT token based authentication
- Read-modify-write V3 Put
- Improved watch performance
- Support non-blocking concurrent read
### etcd 3.0 (April)
- v3 API ([see also the issue tag](https://github.com/coreos/etcd/issues?utf8=%E2%9C%93&q=label%3Aarea/v3api))
- Leases
- Binary protocol
- Support a large number of watchers
- Failure guarantees documented
- Simple v3 client (golang)
- v3 API
- Locking
- Better disk backend
- Improved write throughput
- Support larger datasets and histories
- Simpler disaster recovery UX
- Integrated with Kubernetes
- Mirroring
### etcd 3.3 (?)
- TBD
### etcd 3.1 (July)
- API bindings for other languages
### etcd 3.+ (future)
- Horizontally scalable proxy layer

View File

@ -18,13 +18,13 @@ package authpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
proto "github.com/gogo/protobuf/proto"
math "math"
io "io"
)
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
@ -32,9 +32,7 @@ var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion1
type Permission_Type int32
@ -101,113 +99,113 @@ func init() {
proto.RegisterType((*Role)(nil), "authpb.Role")
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
}
func (m *User) Marshal() (dAtA []byte, err error) {
func (m *User) Marshal() (data []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return dAtA[:n], nil
return data[:n], nil
}
func (m *User) MarshalTo(dAtA []byte) (int, error) {
func (m *User) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
data[i] = 0xa
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.Password) > 0 {
dAtA[i] = 0x12
data[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
i += copy(dAtA[i:], m.Password)
i = encodeVarintAuth(data, i, uint64(len(m.Password)))
i += copy(data[i:], m.Password)
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
dAtA[i] = 0x1a
data[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
data[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
i += copy(data[i:], s)
}
}
return i, nil
}
func (m *Permission) Marshal() (dAtA []byte, err error) {
func (m *Permission) Marshal() (data []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return dAtA[:n], nil
return data[:n], nil
}
func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
func (m *Permission) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PermType != 0 {
dAtA[i] = 0x8
data[i] = 0x8
i++
i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
i = encodeVarintAuth(data, i, uint64(m.PermType))
}
if len(m.Key) > 0 {
dAtA[i] = 0x12
data[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
i = encodeVarintAuth(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if len(m.RangeEnd) > 0 {
dAtA[i] = 0x1a
data[i] = 0x1a
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
i += copy(dAtA[i:], m.RangeEnd)
i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
i += copy(data[i:], m.RangeEnd)
}
return i, nil
}
func (m *Role) Marshal() (dAtA []byte, err error) {
func (m *Role) Marshal() (data []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return dAtA[:n], nil
return data[:n], nil
}
func (m *Role) MarshalTo(dAtA []byte) (int, error) {
func (m *Role) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
data[i] = 0xa
i++
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.KeyPermission) > 0 {
for _, msg := range m.KeyPermission {
dAtA[i] = 0x12
data[i] = 0x12
i++
i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
i = encodeVarintAuth(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
@ -217,31 +215,31 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
func encodeFixed64Auth(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
func encodeFixed32Auth(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
func encodeVarintAuth(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
data[offset] = uint8(v)
return offset + 1
}
func (m *User) Size() (n int) {
@ -310,8 +308,8 @@ func sovAuth(x uint64) (n int) {
func sozAuth(x uint64) (n int) {
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *User) Unmarshal(dAtA []byte) error {
l := len(dAtA)
func (m *User) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@ -323,7 +321,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -351,7 +349,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -365,7 +363,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
@ -382,7 +380,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -396,7 +394,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
if m.Password == nil {
m.Password = []byte{}
}
@ -413,7 +411,7 @@ func (m *User) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -428,11 +426,11 @@ func (m *User) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
@ -451,8 +449,8 @@ func (m *User) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *Permission) Unmarshal(dAtA []byte) error {
l := len(dAtA)
func (m *Permission) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@ -464,7 +462,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -492,7 +490,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
m.PermType |= (Permission_Type(b) & 0x7F) << shift
if b < 0x80 {
@ -511,7 +509,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -525,7 +523,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
@ -542,7 +540,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -556,14 +554,14 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
if m.RangeEnd == nil {
m.RangeEnd = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
@ -582,8 +580,8 @@ func (m *Permission) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *Role) Unmarshal(dAtA []byte) error {
l := len(dAtA)
func (m *Role) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
@ -595,7 +593,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -623,7 +621,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -637,7 +635,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
@ -654,7 +652,7 @@ func (m *Role) Unmarshal(dAtA []byte) error {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -669,13 +667,13 @@ func (m *Role) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
m.KeyPermission = append(m.KeyPermission, &Permission{})
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(dAtA[iNdEx:])
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
@ -694,8 +692,8 @@ func (m *Role) Unmarshal(dAtA []byte) error {
}
return nil
}
func skipAuth(dAtA []byte) (n int, err error) {
l := len(dAtA)
func skipAuth(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
@ -706,7 +704,7 @@ func skipAuth(dAtA []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -724,7 +722,7 @@ func skipAuth(dAtA []byte) (n int, err error) {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
if data[iNdEx-1] < 0x80 {
break
}
}
@ -741,7 +739,7 @@ func skipAuth(dAtA []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
@ -764,7 +762,7 @@ func skipAuth(dAtA []byte) (n int, err error) {
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
@ -775,7 +773,7 @@ func skipAuth(dAtA []byte) (n int, err error) {
if innerWireType == 4 {
break
}
next, err := skipAuth(dAtA[start:])
next, err := skipAuth(data[start:])
if err != nil {
return 0, err
}
@ -799,26 +797,24 @@ var (
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
// 276 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9,
0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x87, 0x8b, 0x25, 0xb4, 0x38, 0xb5,
0x48, 0x48, 0x88, 0x8b, 0x25, 0x2f, 0x31, 0x37, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x27, 0x08,
0xcc, 0x16, 0x92, 0xe2, 0xe2, 0x28, 0x48, 0x2c, 0x2e, 0x2e, 0xcf, 0x2f, 0x4a, 0x91, 0x60, 0x02,
0x8b, 0xc3, 0xf9, 0x42, 0x22, 0x5c, 0xac, 0x45, 0xf9, 0x39, 0xa9, 0xc5, 0x12, 0xcc, 0x0a, 0xcc,
0x1a, 0x9c, 0x41, 0x10, 0x8e, 0xd2, 0x1c, 0x46, 0x2e, 0xae, 0x80, 0xd4, 0xa2, 0xdc, 0xcc, 0xe2,
0xe2, 0xcc, 0xfc, 0x3c, 0x21, 0x63, 0xa0, 0x01, 0x40, 0x5e, 0x48, 0x65, 0x01, 0xc4, 0x60, 0x3e,
0x23, 0x71, 0x3d, 0x88, 0x6b, 0xf4, 0x10, 0xaa, 0xf4, 0x40, 0xd2, 0x41, 0x70, 0x85, 0x42, 0x02,
0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x50, 0x0b, 0x41, 0x4c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xc4, 0xbc,
0xf4, 0xd4, 0xf8, 0xd4, 0xbc, 0x14, 0xa0, 0x7d, 0x60, 0x87, 0x80, 0x05, 0x5c, 0xf3, 0x52, 0x94,
0xb4, 0xb8, 0x58, 0xc0, 0xda, 0x38, 0xb8, 0x58, 0x82, 0x5c, 0x1d, 0x5d, 0x04, 0x18, 0x84, 0x38,
0xb9, 0x58, 0xc3, 0x83, 0x3c, 0x43, 0x5c, 0x05, 0x18, 0x85, 0x78, 0xb9, 0x38, 0x41, 0x82, 0x10,
0x2e, 0x93, 0x52, 0x08, 0x50, 0x0d, 0xd0, 0x9d, 0x58, 0x3d, 0x6b, 0xc1, 0xc5, 0x0b, 0xb4, 0x0b,
0xe1, 0x2c, 0xa0, 0x03, 0x98, 0x35, 0xb8, 0x8d, 0x84, 0x30, 0x1d, 0x1c, 0x84, 0xaa, 0xd0, 0x49,
0xe4, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x40, 0x7c, 0xe2, 0x91, 0x1c, 0xe3, 0x05, 0x20, 0x7e, 0x00,
0xc4, 0x49, 0x6c, 0xe0, 0xf0, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x31, 0x53, 0xfd,
0x8b, 0x01, 0x00, 0x00,
}

View File

@ -22,10 +22,7 @@ import (
"github.com/coreos/etcd/mvcc/backend"
)
// isSubset returns true if a is a subset of b.
// If a is a prefix of b, then a is a subset of b.
// Given intervals [a1,a2) and [b1,b2), is
// the a interval a subset of b?
// isSubset returns true if a is a subset of b
func isSubset(a, b *rangePerm) bool {
switch {
case len(a.end) == 0 && len(b.end) == 0:
@ -35,11 +32,9 @@ func isSubset(a, b *rangePerm) bool {
// b is a key, a is a range
return false
case len(a.end) == 0:
// a is a key, b is a range. need b1 <= a1 and a1 < b2
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.begin, b.end) <= 0
default:
// both are ranges. need b1 <= a1 and a2 <= b2
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.end, b.end) <= 0
}
}
@ -49,53 +44,56 @@ func isRangeEqual(a, b *rangePerm) bool {
// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
// It returns a sorted rangePerm slice.
func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
sort.Sort(RangePermSliceByBegin(perms))
var prev *rangePerm
func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
// TODO(mitake): currently it is O(n^2), we need a better algorithm
newp := make([]*rangePerm, 0)
for i := range perms {
if i == 0 {
prev = perms[i]
newp = append(newp, perms[i])
skip := false
for j := range perms {
if i == j {
continue
}
if isRangeEqual(perms[i], perms[j]) {
// if ranges are equal, we only keep the first range.
if i > j {
skip = true
break
}
} else if isSubset(perms[i], perms[j]) {
// if a range is a strict subset of the other one, we skip the subset.
skip = true
break
}
}
if skip {
continue
}
if isRangeEqual(perms[i], prev) {
continue
}
if isSubset(perms[i], prev) {
continue
}
if isSubset(prev, perms[i]) {
prev = perms[i]
newp[len(newp)-1] = perms[i]
continue
}
prev = perms[i]
newp = append(newp, perms[i])
}
return newp
}
// mergeRangePerms merges adjacent rangePerms.
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
var merged []*rangePerm
merged := make([]*rangePerm, 0)
perms = removeSubsetRangePerms(perms)
sort.Sort(RangePermSliceByBegin(perms))
i := 0
for i < len(perms) {
begin, next := i, i
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 {
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) != -1 {
next++
}
// don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty.
if next != begin && len(perms[next].end) > 0 {
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
} else {
merged = append(merged, perms[begin])
if next != begin {
merged = append(merged, perms[next])
}
}
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
i = next + 1
}

View File

@ -16,8 +16,6 @@ package auth
import (
"bytes"
"fmt"
"reflect"
"testing"
)
@ -48,10 +46,6 @@ func TestGetMergedPerms(t *testing.T) {
[]*rangePerm{{[]byte("a"), []byte("b")}},
[]*rangePerm{{[]byte("a"), []byte("b")}},
},
{
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("")}},
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("")}},
},
{
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("c")}},
[]*rangePerm{{[]byte("a"), []byte("c")}},
@ -112,7 +106,7 @@ func TestGetMergedPerms(t *testing.T) {
},
{
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("b"), []byte("")}, {[]byte("c"), []byte("")}, {[]byte("d"), []byte("")}},
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("c"), []byte("")}, {[]byte("d"), []byte("")}},
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("d"), []byte("")}},
},
// duplicate ranges
{
@ -133,47 +127,3 @@ func TestGetMergedPerms(t *testing.T) {
}
}
}
func TestRemoveSubsetRangePerms(t *testing.T) {
tests := []struct {
perms []*rangePerm
expect []*rangePerm
}{
{ // subsets converge
[]*rangePerm{{[]byte{2}, []byte{3}}, {[]byte{2}, []byte{5}}, {[]byte{1}, []byte{4}}},
[]*rangePerm{{[]byte{1}, []byte{4}}, {[]byte{2}, []byte{5}}},
},
{ // subsets converge
[]*rangePerm{{[]byte{0}, []byte{3}}, {[]byte{0}, []byte{1}}, {[]byte{2}, []byte{4}}, {[]byte{0}, []byte{2}}},
[]*rangePerm{{[]byte{0}, []byte{3}}, {[]byte{2}, []byte{4}}},
},
{ // biggest range at the end
[]*rangePerm{{[]byte{2}, []byte{3}}, {[]byte{0}, []byte{2}}, {[]byte{1}, []byte{4}}, {[]byte{0}, []byte{5}}},
[]*rangePerm{{[]byte{0}, []byte{5}}},
},
{ // biggest range at the beginning
[]*rangePerm{{[]byte{0}, []byte{5}}, {[]byte{2}, []byte{3}}, {[]byte{0}, []byte{2}}, {[]byte{1}, []byte{4}}},
[]*rangePerm{{[]byte{0}, []byte{5}}},
},
{ // no overlapping ranges
[]*rangePerm{{[]byte{2}, []byte{3}}, {[]byte{0}, []byte{1}}, {[]byte{4}, []byte{7}}, {[]byte{8}, []byte{15}}},
[]*rangePerm{{[]byte{0}, []byte{1}}, {[]byte{2}, []byte{3}}, {[]byte{4}, []byte{7}}, {[]byte{8}, []byte{15}}},
},
}
for i, tt := range tests {
rs := removeSubsetRangePerms(tt.perms)
if !reflect.DeepEqual(rs, tt.expect) {
t.Fatalf("#%d: unexpected rangePerms %q, got %q", i, printPerms(rs), printPerms(tt.expect))
}
}
}
func printPerms(rs []*rangePerm) (txt string) {
for i, p := range rs {
if i != 0 {
txt += ","
}
txt += fmt.Sprintf("%+v", *p)
}
return
}

View File

@ -20,86 +20,13 @@ package auth
import (
"crypto/rand"
"math/big"
"strings"
"time"
)
const (
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
defaultSimpleTokenLength = 16
simpleTokenTTL = 5 * time.Minute
simpleTokenTTLResolution = 1 * time.Second
)
type simpleTokenTTLKeeper struct {
tokens map[string]time.Time
addSimpleTokenCh chan string
resetSimpleTokenCh chan string
deleteSimpleTokenCh chan string
stopCh chan chan struct{}
deleteTokenFunc func(string)
}
func NewSimpleTokenTTLKeeper(deletefunc func(string)) *simpleTokenTTLKeeper {
stk := &simpleTokenTTLKeeper{
tokens: make(map[string]time.Time),
addSimpleTokenCh: make(chan string, 1),
resetSimpleTokenCh: make(chan string, 1),
deleteSimpleTokenCh: make(chan string, 1),
stopCh: make(chan chan struct{}),
deleteTokenFunc: deletefunc,
}
go stk.run()
return stk
}
func (tm *simpleTokenTTLKeeper) stop() {
waitCh := make(chan struct{})
tm.stopCh <- waitCh
<-waitCh
close(tm.stopCh)
}
func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
tm.addSimpleTokenCh <- token
}
func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
tm.resetSimpleTokenCh <- token
}
func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
tm.deleteSimpleTokenCh <- token
}
func (tm *simpleTokenTTLKeeper) run() {
tokenTicker := time.NewTicker(simpleTokenTTLResolution)
defer tokenTicker.Stop()
for {
select {
case t := <-tm.addSimpleTokenCh:
tm.tokens[t] = time.Now().Add(simpleTokenTTL)
case t := <-tm.resetSimpleTokenCh:
if _, ok := tm.tokens[t]; ok {
tm.tokens[t] = time.Now().Add(simpleTokenTTL)
}
case t := <-tm.deleteSimpleTokenCh:
delete(tm.tokens, t)
case <-tokenTicker.C:
nowtime := time.Now()
for t, tokenendtime := range tm.tokens {
if nowtime.After(tokenendtime) {
tm.deleteTokenFunc(t)
delete(tm.tokens, t)
}
}
case waitCh := <-tm.stopCh:
tm.tokens = make(map[string]time.Time)
waitCh <- struct{}{}
return
}
}
}
func (as *authStore) GenSimpleToken() (string, error) {
ret := make([]byte, defaultSimpleTokenLength)
@ -124,18 +51,5 @@ func (as *authStore) assignSimpleTokenToUser(username, token string) {
}
as.simpleTokens[token] = username
as.simpleTokenKeeper.addSimpleToken(token)
as.simpleTokensMu.Unlock()
}
func (as *authStore) invalidateUser(username string) {
as.simpleTokensMu.Lock()
defer as.simpleTokensMu.Unlock()
for token, name := range as.simpleTokens {
if strings.Compare(name, username) == 0 {
delete(as.simpleTokens, token)
as.simpleTokenKeeper.deleteSimpleToken(token)
}
}
}

View File

@ -16,11 +16,9 @@ package auth
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"sort"
"strconv"
"strings"
"sync"
@ -30,7 +28,6 @@ import (
"github.com/coreos/pkg/capnslog"
"golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
var (
@ -38,8 +35,6 @@ var (
authEnabled = []byte{1}
authDisabled = []byte{0}
revisionKey = []byte("authRevision")
authBucketName = []byte("auth")
authUsersBucketName = []byte("authUsers")
authRolesBucketName = []byte("authRoles")
@ -49,7 +44,6 @@ var (
ErrRootUserNotExist = errors.New("auth: root user does not exist")
ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
ErrUserAlreadyExist = errors.New("auth: user already exists")
ErrUserEmpty = errors.New("auth: user name is empty")
ErrUserNotFound = errors.New("auth: user not found")
ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRoleNotFound = errors.New("auth: role not found")
@ -57,26 +51,13 @@ var (
ErrPermissionDenied = errors.New("auth: permission denied")
ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
ErrAuthOldRevision = errors.New("auth: revision in header is old")
ErrInvalidAuthToken = errors.New("auth: invalid auth token")
// BcryptCost is the algorithm cost / strength for hashing auth passwords
BcryptCost = bcrypt.DefaultCost
)
const (
rootUser = "root"
rootRole = "root"
revBytesLen = 8
)
type AuthInfo struct {
Username string
Revision uint64
}
type AuthStore interface {
// AuthEnable turns on the authentication feature
AuthEnable() error
@ -129,36 +110,23 @@ type AuthStore interface {
// RoleList gets a list of all roles
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
// AuthInfoFromToken gets a username from the given Token and current revision number
// (The revision number is used for preventing the TOCTOU problem)
AuthInfoFromToken(token string) (*AuthInfo, bool)
// UsernameFromToken gets a username from the given Token
UsernameFromToken(token string) (string, bool)
// IsPutPermitted checks put permission of the user
IsPutPermitted(authInfo *AuthInfo, key []byte) error
IsPutPermitted(username string, key []byte) bool
// IsRangePermitted checks range permission of the user
IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
IsRangePermitted(username string, key, rangeEnd []byte) bool
// IsDeleteRangePermitted checks delete-range permission of the user
IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
// IsAdminPermitted checks admin permission of the user
IsAdminPermitted(authInfo *AuthInfo) error
IsAdminPermitted(username string) bool
// GenSimpleToken produces a simple random string
GenSimpleToken() (string, error)
// Revision gets current revision of authStore
Revision() uint64
// CheckPassword checks a given pair of username and password is correct
CheckPassword(username, password string) (uint64, error)
// Close does cleanup of AuthStore
Close() error
// AuthInfoFromCtx gets AuthInfo from gRPC's context
AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
}
type authStore struct {
@ -168,33 +136,11 @@ type authStore struct {
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
simpleTokensMu sync.RWMutex
simpleTokens map[string]string // token -> username
simpleTokenKeeper *simpleTokenTTLKeeper
revision uint64
indexWaiter func(uint64) <-chan struct{}
}
func newDeleterFunc(as *authStore) func(string) {
return func(t string) {
as.simpleTokensMu.Lock()
defer as.simpleTokensMu.Unlock()
if username, ok := as.simpleTokens[t]; ok {
plog.Infof("deleting token %s for user %s", t, username)
delete(as.simpleTokens, t)
}
}
simpleTokensMu sync.RWMutex
simpleTokens map[string]string // token -> username
}
func (as *authStore) AuthEnable() error {
as.enabledMu.Lock()
defer as.enabledMu.Unlock()
if as.enabled {
plog.Noticef("Authentication already enabled")
return nil
}
b := as.be
tx := b.BatchTx()
tx.Lock()
@ -214,64 +160,33 @@ func (as *authStore) AuthEnable() error {
tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
as.enabledMu.Lock()
as.enabled = true
as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
as.enabledMu.Unlock()
as.rangePermCache = make(map[string]*unifiedRangePermissions)
as.revision = getRevision(tx)
plog.Noticef("Authentication enabled")
return nil
}
func (as *authStore) AuthDisable() {
as.enabledMu.Lock()
defer as.enabledMu.Unlock()
if !as.enabled {
return
}
b := as.be
tx := b.BatchTx()
tx.Lock()
tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
as.commitRevision(tx)
tx.Unlock()
b.ForceCommit()
as.enabledMu.Lock()
as.enabled = false
as.simpleTokensMu.Lock()
as.simpleTokens = make(map[string]string) // invalidate all tokens
as.simpleTokensMu.Unlock()
if as.simpleTokenKeeper != nil {
as.simpleTokenKeeper.stop()
as.simpleTokenKeeper = nil
}
as.enabledMu.Unlock()
plog.Noticef("Authentication disabled")
}
func (as *authStore) Close() error {
as.enabledMu.Lock()
defer as.enabledMu.Unlock()
if !as.enabled {
return nil
}
if as.simpleTokenKeeper != nil {
as.simpleTokenKeeper.stop()
as.simpleTokenKeeper = nil
}
return nil
}
func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
if !as.isAuthEnabled() {
return nil, ErrAuthNotEnabled
}
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
index := ctx.Value("index").(uint64)
simpleToken := ctx.Value("simpleToken").(string)
@ -285,6 +200,11 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
return nil, ErrAuthFailed
}
if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
plog.Noticef("authentication failed, invalid password for user %s", username)
return &pb.AuthenticateResponse{}, ErrAuthFailed
}
token := fmt.Sprintf("%s.%d", simpleToken, index)
as.assignSimpleTokenToUser(username, token)
@ -292,24 +212,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
return &pb.AuthenticateResponse{Token: token}, nil
}
func (as *authStore) CheckPassword(username, password string) (uint64, error) {
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
user := getUser(tx, username)
if user == nil {
return 0, ErrAuthFailed
}
if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
plog.Noticef("authentication failed, invalid password for user %s", username)
return 0, ErrAuthFailed
}
return getRevision(tx), nil
}
func (as *authStore) Recover(be backend.Backend) {
enabled := false
as.be = be
@ -321,9 +223,6 @@ func (as *authStore) Recover(be backend.Backend) {
enabled = true
}
}
as.revision = getRevision(tx)
tx.Unlock()
as.enabledMu.Lock()
@ -332,11 +231,7 @@ func (as *authStore) Recover(be backend.Backend) {
}
func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
if len(r.Name) == 0 {
return nil, ErrUserEmpty
}
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
if err != nil {
plog.Errorf("failed to hash password: %s", err)
return nil, err
@ -358,8 +253,6 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
putUser(tx, newUser)
as.commitRevision(tx)
plog.Noticef("added a new user: %s", r.Name)
return &pb.AuthUserAddResponse{}, nil
@ -377,11 +270,6 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
delUser(tx, r.Name)
as.commitRevision(tx)
as.invalidateCachedPerm(r.Name)
as.invalidateUser(r.Name)
plog.Noticef("deleted a user: %s", r.Name)
return &pb.AuthUserDeleteResponse{}, nil
@ -390,7 +278,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
// If the cost is too high, we should move the encryption to outside of the raft
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
if err != nil {
plog.Errorf("failed to hash password: %s", err)
return nil, err
@ -413,11 +301,6 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
putUser(tx, updatedUser)
as.commitRevision(tx)
as.invalidateCachedPerm(r.Name)
as.invalidateUser(r.Name)
plog.Noticef("changed a password of a user: %s", r.Name)
return &pb.AuthUserChangePasswordResponse{}, nil
@ -453,8 +336,6 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
as.invalidateCachedPerm(r.User)
as.commitRevision(tx)
plog.Noticef("granted role %s to user %s", r.Role, r.User)
return &pb.AuthUserGrantRoleResponse{}, nil
}
@ -470,7 +351,11 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
if user == nil {
return nil, ErrUserNotFound
}
resp.Roles = append(resp.Roles, user.Roles...)
for _, role := range user.Roles {
resp.Roles = append(resp.Roles, role)
}
return &resp, nil
}
@ -519,8 +404,6 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
as.invalidateCachedPerm(r.Name)
as.commitRevision(tx)
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
return &pb.AuthUserRevokeRoleResponse{}, nil
}
@ -536,7 +419,11 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
if role == nil {
return nil, ErrRoleNotFound
}
resp.Perm = append(resp.Perm, role.KeyPermission...)
for _, perm := range role.KeyPermission {
resp.Perm = append(resp.Perm, perm)
}
return &resp, nil
}
@ -586,8 +473,6 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
// It should be optimized.
as.clearCachedPerm()
as.commitRevision(tx)
plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
return &pb.AuthRoleRevokePermissionResponse{}, nil
}
@ -616,8 +501,6 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
delRole(tx, r.Role)
as.commitRevision(tx)
plog.Noticef("deleted role %s", r.Role)
return &pb.AuthRoleDeleteResponse{}, nil
}
@ -638,21 +521,16 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
putRole(tx, newRole)
as.commitRevision(tx)
plog.Noticef("Role %s is created", r.Name)
return &pb.AuthRoleAddResponse{}, nil
}
func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
func (as *authStore) UsernameFromToken(token string) (string, bool) {
as.simpleTokensMu.RLock()
defer as.simpleTokensMu.RUnlock()
t, ok := as.simpleTokens[token]
if ok {
as.simpleTokenKeeper.resetSimpleToken(token)
}
return &AuthInfo{Username: t, Revision: as.revision}, ok
return t, ok
}
type permSlice []*authpb.Permission
@ -704,26 +582,15 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
// It should be optimized.
as.clearCachedPerm()
as.commitRevision(tx)
plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
return &pb.AuthRoleGrantPermissionResponse{}, nil
}
func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
// TODO(mitake): this function would be costly so we need a caching mechanism
if !as.isAuthEnabled() {
return nil
}
// only gets rev == 0 when passed AuthInfo{}; no user given
if revision == 0 {
return ErrUserEmpty
}
if revision < as.revision {
return ErrAuthOldRevision
return true
}
tx := as.be.BatchTx()
@ -733,52 +600,43 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
user := getUser(tx, userName)
if user == nil {
plog.Errorf("invalid user name %s for permission checking", userName)
return ErrPermissionDenied
}
// root role should have permission on all ranges
if hasRootRole(user) {
return nil
return false
}
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
return nil
return true
}
return ErrPermissionDenied
return false
}
func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
func (as *authStore) IsPutPermitted(username string, key []byte) bool {
return as.isOpPermitted(username, key, nil, authpb.WRITE)
}
func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
}
func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
}
func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
func (as *authStore) IsAdminPermitted(username string) bool {
if !as.isAuthEnabled() {
return nil
return true
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
u := getUser(tx, authInfo.Username)
u := getUser(tx, username)
if u == nil {
return ErrUserNotFound
return false
}
if !hasRootRole(u) {
return ErrPermissionDenied
}
return nil
return hasRootRole(u)
}
func getUser(tx backend.BatchTx, username string) *authpb.User {
@ -882,7 +740,7 @@ func (as *authStore) isAuthEnabled() bool {
return as.enabled
}
func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
func NewAuthStore(be backend.Backend) *authStore {
tx := be.BatchTx()
tx.Lock()
@ -890,35 +748,13 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{})
tx.UnsafeCreateBucket(authUsersBucketName)
tx.UnsafeCreateBucket(authRolesBucketName)
enabled := false
_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
if len(vs) == 1 {
if bytes.Equal(vs[0], authEnabled) {
enabled = true
}
}
as := &authStore{
be: be,
simpleTokens: make(map[string]string),
revision: getRevision(tx),
indexWaiter: indexWaiter,
enabled: enabled,
rangePermCache: make(map[string]*unifiedRangePermissions),
}
if enabled {
as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
}
if as.revision == 0 {
as.commitRevision(tx)
}
tx.Unlock()
be.ForceCommit()
return as
return &authStore{
be: be,
simpleTokens: make(map[string]string),
}
}
func hasRootRole(u *authpb.User) bool {
@ -929,67 +765,3 @@ func hasRootRole(u *authpb.User) bool {
}
return false
}
func (as *authStore) commitRevision(tx backend.BatchTx) {
as.revision++
revBytes := make([]byte, revBytesLen)
binary.BigEndian.PutUint64(revBytes, as.revision)
tx.UnsafePut(authBucketName, revisionKey, revBytes)
}
func getRevision(tx backend.BatchTx) uint64 {
_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
if len(vs) != 1 {
// this can happen in the initialization phase
return 0
}
return binary.BigEndian.Uint64(vs[0])
}
func (as *authStore) Revision() uint64 {
return as.revision
}
func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
splitted := strings.Split(token, ".")
if len(splitted) != 2 {
return false
}
index, err := strconv.Atoi(splitted[1])
if err != nil {
return false
}
select {
case <-as.indexWaiter(uint64(index)):
return true
case <-ctx.Done():
}
return false
}
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
md, ok := metadata.FromContext(ctx)
if !ok {
return nil, nil
}
ts, tok := md["token"]
if !tok {
return nil, nil
}
token := ts[0]
if !as.isValidSimpleToken(token, ctx) {
return nil, ErrInvalidAuthToken
}
authInfo, uok := as.AuthInfoFromToken(token)
if !uok {
plog.Warningf("invalid auth token: %s", token)
return nil, ErrInvalidAuthToken
}
return authInfo, nil
}

View File

@ -20,88 +20,49 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/backend"
"golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
)
func init() { BcryptCost = bcrypt.MinCost }
func dummyIndexWaiter(index uint64) <-chan struct{} {
ch := make(chan struct{})
go func() {
ch <- struct{}{}
}()
return ch
}
// TestNewAuthStoreRevision ensures newly auth store
// keeps the old revision when there are no changes.
func TestNewAuthStoreRevision(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer os.Remove(tPath)
as := NewAuthStore(b, dummyIndexWaiter)
err := enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
old := as.Revision()
b.Close()
as.Close()
// no changes to commit
b2 := backend.NewDefaultBackend(tPath)
as = NewAuthStore(b2, dummyIndexWaiter)
new := as.Revision()
b2.Close()
as.Close()
if old != new {
t.Fatalf("expected revision %d, got %d", old, new)
}
}
func enableAuthAndCreateRoot(as *authStore) error {
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", Password: "root"})
if err != nil {
return err
}
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"})
if err != nil {
return err
}
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"})
if err != nil {
return err
}
return as.AuthEnable()
}
func TestCheckPassword(t *testing.T) {
func TestUserAdd(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer func() {
b.Close()
os.Remove(tPath)
}()
as := NewAuthStore(b, dummyIndexWaiter)
defer as.Close()
err := enableAuthAndCreateRoot(as)
as := NewAuthStore(b)
ua := &pb.AuthUserAddRequest{Name: "foo"}
_, err := as.UserAdd(ua) // add a non-existing user
if err != nil {
t.Fatal(err)
}
_, err = as.UserAdd(ua) // add an existing user
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
if err != ErrUserAlreadyExist {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
}
func TestAuthenticate(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer func() {
b.Close()
os.Remove(tPath)
}()
as := NewAuthStore(b)
ua := &pb.AuthUserAddRequest{Name: "foo", Password: "bar"}
_, err = as.UserAdd(ua)
_, err := as.UserAdd(ua)
if err != nil {
t.Fatal(err)
}
// auth a non-existing user
_, err = as.CheckPassword("foo-test", "bar")
ctx1 := context.WithValue(context.WithValue(context.TODO(), "index", uint64(1)), "simpleToken", "dummy")
_, err = as.Authenticate(ctx1, "foo-test", "bar")
if err == nil {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
@ -110,13 +71,15 @@ func TestCheckPassword(t *testing.T) {
}
// auth an existing user with correct password
_, err = as.CheckPassword("foo", "bar")
ctx2 := context.WithValue(context.WithValue(context.TODO(), "index", uint64(2)), "simpleToken", "dummy")
_, err = as.Authenticate(ctx2, "foo", "bar")
if err != nil {
t.Fatal(err)
}
// auth an existing user but with wrong password
_, err = as.CheckPassword("foo", "")
ctx3 := context.WithValue(context.WithValue(context.TODO(), "index", uint64(3)), "simpleToken", "dummy")
_, err = as.Authenticate(ctx3, "foo", "")
if err == nil {
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
}
@ -132,15 +95,10 @@ func TestUserDelete(t *testing.T) {
os.Remove(tPath)
}()
as := NewAuthStore(b, dummyIndexWaiter)
defer as.Close()
err := enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(b)
ua := &pb.AuthUserAddRequest{Name: "foo"}
_, err = as.UserAdd(ua)
_, err := as.UserAdd(ua)
if err != nil {
t.Fatal(err)
}
@ -169,14 +127,9 @@ func TestUserChangePassword(t *testing.T) {
os.Remove(tPath)
}()
as := NewAuthStore(b, dummyIndexWaiter)
defer as.Close()
err := enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(b)
_, err = as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
if err != nil {
t.Fatal(err)
}
@ -215,15 +168,10 @@ func TestRoleAdd(t *testing.T) {
os.Remove(tPath)
}()
as := NewAuthStore(b, dummyIndexWaiter)
defer as.Close()
err := enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(b)
// adds a new role
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"})
_, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"})
if err != nil {
t.Fatal(err)
}
@ -236,14 +184,9 @@ func TestUserGrant(t *testing.T) {
os.Remove(tPath)
}()
as := NewAuthStore(b, dummyIndexWaiter)
defer as.Close()
err := enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(b)
_, err = as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
if err != nil {
t.Fatal(err)
}
@ -268,93 +211,4 @@ func TestUserGrant(t *testing.T) {
if err != ErrUserNotFound {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
// non-admin user
err = as.IsAdminPermitted(&AuthInfo{Username: "foo", Revision: 1})
if err != ErrPermissionDenied {
t.Errorf("expected %v, got %v", ErrPermissionDenied, err)
}
// disabled auth should return nil
as.AuthDisable()
err = as.IsAdminPermitted(&AuthInfo{Username: "root", Revision: 1})
if err != nil {
t.Errorf("expected nil, got %v", err)
}
}
func TestRecoverFromSnapshot(t *testing.T) {
as, _ := setupAuthStore(t)
ua := &pb.AuthUserAddRequest{Name: "foo"}
_, err := as.UserAdd(ua) // add an existing user
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
if err != ErrUserAlreadyExist {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
}
ua = &pb.AuthUserAddRequest{Name: ""}
_, err = as.UserAdd(ua) // add a user with empty name
if err != ErrUserEmpty {
t.Fatal(err)
}
as.Close()
as2 := NewAuthStore(as.be, dummyIndexWaiter)
defer func(a *authStore) {
a.Close()
}(as2)
if !as2.isAuthEnabled() {
t.Fatal("recovering authStore from existing backend failed")
}
ul, err := as.UserList(&pb.AuthUserListRequest{})
if err != nil {
t.Fatal(err)
}
if !contains(ul.Users, "root") {
t.Errorf("expected %v in %v", "root", ul.Users)
}
}
func contains(array []string, str string) bool {
for _, s := range array {
if s == str {
return true
}
}
return false
}
func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) {
b, tPath := backend.NewDefaultTmpBackend()
as := NewAuthStore(b, dummyIndexWaiter)
err := enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
}
// adds a new role
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"})
if err != nil {
t.Fatal(err)
}
ua := &pb.AuthUserAddRequest{Name: "foo", Password: "bar"}
_, err = as.UserAdd(ua) // add a non-existing user
if err != nil {
t.Fatal(err)
}
tearDown := func(t *testing.T) {
b.Close()
os.Remove(tPath)
as.Close()
}
return as, tearDown
}

39
build
View File

@ -4,19 +4,15 @@
ORG_PATH="github.com/coreos"
REPO_PATH="${ORG_PATH}/etcd"
export GO15VENDOREXPERIMENT="1"
eval $(go env)
GIT_SHA=`git rev-parse --short HEAD || echo "GitNotFound"`
if [ ! -z "$FAILPOINTS" ]; then
GIT_SHA="$GIT_SHA"-FAILPOINTS
fi
# Set GO_LDFLAGS="-s" for building without symbols for debugging.
GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}"
# enable/disable failpoints
toggle_failpoints() {
FAILPKGS="etcdserver/ mvcc/backend/"
FAILPKGS="etcdserver/"
mode="disable"
if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
@ -31,33 +27,18 @@ toggle_failpoints() {
}
etcd_build() {
out="bin"
if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
if [ -z "${GOARCH}" ] || [ "${GOARCH}" = "$(go env GOHOSTARCH)" ]; then
out="bin"
else
out="bin/${GOARCH}"
fi
toggle_failpoints
# Static compilation is useful when etcd is run in a container
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcd ${REPO_PATH}/cmd/etcd || return
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl || return
}
etcd_setup_gopath() {
CDIR=$(cd `dirname "$0"` && pwd)
cd "$CDIR"
etcdGOPATH=${CDIR}/gopath
# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
if [ -n "$GOPATH" ]; then
GOPATH=":$GOPATH"
fi
export GOPATH=${etcdGOPATH}$GOPATH
rm -rf ${etcdGOPATH}/src
mkdir -p ${etcdGOPATH}
ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "-s -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}" -o ${out}/etcd ${REPO_PATH}/cmd
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "-s" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl
}
toggle_failpoints
# only build when called directly, not sourced
if echo "$0" | grep "build$" >/dev/null; then
# force new gopath so builds outside of gopath work
etcd_setup_gopath
etcd_build
fi
# don't build when sourced
(echo "$0" | grep "/build$" > /dev/null) && etcd_build || true

View File

@ -1,18 +1,9 @@
$ORG_PATH="github.com/coreos"
$REPO_PATH="$ORG_PATH/etcd"
$PWD = $((Get-Item -Path ".\" -Verbose).FullName)
$FSROOT = $((Get-Location).Drive.Name+":")
$FSYS = $((Get-WMIObject win32_logicaldisk -filter "DeviceID = '$FSROOT'").filesystem)
if ($FSYS.StartsWith("FAT","CurrentCultureIgnoreCase")) {
echo "Error: Cannot build etcd using the $FSYS filesystem (use NTFS instead)"
exit 1
}
# Set $Env:GO_LDFLAGS="-s" for building without symbols.
$GO_LDFLAGS="$Env:GO_LDFLAGS -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA"
# rebuild symlinks
echo "Rebuilding symlinks"
git ls-files -s cmd | select-string -pattern 120000 | ForEach {
$l = $_.ToString()
$lnkname = $l.Split(' ')[1]
@ -22,54 +13,27 @@ git ls-files -s cmd | select-string -pattern 120000 | ForEach {
$terms = $lnkname.Split("\")
$dirname = $terms[0..($terms.length-2)] -join "\"
$lnkname = "$PWD\$lnkname"
$targetAbs = "$((Get-Item -Path "$dirname\$target").FullName)"
$targetAbs = $targetAbs.Replace("/", "\")
if (test-path -pathtype container "$targetAbs") {
if (Test-Path "$lnkname") {
if ((Get-Item "$lnkname") -is [System.IO.DirectoryInfo]) {
# rd so deleting junction doesn't take files with it
cmd /c rd "$lnkname"
}
}
if (Test-Path "$lnkname") {
if (!((Get-Item "$lnkname") -is [System.IO.DirectoryInfo])) {
cmd /c del /A /F "$lnkname"
}
}
cmd /c mklink /J "$lnkname" "$targetAbs" ">NUL"
# rd so deleting junction doesn't take files with it
cmd /c rd "$lnkname"
cmd /c del /A /F "$lnkname"
cmd /c mklink /J "$lnkname" "$targetAbs"
} else {
# Remove file with symlink data (first run)
if (Test-Path "$lnkname") {
cmd /c del /A /F "$lnkname"
}
cmd /c mklink /H "$lnkname" "$targetAbs" ">NUL"
cmd /c del /A /F "$lnkname"
cmd /c mklink /H "$lnkname" "$targetAbs"
}
}
if (-not $env:GOPATH) {
$orgpath="$PWD\gopath\src\" + $ORG_PATH.Replace("/", "\")
if (Test-Path "$orgpath\etcd") {
if ((Get-Item "$orgpath\etcd") -is [System.IO.DirectoryInfo]) {
# rd so deleting junction doesn't take files with it
cmd /c rd "$orgpath\etcd"
}
}
if (Test-Path "$orgpath") {
if ((Get-Item "$orgpath") -is [System.IO.DirectoryInfo]) {
# rd so deleting junction doesn't take files with it
cmd /c rd "$orgpath"
}
}
if (Test-Path "$orgpath") {
if (!((Get-Item "$orgpath") -is [System.IO.DirectoryInfo])) {
# Remove file with symlink data (first run)
cmd /c del /A /F "$orgpath"
}
}
cmd /c mkdir "$orgpath"
cmd /c mklink /J "$orgpath\etcd" "$PWD" ">NUL"
cmd /c rd "$orgpath\etcd"
cmd /c del "$orgpath"
cmd /c mkdir "$orgpath"
cmd /c mklink /J "$orgpath\etcd" "$PWD"
$env:GOPATH = "$PWD\gopath"
}
@ -77,5 +41,5 @@ if (-not $env:GOPATH) {
$env:CGO_ENABLED = 0
$env:GO15VENDOREXPERIMENT = 1
$GIT_SHA="$(git rev-parse --short HEAD)"
go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcd.exe "$REPO_PATH\cmd\etcd"
go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcdctl.exe "$REPO_PATH\cmd\etcdctl"
go build -a -installsuffix cgo -ldflags "-s -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA" -o bin\etcd.exe "$REPO_PATH\cmd"
go build -a -installsuffix cgo -ldflags "-s" -o bin\etcdctl.exe "$REPO_PATH\cmd\etcdctl"

View File

@ -114,4 +114,4 @@ if err != nil {
3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265).
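The workaround described in item 4 is easy to get wrong in practice, so here is a minimal sketch of a linearizable (quorum) read with the v2 client. The endpoint, key, and timeout are placeholders, not values taken from this compare.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"}, // placeholder endpoint
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Quorum forces the read through the leader, so an isolated member
	// cannot serve stale data for this request.
	resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Value)
}
```

Monitoring each member's `/health` endpoint is the complementary workaround for watches, which have no quorum option.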

View File

@ -22,6 +22,7 @@ import (
"net"
"net/http"
"net/url"
"reflect"
"sort"
"strconv"
"sync"
@ -36,10 +37,6 @@ var (
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
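As an aside, the `oneShotCtxValue` variable above relies on a small Go idiom: the address of a package-level variable makes a context key that cannot collide with keys set by other packages. A self-contained sketch of the same idea, using a hypothetical key rather than etcd's unexported variable, is shown below.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// oneShotKey stands in for oneShotCtxValue: its address is used as a
// collision-free context key.
var oneShotKey interface{}

// isOneShot mirrors the ctx.Value(&oneShotCtxValue) != nil check that Do()
// performs later in this file's diff.
func isOneShot(ctx context.Context) bool {
	return ctx.Value(&oneShotKey) != nil
}

func main() {
	oneShot := context.WithValue(context.Background(), &oneShotKey, &oneShotKey)
	fmt.Println(isOneShot(oneShot))              // true
	fmt.Println(isOneShot(context.Background())) // false
}
```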
@ -260,67 +257,53 @@ type httpClusterClient struct {
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
ceps := make([]url.URL, len(eps))
copy(ceps, eps)
// To perform a lookup on the new endpoint list without using the current
// client, we'll copy it
clientCopy := &httpClusterClient{
clientFactory: c.clientFactory,
credentials: c.credentials,
rand: c.rand,
pinned: 0,
endpoints: ceps,
}
mAPI := NewMembersAPI(clientCopy)
leader, err := mAPI.Leader(ctx)
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
mAPI := NewMembersAPI(c)
leader, err := mAPI.Leader(context.Background())
if err != nil {
return "", err
}
if len(leader.ClientURLs) == 0 {
return "", ErrNoLeaderEndpoint
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
func (c *httpClusterClient) SetEndpoints(eps []string) error {
if len(eps) == 0 {
return []url.URL{}, ErrNoEndpoints
return ErrNoEndpoints
}
neps := make([]url.URL, len(eps))
for i, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
return []url.URL{}, err
return err
}
neps[i] = *u
}
return neps, nil
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
switch c.selectionMode {
case EndpointSelectionRandom:
c.endpoints = shuffleEndpoints(c.rand, neps)
c.pinned = 0
case EndpointSelectionPrioritizeLeader:
c.endpoints = neps
lep, err := c.getLeaderEndpoint()
if err != nil {
return ErrNoLeaderEndpoint
}
for i := range c.endpoints {
if c.endpoints[i].String() == lep {
c.pinned = i
break
}
}
// If endpoints doesn't have the lu, just keep c.pinned = 0.
// Forwarding between follower and leader would be required but it works.
default:
return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
}
c.Lock()
defer c.Unlock()
c.endpoints = shuffleEndpoints(c.rand, neps)
// We're not doing anything for PrioritizeLeader here. This is
// due to not having a context meaning we can't call getLeaderEndpoint
// However, if you're using PrioritizeLeader, you've already been told
// to regularly call sync, where we do have a ctx, and can figure the
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
// with it
c.pinned = 0
return nil
}
@ -352,7 +335,6 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
@ -366,9 +348,6 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
@ -379,9 +358,6 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
}
continue
}
if k != pinned {
@ -414,51 +390,27 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
return err
}
var eps []string
c.Lock()
defer c.Unlock()
eps := make([]string, 0)
for _, m := range ms {
eps = append(eps, m.ClientURLs...)
}
sort.Sort(sort.StringSlice(eps))
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
ceps := make([]string, len(c.endpoints))
for i, cep := range c.endpoints {
ceps[i] = cep.String()
}
sort.Sort(sort.StringSlice(ceps))
// fast path if no change happens
// this helps client to pin the endpoint when no cluster change
if reflect.DeepEqual(eps, ceps) {
return nil
}
npin := 0
switch c.selectionMode {
case EndpointSelectionRandom:
c.RLock()
eq := endpointsEqual(c.endpoints, neps)
c.RUnlock()
if eq {
return nil
}
// When items in the endpoint list changes, we choose a new pin
neps = shuffleEndpoints(c.rand, neps)
case EndpointSelectionPrioritizeLeader:
nle, err := c.getLeaderEndpoint(ctx, neps)
if err != nil {
return ErrNoLeaderEndpoint
}
for i, n := range neps {
if n.String() == nle {
npin = i
break
}
}
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
}
c.Lock()
defer c.Unlock()
c.endpoints = neps
c.pinned = npin
return nil
return c.SetEndpoints(eps)
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
@ -644,27 +596,3 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
}
return neps
}
func endpointsEqual(left, right []url.URL) bool {
if len(left) != len(right) {
return false
}
sLeft := make([]string, len(left))
sRight := make([]string, len(right))
for i, l := range left {
sLeft[i] = l.String()
}
for i, r := range right {
sRight[i] = r.String()
}
sort.Strings(sLeft)
sort.Strings(sRight)
for i := range sLeft {
if sLeft[i] != sRight[i] {
return false
}
}
return true
}

View File

@ -855,7 +855,7 @@ func TestHTTPClusterClientAutoSyncFail(t *testing.T) {
}
err = hc.AutoSync(context.Background(), time.Hour)
if !strings.HasPrefix(err.Error(), ErrClusterUnavailable.Error()) {
if err.Error() != ErrClusterUnavailable.Error() {
t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err)
}
}
@ -900,90 +900,6 @@ func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) {
}
}
// TestHTTPClusterClientSyncUnpinEndpoint tests that Sync() unpins the endpoint when
// it gets a different member list than before.
func TestHTTPClusterClientSyncUnpinEndpoint(t *testing.T) {
cf := newStaticHTTPClientFactory([]staticHTTPResponse{
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
},
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"members":[{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
},
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
},
})
hc := &httpClusterClient{
clientFactory: cf,
rand: rand.New(rand.NewSource(0)),
}
err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
if err != nil {
t.Fatalf("unexpected error during setup: %#v", err)
}
wants := []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}
for i := 0; i < 3; i++ {
err = hc.Sync(context.Background())
if err != nil {
t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
}
if g := hc.endpoints[hc.pinned]; g.String() != wants[i] {
t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, wants[i])
}
}
}
// TestHTTPClusterClientSyncPinLeaderEndpoint tests that Sync() pins the leader
// when the selection mode is EndpointSelectionPrioritizeLeader
func TestHTTPClusterClientSyncPinLeaderEndpoint(t *testing.T) {
cf := newStaticHTTPClientFactory([]staticHTTPResponse{
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
},
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]}`),
},
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
},
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}`),
},
})
hc := &httpClusterClient{
clientFactory: cf,
rand: rand.New(rand.NewSource(0)),
selectionMode: EndpointSelectionPrioritizeLeader,
endpoints: []url.URL{{}}, // Need somewhere to pretend to send to initially
}
wants := []string{"http://127.0.0.1:4003", "http://127.0.0.1:4002"}
for i, want := range wants {
err := hc.Sync(context.Background())
if err != nil {
t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
}
pinned := hc.endpoints[hc.pinned].String()
if pinned != want {
t.Errorf("#%d: pinned endpoint = %v, want %v", i, pinned, want)
}
}
}
func TestHTTPClusterClientResetFail(t *testing.T) {
tests := [][]string{
// need at least one endpoint

View File

@ -21,11 +21,7 @@ type ClusterError struct {
}
func (ce *ClusterError) Error() string {
s := ErrClusterUnavailable.Error()
for i, e := range ce.Errors {
s += fmt.Sprintf("; error #%d: %s\n", i, e)
}
return s
return ErrClusterUnavailable.Error()
}
func (ce *ClusterError) Detail() string {

View File

@ -1,134 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"net/http"
"net/http/httptest"
"os"
"strings"
"sync/atomic"
"testing"
"golang.org/x/net/context"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
)
// TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
func TestV2NoRetryEOF(t *testing.T) {
defer testutil.AfterTest(t)
// generate an EOF response; specify address so appears first in sorted ep list
lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("eof:123.%d.sock", os.Getpid()))
defer lEOF.Close()
tries := uint32(0)
go func() {
for {
conn, err := lEOF.Accept()
if err != nil {
return
}
atomic.AddUint32(&tries, 1)
conn.Close()
}
}()
eofURL := integration.UrlScheme + "://" + lEOF.Addr().String()
cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
kapi := client.NewKeysAPI(cli)
for i, f := range noRetryList(kapi) {
startTries := atomic.LoadUint32(&tries)
if err := f(); err == nil {
t.Errorf("#%d: expected EOF error, got nil", i)
}
endTries := atomic.LoadUint32(&tries)
if startTries+1 != endTries {
t.Errorf("#%d: expected 1 try, got %d", i, endTries-startTries)
}
}
}
// TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
func TestV2NoRetryNoLeader(t *testing.T) {
defer testutil.AfterTest(t)
lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("errHttp:123.%d.sock", os.Getpid()))
eh := &errHandler{errCode: http.StatusServiceUnavailable}
srv := httptest.NewUnstartedServer(eh)
defer lHttp.Close()
defer srv.Close()
srv.Listener = lHttp
go srv.Start()
lHttpURL := integration.UrlScheme + "://" + lHttp.Addr().String()
cli := integration.MustNewHTTPClient(t, []string{lHttpURL, lHttpURL}, nil)
kapi := client.NewKeysAPI(cli)
// test error code
for i, f := range noRetryList(kapi) {
reqs := eh.reqs
if err := f(); err == nil || !strings.Contains(err.Error(), "no leader") {
t.Errorf("#%d: expected \"no leader\", got %v", i, err)
}
if eh.reqs != reqs+1 {
t.Errorf("#%d: expected 1 request, got %d", i, eh.reqs-reqs)
}
}
}
// TestV2RetryRefuse tests destructive api calls will retry if a connection is refused.
func TestV2RetryRefuse(t *testing.T) {
defer testutil.AfterTest(t)
cl := integration.NewCluster(t, 1)
cl.Launch(t)
defer cl.Terminate(t)
// test connection refused; expect no error failover
cli := integration.MustNewHTTPClient(t, []string{integration.UrlScheme + "://refuseconn:123", cl.URL(0)}, nil)
kapi := client.NewKeysAPI(cli)
if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil {
t.Fatal(err)
}
for i, f := range noRetryList(kapi) {
if err := f(); err != nil {
t.Errorf("#%d: unexpected retry failure (%v)", i, err)
}
}
}
type errHandler struct {
errCode int
reqs int
}
func (eh *errHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
req.Body.Close()
eh.reqs++
w.WriteHeader(eh.errCode)
}
func noRetryList(kapi client.KeysAPI) []func() error {
return []func() error{
func() error {
opts := &client.SetOptions{PrevExist: client.PrevNoExist}
_, err := kapi.Set(context.Background(), "/setkey", "bar", opts)
return err
},
func() error {
_, err := kapi.Delete(context.Background(), "/delkey", nil)
return err
},
}
}

View File

@ -1,17 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package integration implements tests built upon embedded etcd, focusing on
// the correctness of the etcd v2 client.
package integration

View File

@ -1,20 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package integration
import (
"os"
"testing"
"github.com/coreos/etcd/pkg/testutil"
)
func TestMain(m *testing.M) {
v := m.Run()
if v == 0 && testutil.CheckLeakedGoroutine() {
os.Exit(1)
}
os.Exit(v)
}

File diff suppressed because it is too large

View File

@ -191,10 +191,6 @@ type SetOptions struct {
// Dir specifies whether or not this Node should be created as a directory.
Dir bool
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
// If set, the response will only contain the current value when the request fails.
NoValueOnSuccess bool
}
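A hypothetical use of the `NoValueOnSuccess` field documented above (it exists on only one side of this compare). The helper name, key, and value are illustrative; `kapi` is a `client.KeysAPI` built as in the quorum-read sketch earlier, with the same imports.

```go
// setWithoutEcho sets a key but asks the server not to echo the node's value
// back on success, keeping responses small when values are large.
func setWithoutEcho(ctx context.Context, kapi client.KeysAPI, key, val string) error {
	_, err := kapi.Set(ctx, key, val, &client.SetOptions{NoValueOnSuccess: true})
	return err
}
```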
type GetOptions struct {
@ -272,10 +268,6 @@ type Response struct {
// Index holds the cluster-level index at the time the Response was generated.
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
// ClusterID holds the cluster-level ID reported by the server. This
// should be different for different etcd clusters.
ClusterID string `json:"-"`
}
type Node struct {
@ -343,14 +335,9 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
act.TTL = opts.TTL
act.Refresh = opts.Refresh
act.Dir = opts.Dir
act.NoValueOnSuccess = opts.NoValueOnSuccess
}
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
@ -398,8 +385,7 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption
act.Recursive = opts.Recursive
}
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
@ -532,16 +518,15 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
}
type setAction struct {
Prefix string
Key string
Value string
PrevValue string
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
NoValueOnSuccess bool
Prefix string
Key string
Value string
PrevValue string
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
}
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
@ -575,9 +560,6 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
if a.Refresh {
form.Add("refresh", "true")
}
if a.NoValueOnSuccess {
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
}
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())
@ -669,7 +651,6 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
return nil, err
}
}
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
return &res, nil
}

View File

@ -407,15 +407,6 @@ func TestSetAction(t *testing.T) {
wantURL: "http://example.com/foo?dir=true",
wantBody: "",
},
// NoValueOnSuccess is set
{
act: setAction{
Key: "foo",
NoValueOnSuccess: true,
},
wantURL: "http://example.com/foo?noValueOnSuccess=true",
wantBody: "value=",
},
}
for i, tt := range tests {
@ -673,24 +664,23 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
expiration.UnmarshalText([]byte("2015-04-07T04:40:23.044979686Z"))
tests := []struct {
indexHdr string
clusterIDHdr string
body string
wantRes *Response
wantErr bool
hdr string
body string
wantRes *Response
wantErr bool
}{
// Neither PrevNode or Node
{
indexHdr: "1",
body: `{"action":"delete"}`,
wantRes: &Response{Action: "delete", Index: 1},
wantErr: false,
hdr: "1",
body: `{"action":"delete"}`,
wantRes: &Response{Action: "delete", Index: 1},
wantErr: false,
},
// PrevNode
{
indexHdr: "15",
body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
hdr: "15",
body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
wantRes: &Response{
Action: "delete",
Index: 15,
@ -707,8 +697,8 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
// Node
{
indexHdr: "15",
body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`,
hdr: "15",
body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`,
wantRes: &Response{
Action: "get",
Index: 15,
@ -727,9 +717,8 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
// Node Dir
{
indexHdr: "15",
clusterIDHdr: "abcdef",
body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`,
hdr: "15",
body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`,
wantRes: &Response{
Action: "get",
Index: 15,
@ -739,16 +728,15 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
ModifiedIndex: 12,
CreatedIndex: 10,
},
PrevNode: nil,
ClusterID: "abcdef",
PrevNode: nil,
},
wantErr: false,
},
// PrevNode and Node
{
indexHdr: "15",
body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
hdr: "15",
body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
wantRes: &Response{
Action: "update",
Index: 15,
@ -770,24 +758,24 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
// Garbage in body
{
indexHdr: "",
body: `garbage`,
wantRes: nil,
wantErr: true,
hdr: "",
body: `garbage`,
wantRes: nil,
wantErr: true,
},
// non-integer index
{
indexHdr: "poo",
body: `{}`,
wantRes: nil,
wantErr: true,
hdr: "poo",
body: `{}`,
wantRes: nil,
wantErr: true,
},
}
for i, tt := range tests {
h := make(http.Header)
h.Add("X-Etcd-Index", tt.indexHdr)
h.Add("X-Etcd-Index", tt.hdr)
res, err := unmarshalSuccessfulKeysResponse(h, []byte(tt.body))
if tt.wantErr != (err != nil) {
t.Errorf("#%d: wantErr=%t, err=%v", i, tt.wantErr, err)

View File

@ -14,20 +14,6 @@
package client
import (
"regexp"
)
var (
roleNotFoundRegExp *regexp.Regexp
userNotFoundRegExp *regexp.Regexp
)
func init() {
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
}
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
func IsKeyNotFound(err error) bool {
if cErr, ok := err.(Error); ok {
@ -35,19 +21,3 @@ func IsKeyNotFound(err error) bool {
}
return false
}
// IsRoleNotFound returns true if the error means role not found of v2 API.
func IsRoleNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return roleNotFoundRegExp.MatchString(ae.Message)
}
return false
}
// IsUserNotFound returns true if the error means user not found of v2 API.
func IsUserNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return userNotFoundRegExp.MatchString(ae.Message)
}
return false
}

View File

@ -72,10 +72,6 @@ if err != nil {
}
```
## Metrics
The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
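The referenced example file is not reproduced in this compare. A hedged sketch of scraping those metrics over HTTP is shown below; it assumes the client build that carries the go-grpc-prometheus interceptors and that they register with Prometheus' default registry, and the endpoint and `:2112` listen address are placeholders.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Creating the client is assumed to be enough to install the
	// go-grpc-prometheus unary/stream interceptors on its gRPC connection.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Expose the default Prometheus registry; the client's RPC metrics are
	// assumed to be registered there.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```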
## Examples
More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).

View File

@ -43,7 +43,6 @@ type (
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
Permission authpb.Permission
)
const (
@ -116,12 +115,12 @@ func NewAuth(c *Client) Auth {
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
@ -146,12 +145,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
@ -176,12 +175,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
@ -209,7 +208,7 @@ type authenticator struct {
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}

View File

@ -17,216 +17,41 @@ package clientv3
import (
"net/url"
"strings"
"sync"
"sync/atomic"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// addrs are the client's endpoints for grpc
addrs []grpc.Address
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// mu protects upEps, pinAddr, and connectingAddr
mu sync.RWMutex
// upEps holds the current endpoints that have an active connection
upEps map[string]struct{}
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
upc chan struct{}
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// intialization and shutdown.
pinAddr string
closed bool
// eps are the client's endpoints stripped of any URL scheme
eps []string
ch chan []grpc.Address
numGets uint32
}
func newSimpleBalancer(eps []string) *simpleBalancer {
notifyCh := make(chan []grpc.Address, 1)
func newSimpleBalancer(eps []string) grpc.Balancer {
ch := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
notifyCh <- addrs
sb := &simpleBalancer{
addrs: addrs,
notifyCh: notifyCh,
readyc: make(chan struct{}),
upEps: make(map[string]struct{}),
upc: make(chan struct{}),
host2ep: getHost2ep(eps),
}
return sb
}
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *simpleBalancer) getEndpoint(host string) string {
b.mu.Lock()
defer b.mu.Unlock()
return b.host2ep[host]
}
func getHost2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}
func (b *simpleBalancer) updateAddrs(eps []string) {
np := getHost2ep(eps)
b.mu.Lock()
defer b.mu.Unlock()
match := len(np) == len(b.host2ep)
for k, v := range np {
if b.host2ep[k] != v {
match = false
break
}
}
if match {
// same endpoints, so no need to update address
return
}
b.host2ep = np
addrs := make([]grpc.Address, 0, len(eps))
for i := range eps {
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
}
b.addrs = addrs
b.notifyCh <- addrs
}
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at application layer. Or our simplerBalancer
// might panic since b.upc is closed.
if b.closed {
return func(err error) {}
}
if len(b.upEps) == 0 {
// notify waiting Get()s and pin first connected address
close(b.upc)
b.pinAddr = addr.Addr
}
b.upEps[addr.Addr] = struct{}{}
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
b.mu.Lock()
delete(b.upEps, addr.Addr)
if len(b.upEps) == 0 && b.pinAddr != "" {
b.upc = make(chan struct{})
} else if b.pinAddr == addr.Addr {
// choose new random up endpoint
for k := range b.upEps {
b.pinAddr = k
break
}
}
b.mu.Unlock()
}
ch <- addrs
return &simpleBalancer{eps: eps, ch: ch}
}
func (b *simpleBalancer) Start(target string) error { return nil }
func (b *simpleBalancer) Up(addr grpc.Address) func(error) { return func(error) {} }
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var addr string
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed := b.closed
addr = b.pinAddr
upEps := len(b.upEps)
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if upEps == 0 {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
addr = b.pinAddr
upEps := len(b.upEps)
b.mu.RUnlock()
if addr == "" {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if upEps > 0 {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
v := atomic.AddUint32(&b.numGets, 1)
ep := b.eps[v%uint32(len(b.eps))]
return grpc.Address{Addr: getHost(ep)}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.ch }
func (b *simpleBalancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
// In case gRPC calls close twice. TODO: remove the checking
// when we are sure that gRPC wont call close twice.
if b.closed {
return nil
}
b.closed = true
close(b.notifyCh)
// terminate all waiting Get()s
b.pinAddr = ""
if len(b.upEps) == 0 {
close(b.upc)
}
close(b.ch)
return nil
}

View File

@ -1,106 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"errors"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
var (
endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
)
func TestBalancerGetUnblocking(t *testing.T) {
sb := newSimpleBalancer(endpoints)
unblockingOpts := grpc.BalancerGetOptions{BlockingWait: false}
_, _, err := sb.Get(context.Background(), unblockingOpts)
if err != ErrNoAddrAvilable {
t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
}
down1 := sb.Up(grpc.Address{Addr: endpoints[1]})
down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
addrFirst, putFun, err := sb.Get(context.Background(), unblockingOpts)
if err != nil {
t.Errorf("Get() with up endpoints should success, got %v", err)
}
if addrFirst.Addr != endpoints[1] {
t.Errorf("Get() didn't return expected address, got %v", addrFirst)
}
if putFun == nil {
t.Errorf("Get() returned unexpected nil put function")
}
addrSecond, _, _ := sb.Get(context.Background(), unblockingOpts)
if addrFirst.Addr != addrSecond.Addr {
t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
}
down1(errors.New("error"))
down2(errors.New("error"))
_, _, err = sb.Get(context.Background(), unblockingOpts)
if err != ErrNoAddrAvilable {
t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
}
}
func TestBalancerGetBlocking(t *testing.T) {
sb := newSimpleBalancer(endpoints)
blockingOpts := grpc.BalancerGetOptions{BlockingWait: true}
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
_, _, err := sb.Get(ctx, blockingOpts)
if err != context.DeadlineExceeded {
t.Errorf("Get() with no up endpoints should timeout, got %v", err)
}
downC := make(chan func(error), 1)
go func() {
// ensure sb.Up() will be called after sb.Get() to see if Up() releases blocking Get()
time.Sleep(time.Millisecond * 100)
downC <- sb.Up(grpc.Address{Addr: endpoints[1]})
}()
addrFirst, putFun, err := sb.Get(context.Background(), blockingOpts)
if err != nil {
t.Errorf("Get() with up endpoints should success, got %v", err)
}
if addrFirst.Addr != endpoints[1] {
t.Errorf("Get() didn't return expected address, got %v", addrFirst)
}
if putFun == nil {
t.Errorf("Get() returned unexpected nil put function")
}
down1 := <-downC
down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
addrSecond, _, _ := sb.Get(context.Background(), blockingOpts)
if addrFirst.Addr != addrSecond.Addr {
t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
}
down1(errors.New("error"))
down2(errors.New("error"))
ctx, _ = context.WithTimeout(context.Background(), time.Millisecond*100)
_, _, err = sb.Get(ctx, blockingOpts)
if err != context.DeadlineExceeded {
t.Errorf("Get() with no up endpoints should timeout, got %v", err)
}
}

View File

@ -18,18 +18,17 @@ import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/url"
"strings"
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
@ -47,12 +46,9 @@ type Client struct {
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
balancer *simpleBalancer
retryWrapper retryRpcFunc
retryAuthWrapper retryRpcFunc
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
ctx context.Context
cancel context.CancelFunc
@ -61,8 +57,6 @@ type Client struct {
Username string
// Password is a password for authentication
Password string
// tokenCred is an instance of WithPerRPCCredentials()'s argument
tokenCred *authTokenCredential
}
// New creates a new etcdv3 client from a given configuration.
@ -91,8 +85,6 @@ func NewFromConfigFile(path string) (*Client, error) {
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
c.Watcher.Close()
c.Lease.Close()
return toErr(c.ctx, c.conn.Close())
}
@ -102,54 +94,10 @@ func (c *Client) Close() error {
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() (eps []string) {
// copy the slice; protect original endpoints from being changed
eps = make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
return
}
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.cfg.Endpoints = eps
c.balancer.updateAddrs(eps)
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
mresp, err := c.MemberList(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range mresp.Members {
eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
func (c *Client) autoSync() {
if c.cfg.AutoSyncInterval == time.Duration(0) {
return
}
for {
select {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
logger.Println("Auto sync endpoints failed:", err)
}
}
}
}
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
type authTokenCredential struct {
token string
tokenMu *sync.RWMutex
token string
}
func (cred authTokenCredential) RequireTransportSecurity() bool {
@ -157,38 +105,24 @@ func (cred authTokenCredential) RequireTransportSecurity() bool {
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
cred.tokenMu.RLock()
defer cred.tokenMu.RUnlock()
return map[string]string{
"token": cred.token,
}, nil
}
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
proto = "tcp"
host = endpoint
creds = c.creds
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
}
scheme = url.Scheme
// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "http", "https":
case "unix":
proto = "unix"
default:
proto, host = "", ""
}
return
}
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
case "http":
creds = nil
case "https":
@ -199,20 +133,30 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
creds = nil
return "", "", nil
}
return
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
// dialSetupOpts gives the dial opts prioer to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) []grpc.DialOption {
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithTimeout(c.cfg.DialTimeout),
}
opts = append(opts, dopts...)
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep := make(map[string]string)
for i := range c.cfg.Endpoints {
_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
host2ep[host] = c.cfg.Endpoints[i]
}
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
proto, host, _ := c.dialTarget(host2ep[host])
if proto == "" {
return nil, fmt.Errorf("unknown scheme for %q", host)
}
@ -221,15 +165,11 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
return nil, c.ctx.Err()
default:
}
dialer := &net.Dialer{Timeout: t}
return dialer.DialContext(c.ctx, proto, host)
return net.DialTimeout(proto, host, t)
}
opts = append(opts, grpc.WithDialer(f))
creds := c.creds
if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
creds = c.processCreds(scheme)
}
_, _, creds := c.dialTarget(endpoint)
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
@ -244,56 +184,24 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
return c.dial(endpoint)
}
func (c *Client) getToken(ctx context.Context) error {
var err error // return last error in a case of fail
var auth *authenticator
for i := 0; i < len(c.cfg.Endpoints); i++ {
endpoint := c.cfg.Endpoints[i]
host := getHost(endpoint)
// use dial options without dopts to avoid reusing the client balancer
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
if err != nil {
continue
}
defer auth.close()
var resp *AuthenticateResponse
resp, err = auth.authenticate(ctx, c.Username, c.Password)
if err != nil {
continue
}
c.tokenCred.tokenMu.Lock()
c.tokenCred.token = resp.Token
c.tokenCred.tokenMu.Unlock()
return nil
}
return err
}
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts := c.dialSetupOpts(endpoint, dopts...)
host := getHost(endpoint)
if c.Username != "" && c.Password != "" {
c.tokenCred = &authTokenCredential{
tokenMu: &sync.RWMutex{},
}
err := c.getToken(context.TODO())
// use dial options without dopts to avoid reusing the client balancer
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
if err != nil {
return nil, err
}
defer auth.close()
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
}
// add metrics options
opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
conn, err := grpc.Dial(host, opts...)
if err != nil {
return nil, err
@ -332,31 +240,12 @@ func newClient(cfg *Config) (*Client, error) {
client.Password = cfg.Password
}
client.balancer = newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
b := newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(b))
if err != nil {
return nil, err
}
client.conn = conn
client.retryWrapper = client.newRetryWrapper()
client.retryAuthWrapper = client.newAuthRetryWrapper()
// wait for a connection
if cfg.DialTimeout > 0 {
hasConn := false
waitc := time.After(cfg.DialTimeout)
select {
case <-client.balancer.readyc:
hasConn = true
case <-ctx.Done():
case <-waitc:
}
if !hasConn {
client.cancel()
conn.Close()
return nil, grpc.ErrClientConnTimeout
}
}
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
@ -364,8 +253,13 @@ func newClient(cfg *Config) (*Client, error) {
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.Logger != nil {
logger.Set(cfg.Logger)
} else {
// disable client side grpc by default
logger.Set(log.New(ioutil.Discard, "", 0))
}
go client.autoSync()
return client, nil
}
@ -381,14 +275,8 @@ func isHaltErr(ctx context.Context, err error) bool {
if err == nil {
return false
}
code := grpc.Code(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return code != codes.Unavailable && code != codes.Internal
return strings.HasPrefix(grpc.ErrorDesc(err), "etcdserver: ") ||
strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
}
func toErr(ctx context.Context, err error) error {
@ -396,20 +284,9 @@ func toErr(ctx context.Context, err error) error {
return nil
}
err = rpctypes.Error(err)
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
code := grpc.Code(err)
switch code {
case codes.DeadlineExceeded:
fallthrough
case codes.Canceled:
if ctx.Err() != nil {
err = ctx.Err()
}
case codes.Unavailable:
err = ErrNoAvailableEndpoints
case codes.FailedPrecondition:
if ctx.Err() != nil && strings.Contains(err.Error(), "context") {
err = ctx.Err()
} else if strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) {
err = grpc.ErrClientConnClosing
}
return err

View File

@ -16,60 +16,14 @@ package clientv3
import (
"fmt"
"net"
"testing"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
func TestDialCancel(t *testing.T) {
defer testutil.AfterTest(t)
// accept first connection so client is created with dial timeout
ln, err := net.Listen("unix", "dialcancel:12345")
if err != nil {
t.Fatal(err)
}
defer ln.Close()
ep := "unix://dialcancel:12345"
cfg := Config{
Endpoints: []string{ep},
DialTimeout: 30 * time.Second}
c, err := New(cfg)
if err != nil {
t.Fatal(err)
}
// connect to ipv4 blackhole so dial blocks
c.SetEndpoints("http://254.0.0.1:12345")
// issue Get to force redial attempts
go c.Get(context.TODO(), "abc")
// wait a little bit so client close is after dial starts
time.Sleep(100 * time.Millisecond)
donec := make(chan struct{})
go func() {
defer close(donec)
c.Close()
}()
select {
case <-time.After(5 * time.Second):
t.Fatalf("failed to close")
case <-donec:
}
}
func TestDialTimeout(t *testing.T) {
defer testutil.AfterTest(t)
donec := make(chan error)
go func() {
// without timeout, grpc keeps redialing if connection refused
@ -101,24 +55,9 @@ func TestDialTimeout(t *testing.T) {
}
}
func TestDialNoTimeout(t *testing.T) {
cfg := Config{Endpoints: []string{"127.0.0.1:12345"}}
c, err := New(cfg)
if c == nil || err != nil {
t.Fatalf("new client with DialNoWait should succeed, got %v", err)
}
c.Close()
}
func TestIsHaltErr(t *testing.T) {
if !isHaltErr(nil, fmt.Errorf("etcdserver: some etcdserver error")) {
t.Errorf(`error prefixed with "etcdserver: " should be Halted by default`)
}
if isHaltErr(nil, rpctypes.ErrGRPCStopped) {
t.Errorf("error %v should not halt", rpctypes.ErrGRPCStopped)
}
if isHaltErr(nil, rpctypes.ErrGRPCNoLeader) {
t.Errorf("error %v should not halt", rpctypes.ErrGRPCNoLeader)
t.Errorf(`error prefixed with "etcdserver: " should be Halted`)
}
ctx, cancel := context.WithCancel(context.TODO())
if isHaltErr(ctx, nil) {

View File

@ -17,7 +17,6 @@ package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@ -47,7 +46,7 @@ type cluster struct {
}
func NewCluster(c *Client) Cluster {
return &cluster{remote: RetryClusterClient(c)}
return &cluster{remote: pb.NewClusterClient(c.conn)}
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
@ -78,7 +77,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
resp, err := c.remote.MemberUpdate(ctx, r)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
@ -91,7 +90,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{})
if err == nil {
return (*MemberListResponse)(resp), nil
}

View File

@ -36,8 +36,6 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
switch result {
case "=":
r = pb.Compare_EQUAL
case "!=":
r = pb.Compare_NOT_EQUAL
case ">":
r = pb.Compare_GREATER
case "<":

View File

@ -29,7 +29,7 @@ var (
)
type Election struct {
session *Session
client *v3.Client
keyPrefix string
@ -39,24 +39,27 @@ type Election struct {
}
// NewElection returns a new election on a given key prefix.
func NewElection(s *Session, pfx string) *Election {
return &Election{session: s, keyPrefix: pfx + "/"}
func NewElection(client *v3.Client, pfx string) *Election {
return &Election{client: client, keyPrefix: pfx}
}
// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
s := e.session
client := e.session.Client()
s, serr := NewSession(e.client)
if serr != nil {
return serr
}
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
k := fmt.Sprintf("%s/%x", e.keyPrefix, s.Lease())
txn := e.client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
txn = txn.Else(v3.OpGet(k))
resp, err := txn.Commit()
if err != nil {
return err
}
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
if !resp.Succeeded {
kv := resp.Responses[0].GetResponseRange().Kvs[0]
@ -69,12 +72,12 @@ func (e *Election) Campaign(ctx context.Context, val string) error {
}
}
err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
err = waitDeletes(ctx, e.client, e.keyPrefix, v3.WithPrefix(), v3.WithRev(e.leaderRev-1))
if err != nil {
// clean up in case of context cancel
select {
case <-ctx.Done():
e.Resign(client.Ctx())
e.Resign(e.client.Ctx())
default:
e.leaderSession = nil
}
@ -89,9 +92,8 @@ func (e *Election) Proclaim(ctx context.Context, val string) error {
if e.leaderSession == nil {
return ErrElectionNotLeader
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
txn := client.Txn(ctx).If(cmp)
txn := e.client.Txn(ctx).If(cmp)
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
tresp, terr := txn.Commit()
if terr != nil {
@ -109,8 +111,7 @@ func (e *Election) Resign(ctx context.Context) (err error) {
if e.leaderSession == nil {
return nil
}
client := e.session.Client()
_, err = client.Delete(ctx, e.leaderKey)
_, err = e.client.Delete(ctx, e.leaderKey)
e.leaderKey = ""
e.leaderSession = nil
return err
@ -118,8 +119,7 @@ func (e *Election) Resign(ctx context.Context) (err error) {
// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (string, error) {
client := e.session.Client()
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
resp, err := e.client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return "", err
} else if len(resp.Kvs) == 0 {
@ -139,11 +139,9 @@ func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
}
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
client := e.session.Client()
defer close(ch)
for {
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
resp, err := e.client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return
}
@ -154,7 +152,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
if len(resp.Kvs) == 0 {
// wait for first key put on prefix
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
wch := client.Watch(cctx, e.keyPrefix, opts...)
wch := e.client.Watch(cctx, e.keyPrefix, opts...)
for kv == nil {
wr, ok := <-wch
@ -174,7 +172,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
kv = resp.Kvs[0]
}
wch := client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
wch := e.client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
keyDeleted := false
for !keyDeleted {
wr, ok := <-wch
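
Since the hunks above show both a session-based and a client-based NewElection, here is a minimal usage sketch (not part of the diff) assuming the client-based NewElection(client, pfx) signature; the prefix and value are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

// campaign blocks until this candidate wins the election under pfx.
func campaign(cli *clientv3.Client, pfx, val string) {
	e := concurrency.NewElection(cli, pfx)
	if err := e.Campaign(context.TODO(), val); err != nil {
		log.Fatal(err)
	}
	leader, err := e.Leader(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current leader value:", leader)
	// Resign releases leadership so another candidate can win.
	if err := e.Resign(context.TODO()); err != nil {
		log.Fatal(err)
	}
}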

View File

@ -16,6 +16,7 @@ package concurrency
import (
"fmt"
"math"
v3 "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
@ -25,40 +26,46 @@ import (
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
var wr v3.WatchResponse
wch := client.Watch(cctx, key, v3.WithRev(rev))
for wr = range wch {
for wr := range wch {
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
return nil
}
}
}
if err := wr.Err(); err != nil {
return err
}
if err := ctx.Err(); err != nil {
return err
}
return fmt.Errorf("lost watcher waiting for delete")
}
// waitDeletes efficiently waits until all keys matching the prefix with a create
// revision at or below maxCreateRev have been deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error {
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
for {
resp, err := client.Get(ctx, pfx, getOpts...)
if err != nil {
return err
// waitDeletes efficiently waits until all keys matched by Get(key, opts...) are deleted
func waitDeletes(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
getOpts := []v3.OpOption{v3.WithSort(v3.SortByCreateRevision, v3.SortAscend)}
getOpts = append(getOpts, opts...)
resp, err := client.Get(ctx, key, getOpts...)
maxRev := int64(math.MaxInt64)
getOpts = append(getOpts, v3.WithRev(0))
for err == nil {
for len(resp.Kvs) > 0 {
i := len(resp.Kvs) - 1
if resp.Kvs[i].CreateRevision <= maxRev {
break
}
resp.Kvs = resp.Kvs[:i]
}
if len(resp.Kvs) == 0 {
return nil
break
}
lastKey := string(resp.Kvs[0].Key)
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
return err
lastKV := resp.Kvs[len(resp.Kvs)-1]
maxRev = lastKV.CreateRevision
err = waitDelete(ctx, client, string(lastKV.Key), maxRev)
if err != nil || len(resp.Kvs) == 1 {
break
}
getOpts = append(getOpts, v3.WithLimit(int64(len(resp.Kvs)-1)))
resp, err = client.Get(ctx, key, getOpts...)
}
return err
}

View File

@ -24,30 +24,32 @@ import (
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
s *Session
client *v3.Client
pfx string
myKey string
myRev int64
}
func NewMutex(s *Session, pfx string) *Mutex {
return &Mutex{s, pfx + "/", "", -1}
func NewMutex(client *v3.Client, pfx string) *Mutex {
return &Mutex{client, pfx, "", -1}
}
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
s := m.s
client := m.s.Client()
s, serr := NewSession(m.client)
if serr != nil {
return serr
}
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
m.myKey = fmt.Sprintf("%s/%x", m.pfx, s.Lease())
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
// put self in lock waiters via myKey; oldest waiter holds lock
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
resp, err := m.client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
if err != nil {
return err
}
@ -57,19 +59,18 @@ func (m *Mutex) Lock(ctx context.Context) error {
}
// wait for deletion revisions prior to myKey
err = waitDeletes(ctx, client, m.pfx, m.myRev-1)
err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
// release lock key if cancelled
select {
case <-ctx.Done():
m.Unlock(client.Ctx())
m.Unlock(m.client.Ctx())
default:
}
return err
}
func (m *Mutex) Unlock(ctx context.Context) error {
client := m.s.Client()
if _, err := client.Delete(ctx, m.myKey); err != nil {
if _, err := m.client.Delete(ctx, m.myKey); err != nil {
return err
}
m.myKey = "\x00"
@ -86,19 +87,17 @@ func (m *Mutex) Key() string { return m.myKey }
type lockerMutex struct{ *Mutex }
func (lm *lockerMutex) Lock() {
client := lm.s.Client()
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
if err := lm.Mutex.Lock(lm.client.Ctx()); err != nil {
panic(err)
}
}
func (lm *lockerMutex) Unlock() {
client := lm.s.Client()
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
if err := lm.Mutex.Unlock(lm.client.Ctx()); err != nil {
panic(err)
}
}
// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
return &lockerMutex{NewMutex(s, pfx)}
func NewLocker(client *v3.Client, pfx string) sync.Locker {
return &lockerMutex{NewMutex(client, pfx)}
}
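
For context on the locking flow changed above, here is a minimal sketch (not part of the diff) assuming the client-based NewMutex(client, pfx) signature; the prefix is a placeholder.

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

// withLock runs fn while holding the distributed lock under pfx.
func withLock(cli *clientv3.Client, pfx string, fn func()) {
	m := concurrency.NewMutex(cli, pfx)
	if err := m.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := m.Unlock(context.TODO()); err != nil {
			log.Fatal(err)
		}
	}()
	fn() // critical section: only one holder of the lock key at a time
}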

View File

@ -15,19 +15,26 @@
package concurrency
import (
"time"
"sync"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
const defaultSessionTTL = 60
// only keep one ephemeral lease per client
var clientSessions clientSessionMgr = clientSessionMgr{sessions: make(map[*v3.Client]*Session)}
const sessionTTL = 60
type clientSessionMgr struct {
sessions map[*v3.Client]*Session
mu sync.Mutex
}
// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
client *v3.Client
opts *sessionOptions
id v3.LeaseID
cancel context.CancelFunc
@ -35,30 +42,37 @@ type Session struct {
}
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
for _, opt := range opts {
opt(ops)
func NewSession(client *v3.Client) (*Session, error) {
clientSessions.mu.Lock()
defer clientSessions.mu.Unlock()
if s, ok := clientSessions.sessions[client]; ok {
return s, nil
}
resp, err := client.Grant(ops.ctx, int64(ops.ttl))
resp, err := client.Grant(client.Ctx(), sessionTTL)
if err != nil {
return nil, err
}
id := v3.LeaseID(resp.ID)
ctx, cancel := context.WithCancel(ops.ctx)
ctx, cancel := context.WithCancel(client.Ctx())
keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil {
return nil, err
}
donec := make(chan struct{})
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
s := &Session{client: client, id: id, cancel: cancel, donec: donec}
clientSessions.sessions[client] = s
// keep the lease alive until client error or cancelled context
go func() {
defer close(donec)
defer func() {
clientSessions.mu.Lock()
delete(clientSessions.sessions, client)
clientSessions.mu.Unlock()
close(donec)
}()
for range keepAlive {
// eat messages until keep alive channel closes
}
@ -67,11 +81,6 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
return s, nil
}
// Client is the etcd client that is attached to the session.
func (s *Session) Client() *v3.Client {
return s.client
}
// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }
@ -90,38 +99,6 @@ func (s *Session) Orphan() {
// Close orphans the session and revokes the session lease.
func (s *Session) Close() error {
s.Orphan()
// if revoke takes longer than the ttl, lease is expired anyway
ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
_, err := s.client.Revoke(ctx, s.id)
cancel()
_, err := s.client.Revoke(s.client.Ctx(), s.id)
return err
}
type sessionOptions struct {
ttl int
ctx context.Context
}
// SessionOption configures Session.
type SessionOption func(*sessionOptions)
// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60 seconds TTL will be used.
func WithTTL(ttl int) SessionOption {
return func(so *sessionOptions) {
if ttl > 0 {
so.ttl = ttl
}
}
}
// WithContext assigns a context to the session instead of defaulting to
// using the client context. This is useful for canceling NewSession and
// Close operations immediately without having to close the client. If the
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
return func(so *sessionOptions) {
so.ctx = ctx
}
}
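
As a brief illustration of the session lifecycle handled above, here is a hedged sketch (not part of the diff) using the single-argument NewSession(client) form shown in this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// leaseForClient creates a session whose lease is kept alive until Close.
func leaseForClient(cli *clientv3.Client) {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close() // revokes the lease; keys attached to it expire

	fmt.Printf("session lease ID: %x\n", s.Lease())
}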

View File

@ -249,10 +249,11 @@ func (s *stmReadCommitted) commit() *v3.TxnResponse {
}
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
rev := r.Header.Revision + 1
if len(r.Kvs) != 0 {
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
rev = r.Kvs[0].ModRevision + 1
}
return v3.Compare(v3.ModRevision(k), "=", 0)
return v3.Compare(v3.ModRevision(k), "<", rev)
}
func respToValue(resp *v3.GetResponse) string {

View File

@ -28,16 +28,15 @@ type Config struct {
// Endpoints is a list of URLs
Endpoints []string
// AutoSyncInterval is the interval to update endpoints with its latest members.
// 0 disables auto-sync. By default auto-sync is disabled.
AutoSyncInterval time.Duration
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Logger is the logger used by client library.
Logger Logger
// Username is a username for authentication
Username string
@ -47,7 +46,6 @@ type Config struct {
type yamlConfig struct {
Endpoints []string `json:"endpoints"`
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"`
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
@ -70,9 +68,8 @@ func configFromFile(fpath string) (*Config, error) {
}
cfg := &Config{
Endpoints: yc.Endpoints,
AutoSyncInterval: yc.AutoSyncInterval,
DialTimeout: yc.DialTimeout,
Endpoints: yc.Endpoints,
DialTimeout: yc.DialTimeout,
}
if yc.InsecureTransport {
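
To illustrate the Config fields touched above (Endpoints, DialTimeout, TLS), here is a minimal sketch (not part of the diff); the certificate paths are placeholders.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/pkg/transport"
)

// newTLSClient builds a client from a transport.TLSInfo, mirroring the TLS field above.
func newTLSClient() *clientv3.Client {
	tlsInfo := transport.TLSInfo{
		CertFile:      "/etc/etcd/client.pem",
		KeyFile:       "/etc/etcd/client-key.pem",
		TrustedCAFile: "/etc/etcd/ca.pem",
	}
	tlsConfig, err := tlsInfo.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
		TLS:         tlsConfig,
	})
	if err != nil {
		log.Fatal(err)
	}
	return cli
}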

View File

@ -44,7 +44,7 @@
// etcd client returns 2 types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
//
// Here is the example code to handle client errors:
//
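
Following the doc comment above, here is a hedged sketch (not part of the diff) that separates the two error classes it describes: context errors and etcd server (rpctypes) errors.

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

// handlePutError distinguishes context errors from etcd server errors.
func handlePutError(cli *clientv3.Client) {
	_, err := cli.Put(context.TODO(), "", "sample") // empty key triggers a server error
	switch err {
	case nil:
		return
	case context.Canceled, context.DeadlineExceeded:
		log.Println("context error:", err)
	case rpctypes.ErrEmptyKey:
		log.Println("etcd server error:", err)
	default:
		log.Println("other error (e.g. transport):", err)
	}
}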

View File

@ -32,63 +32,35 @@ func ExampleAuth() {
}
defer cli.Close()
if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil {
log.Fatal(err)
}
if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil {
log.Fatal(err)
}
if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil {
authapi := clientv3.NewAuth(cli)
if _, err = authapi.RoleAdd(context.TODO(), "root"); err != nil {
log.Fatal(err)
}
if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil {
log.Fatal(err)
}
if _, err = cli.RoleGrantPermission(
if _, err = authapi.RoleGrantPermission(
context.TODO(),
"r", // role name
"foo", // key
"zoo", // range end
"root", // role name
"foo", // key
"zoo", // range end
clientv3.PermissionType(clientv3.PermReadWrite),
); err != nil {
log.Fatal(err)
}
if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil {
if _, err = authapi.UserAdd(context.TODO(), "root", "123"); err != nil {
log.Fatal(err)
}
if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil {
if _, err = authapi.UserGrantRole(context.TODO(), "root", "root"); err != nil {
log.Fatal(err)
}
if _, err = cli.AuthEnable(context.TODO()); err != nil {
if _, err = authapi.AuthEnable(context.TODO()); err != nil {
log.Fatal(err)
}
cliAuth, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
Username: "u",
Password: "123",
})
if err != nil {
log.Fatal(err)
}
defer cliAuth.Close()
if _, err = cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil {
log.Fatal(err)
}
_, err = cliAuth.Txn(context.TODO()).
If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
Then(clientv3.OpPut("zoo1", "XYZ")).
Else(clientv3.OpPut("zoo1", "ABC")).
Commit()
fmt.Println(err)
// now check the permission with the root account
rootCli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
Username: "root",
@ -97,17 +69,31 @@ func ExampleAuth() {
if err != nil {
log.Fatal(err)
}
defer rootCli.Close()
defer cliAuth.Close()
resp, err := rootCli.RoleGet(context.TODO(), "r")
kv := clientv3.NewKV(cliAuth)
if _, err = kv.Put(context.TODO(), "foo1", "bar"); err != nil {
log.Fatal(err)
}
_, err = kv.Txn(context.TODO()).
If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
Then(clientv3.OpPut("zoo1", "XYZ")).
Else(clientv3.OpPut("zoo1", "ABC")).
Commit()
fmt.Println(err)
// now check the permission
authapi2 := clientv3.NewAuth(cliAuth)
resp, err := authapi2.RoleGet(context.TODO(), "root")
if err != nil {
log.Fatal(err)
}
fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
fmt.Printf("root user permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
if _, err = rootCli.AuthDisable(context.TODO()); err != nil {
if _, err = authapi2.AuthDisable(context.TODO()); err != nil {
log.Fatal(err)
}
// Output: etcdserver: permission denied
// user u permission: key "foo", range end "zoo"
// root user permission: key "foo", range end "zoo"
}

View File

@ -1,46 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3_test
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
func ExampleMetrics_All() {
// listen for all prometheus metrics
go func() {
http.Handle("/metrics", prometheus.Handler())
log.Fatal(http.ListenAndServe(":47989", nil))
}()
url := "http://localhost:47989/metrics"
// make an http request to fetch all prometheus metrics
resp, err := http.Get(url)
if err != nil {
log.Fatalf("fetch error: %v", err)
}
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Fatalf("fetch error: reading %s: %v", url, err)
}
fmt.Printf("%s", b)
}

View File

@ -19,8 +19,6 @@ import (
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/pkg/capnslog"
"golang.org/x/net/context"
)
@ -31,9 +29,6 @@ var (
)
func Example() {
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
clientv3.SetLogger(plog)
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
@ -48,29 +43,3 @@ func Example() {
log.Fatal(err)
}
}
func ExampleConfig_withTLS() {
tlsInfo := transport.TLSInfo{
CertFile: "/tmp/test-certs/test-name-1.pem",
KeyFile: "/tmp/test-certs/test-name-1-key.pem",
TrustedCAFile: "/tmp/test-certs/trusted-ca.pem",
}
tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
log.Fatal(err)
}
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: dialTimeout,
TLS: tlsConfig,
})
if err != nil {
log.Fatal(err)
}
defer cli.Close() // make sure to close the client
_, err = cli.Put(context.TODO(), "foo", "bar")
if err != nil {
log.Fatal(err)
}
}

View File

@ -1,60 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"math/rand"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func TestDialSetEndpoints(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// get endpoint list
eps := make([]string, 3)
for i := range eps {
eps[i] = clus.Members[i].GRPCAddr()
}
toKill := rand.Intn(len(eps))
cfg := clientv3.Config{Endpoints: []string{eps[toKill]}, DialTimeout: 1 * time.Second}
cli, err := clientv3.New(cfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// make a dead node
clus.Members[toKill].Stop(t)
clus.WaitLeader(t)
// update client with available endpoints
cli.SetEndpoints(eps[(toKill+1)%3])
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
t.Fatal(err)
}
cancel()
}

View File

@ -16,8 +16,6 @@ package integration
import (
"bytes"
"math/rand"
"os"
"reflect"
"strings"
"testing"
@ -36,8 +34,8 @@ func TestKVPutError(t *testing.T) {
defer testutil.AfterTest(t)
var (
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
quota = int64(int(maxReqBytes) + 8*os.Getpagesize())
maxReqBytes = 1.5 * 1024 * 1024
quota = int64(maxReqBytes * 1.2)
)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
defer clus.Terminate(t)
@ -50,7 +48,7 @@ func TestKVPutError(t *testing.T) {
t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
}
_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) // 1.5MB
if err != rpctypes.ErrRequestTooLarge {
t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
}
@ -60,7 +58,7 @@ func TestKVPutError(t *testing.T) {
t.Fatal(err)
}
time.Sleep(1 * time.Second) // give enough time for commit
time.Sleep(500 * time.Millisecond) // give enough time for commit
_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
if err != rpctypes.ErrNoSpace { // over quota
@ -226,21 +224,6 @@ func TestKVRange(t *testing.T) {
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
},
},
// range all with SortByKey, missing sorting order (ASCEND by default)
{
"a", "x",
0,
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortNone)},
[]*mvccpb.KeyValue{
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
},
},
// range all with SortByCreateRevision, SortDescend
{
"a", "x",
@ -256,21 +239,6 @@ func TestKVRange(t *testing.T) {
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
},
},
// range all with SortByCreateRevision, missing sorting order (ASCEND by default)
{
"a", "x",
0,
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortNone)},
[]*mvccpb.KeyValue{
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
},
},
// range all with SortByModRevision, SortDescend
{
"a", "x",
@ -679,121 +647,18 @@ func TestKVGetCancel(t *testing.T) {
}
}
// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
func TestKVGetStoppedServerAndClose(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
clus.Members[0].Stop(t)
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
// this Get fails and triggers an asynchronous connection retry
_, err := cli.Get(ctx, "abc")
cancel()
if !strings.Contains(err.Error(), "context deadline") {
t.Fatal(err)
}
}
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
clus.Members[0].Stop(t)
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
// get retries on all errors.
// so here we use it to eat the potential broken pipe error for the next put.
// grpc client might see a broken pipe error when we issue the get request before
// grpc finds out the original connection is down due to the member shutdown.
_, err := cli.Get(ctx, "abc")
cancel()
if !strings.Contains(err.Error(), "context deadline") {
t.Fatal(err)
}
// this Put fails and triggers an asynchronous connection retry
_, err = cli.Put(ctx, "abc", "123")
_, err := cli.Put(ctx, "abc", "123")
cancel()
if !strings.Contains(err.Error(), "context deadline") {
t.Fatal(err)
}
}
// TestKVPutOneEndpointDown ensures a client can connect and get if one endpoint is down
func TestKVPutOneEndpointDown(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// get endpoint list
eps := make([]string, 3)
for i := range eps {
eps[i] = clus.Members[i].GRPCAddr()
}
// make a dead node
clus.Members[rand.Intn(len(eps))].Stop(t)
// try to connect with dead node in the endpoint list
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
cli, err := clientv3.New(cfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
t.Fatal(err)
}
cancel()
}
// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other
// endpoints are down, then it will reconnect.
func TestKVGetResetLoneEndpoint(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
// get endpoint list
eps := make([]string, 2)
for i := range eps {
eps[i] = clus.Members[i].GRPCAddr()
}
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond}
cli, err := clientv3.New(cfg)
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// disconnect everything
clus.Members[0].Stop(t)
clus.Members[1].Stop(t)
// have Get try to reconnect
donec := make(chan struct{})
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
t.Fatal(err)
}
cancel()
close(donec)
}()
time.Sleep(500 * time.Millisecond)
clus.Members[0].Restart(t)
select {
case <-time.After(10 * time.Second):
t.Fatalf("timed out waiting for Get")
case <-donec:
}
}

View File

@ -15,14 +15,10 @@
package integration
import (
"reflect"
"sort"
"sync"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
@ -156,30 +152,6 @@ func TestLeaseKeepAlive(t *testing.T) {
}
}
func TestLeaseKeepAliveOneSecond(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
resp, err := cli.Grant(context.Background(), 1)
if err != nil {
t.Errorf("failed to create lease %v", err)
}
rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
if kerr != nil {
t.Errorf("failed to keepalive lease %v", kerr)
}
for i := 0; i < 3; i++ {
if _, ok := <-rc; !ok {
t.Errorf("chan is closed, want not closed")
}
}
}
// TODO: add a client that can connect to all the members of cluster via unix sock.
// TODO: test handle more complicated failures.
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
@ -387,8 +359,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
if kerr != nil {
t.Fatal(kerr)
}
kresp := <-rc
if kresp.ID != resp.ID {
if kresp := <-rc; kresp.ID != resp.ID {
t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
}
@ -403,14 +374,13 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
clus.Members[0].Restart(t)
// some keep-alives may still be buffered; drain until close
timer := time.After(time.Duration(kresp.TTL) * time.Second)
for kresp != nil {
select {
case kresp = <-rc:
case <-timer:
t.Fatalf("keepalive channel did not close")
select {
case ka, ok := <-rc:
if ok {
t.Fatalf("unexpected keepalive %v", ka)
}
case <-time.After(5 * time.Second):
t.Fatalf("keepalive channel did not close")
}
}
@ -483,174 +453,3 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
clus.Members[0].Restart(t)
}
func TestLeaseTimeToLive(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
lapi := clientv3.NewLease(clus.RandClient())
defer lapi.Close()
resp, err := lapi.Grant(context.Background(), 10)
if err != nil {
t.Errorf("failed to create lease %v", err)
}
kv := clientv3.NewKV(clus.RandClient())
keys := []string{"foo1", "foo2"}
for i := range keys {
if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
t.Fatal(err)
}
}
lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
if lerr != nil {
t.Fatal(lerr)
}
if lresp.ID != resp.ID {
t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
}
if lresp.GrantedTTL != int64(10) {
t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
}
if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
}
ks := make([]string, len(lresp.Keys))
for i := range lresp.Keys {
ks[i] = string(lresp.Keys[i])
}
sort.Strings(ks)
if !reflect.DeepEqual(ks, keys) {
t.Fatalf("keys expected %v, got %v", keys, ks)
}
lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
if lerr != nil {
t.Fatal(lerr)
}
if len(lresp.Keys) != 0 {
t.Fatalf("unexpected keys %+v", lresp.Keys)
}
}
// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
cli := clus.Client(0)
r, err := cli.Grant(context.TODO(), 4)
if err != nil {
t.Fatal(err)
}
kctx, kcancel := context.WithCancel(context.Background())
defer kcancel()
ka, err := cli.KeepAlive(kctx, r.ID)
if err != nil {
t.Fatal(err)
}
// consume first keepalive so next message sends when cluster is down
<-ka
// force keepalive stream message to timeout
clus.Members[1].Stop(t)
clus.Members[2].Stop(t)
// Use TTL-1 since the client closes the keepalive channel if no
// keepalive arrives before the lease deadline.
// The cluster has 1 second to recover and reply to the keepalive.
time.Sleep(time.Duration(r.TTL-1) * time.Second)
clus.Members[1].Restart(t)
clus.Members[2].Restart(t)
select {
case _, ok := <-ka:
if !ok {
t.Fatalf("keepalive closed")
}
case <-time.After(time.Duration(r.TTL) * time.Second):
t.Fatalf("timed out waiting for keepalive")
}
}
func TestLeaseKeepAliveLoopExit(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx := context.Background()
cli := clus.Client(0)
resp, err := cli.Grant(ctx, 5)
if err != nil {
t.Fatal(err)
}
cli.Lease.Close()
_, err = cli.KeepAlive(ctx, resp.ID)
if _, ok := err.(clientv3.ErrKeepAliveHalted); !ok {
t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
}
}
// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster
// before, during, and after quorum loss to confirm Grant/Keepalive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
numReqs := 5
cli := clus.Client(0)
// bring up a session, tear it down
updown := func(i int) error {
sess, err := concurrency.NewSession(cli)
if err != nil {
return err
}
ch := make(chan struct{})
go func() {
defer close(ch)
sess.Close()
}()
select {
case <-ch:
case <-time.After(time.Minute / 4):
t.Fatalf("timeout %d", i)
}
return nil
}
var wg sync.WaitGroup
mkReqs := func(n int) {
wg.Add(numReqs)
for i := 0; i < numReqs; i++ {
go func() {
defer wg.Done()
err := updown(n)
if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost {
return
}
t.Fatal(err)
}()
}
}
mkReqs(1)
clus.Members[1].Stop(t)
mkReqs(2)
time.Sleep(time.Second)
mkReqs(3)
clus.Members[1].Restart(t)
mkReqs(4)
wg.Wait()
}

View File

@ -1,21 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import "github.com/coreos/pkg/capnslog"
func init() {
capnslog.SetGlobalLogLevel(capnslog.INFO)
}

View File

@ -1,169 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"bufio"
"io"
"net"
"net/http"
"strconv"
"strings"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
)
func TestV3ClientMetrics(t *testing.T) {
defer testutil.AfterTest(t)
var (
addr string = "localhost:27989"
ln net.Listener
err error
)
// listen for all prometheus metrics
donec := make(chan struct{})
go func() {
defer close(donec)
srv := &http.Server{Handler: prometheus.Handler()}
srv.SetKeepAlivesEnabled(false)
ln, err = transport.NewUnixListener(addr)
if err != nil {
t.Fatalf("Error: %v occurred while listening on addr: %v", err, addr)
}
err = srv.Serve(ln)
if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
t.Fatalf("Err serving http requests: %v", err)
}
}()
url := "unix://" + addr + "/metrics"
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
client := clus.Client(0)
w := clientv3.NewWatcher(client)
defer w.Close()
kv := clientv3.NewKV(client)
wc := w.Watch(context.Background(), "foo")
wBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
pBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
_, err = kv.Put(context.Background(), "foo", "bar")
if err != nil {
t.Errorf("Error putting value in key store")
}
pAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
if pBefore+1 != pAfter {
t.Errorf("grpc_client_started_total expected %d, got %d", 1, pAfter-pBefore)
}
// consume watch response
select {
case <-wc:
case <-time.After(10 * time.Second):
t.Error("Timeout occurred for getting watch response")
}
wAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
if wBefore+1 != wAfter {
t.Errorf("grpc_client_msg_received_total expected %d, got %d", 1, wAfter-wBefore)
}
ln.Close()
<-donec
}
func sumCountersForMetricAndLabels(t *testing.T, url string, metricName string, matchingLabelValues ...string) int {
count := 0
for _, line := range getHTTPBodyAsLines(t, url) {
ok := true
if !strings.HasPrefix(line, metricName) {
continue
}
for _, labelValue := range matchingLabelValues {
if !strings.Contains(line, `"`+labelValue+`"`) {
ok = false
break
}
}
if !ok {
continue
}
valueString := line[strings.LastIndex(line, " ")+1 : len(line)-1]
valueFloat, err := strconv.ParseFloat(valueString, 32)
if err != nil {
t.Fatalf("failed parsing value for line: %v and matchingLabelValues: %v", line, matchingLabelValues)
}
count += int(valueFloat)
}
return count
}
func getHTTPBodyAsLines(t *testing.T, url string) []string {
cfgtls := transport.TLSInfo{}
tr, err := transport.NewTransport(cfgtls, time.Second)
if err != nil {
t.Fatalf("Error getting transport: %v", err)
}
tr.MaxIdleConns = -1
tr.DisableKeepAlives = true
cli := &http.Client{Transport: tr}
resp, err := cli.Get(url)
if err != nil {
t.Fatalf("Error fetching: %v", err)
}
reader := bufio.NewReader(resp.Body)
lines := []string{}
for {
line, err := reader.ReadString('\n')
if err != nil {
if err == io.EOF {
break
} else {
t.Fatalf("error reading: %v", err)
}
}
lines = append(lines, line)
}
resp.Body.Close()
return lines
}

View File

@ -15,9 +15,7 @@
package integration
import (
"fmt"
"reflect"
"sync"
"testing"
"time"
@ -71,55 +69,3 @@ func TestMirrorSync(t *testing.T) {
t.Fatal("failed to receive update in one second")
}
}
func TestMirrorSyncBase(t *testing.T) {
cluster := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(nil)
cli := cluster.Client(0)
ctx := context.TODO()
keyCh := make(chan string)
var wg sync.WaitGroup
for i := 0; i < 50; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for key := range keyCh {
if _, err := cli.Put(ctx, key, "test"); err != nil {
t.Fatal(err)
}
}
}()
}
for i := 0; i < 2000; i++ {
keyCh <- fmt.Sprintf("test%d", i)
}
close(keyCh)
wg.Wait()
syncer := mirror.NewSyncer(cli, "test", 0)
respCh, errCh := syncer.SyncBase(ctx)
count := 0
for resp := range respCh {
count = count + len(resp.Kvs)
if !resp.More {
break
}
}
for err := range errCh {
t.Fatalf("unexpected error %v", err)
}
if count != 2000 {
t.Errorf("unexpected kv count: %d", count)
}
}

View File

@ -73,7 +73,6 @@ func TestTxnWriteFail(t *testing.T) {
}()
go func() {
defer close(getc)
select {
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for txn fail")
@ -87,10 +86,11 @@ func TestTxnWriteFail(t *testing.T) {
if len(gresp.Kvs) != 0 {
t.Fatalf("expected no keys, got %v", gresp.Kvs)
}
close(getc)
}()
select {
case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()):
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for get")
case <-getc:
}
@ -125,7 +125,7 @@ func TestTxnReadRetry(t *testing.T) {
clus.Members[0].Restart(t)
select {
case <-donec:
case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()):
case <-time.After(5 * time.Second):
t.Fatalf("waited too long")
}
}

View File

@ -211,14 +211,6 @@ func testWatchReconnRequest(t *testing.T, wctx *watchctx) {
stopc <- struct{}{}
<-donec
// spinning on dropping connections may trigger a leader election
// due to resource starvation; l-read to ensure the cluster is stable
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
if _, err := wctx.kv.Get(ctx, "_"); err != nil {
t.Fatal(err)
}
cancel()
// ensure watcher works
putAndWatch(t, wctx, "a", "a")
}
@ -349,9 +341,6 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
// TestWatchResumeComapcted checks that the watcher gracefully closes in case
// that it tries to resume to a revision that's been compacted out of the store.
// Since the watcher's server restarts with stale data, the watcher will receive
// either a compaction error or all keys by staying in sync before the compaction
// is finally applied.
func TestWatchResumeCompacted(t *testing.T) {
defer testutil.AfterTest(t)
@ -380,9 +369,8 @@ func TestWatchResumeCompacted(t *testing.T) {
}
// put some data and compact away
numPuts := 5
kv := clientv3.NewKV(clus.Client(1))
for i := 0; i < numPuts; i++ {
for i := 0; i < 5; i++ {
if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
}
@ -393,48 +381,17 @@ func TestWatchResumeCompacted(t *testing.T) {
clus.Members[0].Restart(t)
// since watch's server isn't guaranteed to be synced with the cluster when
// the watch resumes, there is a window where the watch can stay synced and
// read off all events; if the watcher misses the window, it will go out of
// sync and get a compaction error.
wRev := int64(2)
for int(wRev) <= numPuts+1 {
var wresp clientv3.WatchResponse
var ok bool
select {
case wresp, ok = <-wch:
if !ok {
t.Fatalf("expected wresp, but got closed channel")
}
case <-time.After(5 * time.Second):
t.Fatalf("compacted watch timed out")
}
for _, ev := range wresp.Events {
if ev.Kv.ModRevision != wRev {
t.Fatalf("expected modRev %v, got %+v", wRev, ev)
}
wRev++
}
if wresp.Err() == nil {
continue
}
if wresp.Err() != rpctypes.ErrCompacted {
t.Fatalf("wresp.Err() expected %v, but got %v %+v", rpctypes.ErrCompacted, wresp.Err())
}
break
// get compacted error message
wresp, ok := <-wch
if !ok {
t.Fatalf("expected wresp, but got closed channel")
}
if int(wRev) > numPuts+1 {
// got data faster than the compaction
return
if wresp.Err() != rpctypes.ErrCompacted {
t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
}
// received compaction error; ensure the channel closes
select {
case wresp, ok := <-wch:
if ok {
t.Fatalf("expected closed channel, but got %v", wresp)
}
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for channel close")
// ensure the channel is closed
if wresp, ok = <-wch; ok {
t.Fatalf("expected closed channel, but got %v", wresp)
}
}
@ -443,7 +400,7 @@ func TestWatchResumeCompacted(t *testing.T) {
func TestWatchCompactRevision(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// set some keys
@ -530,7 +487,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
} else if len(resp.Events) != 0 { // wait for notification otherwise
t.Fatalf("expected no events, but got %+v", resp.Events)
}
case <-time.After(time.Duration(1.5 * float64(pi))):
case <-time.After(2 * pi):
t.Fatalf("watch response expected in %v, but timed out", pi)
}
}
@ -614,19 +571,18 @@ func TestWatchErrConnClosed(t *testing.T) {
defer clus.Terminate(t)
cli := clus.Client(0)
defer cli.Close()
wc := clientv3.NewWatcher(cli)
donec := make(chan struct{})
go func() {
defer close(donec)
ch := wc.Watch(context.TODO(), "foo")
if wr := <-ch; grpc.ErrorDesc(wr.Err()) != grpc.ErrClientConnClosing.Error() {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, grpc.ErrorDesc(wr.Err()))
wc.Watch(context.TODO(), "foo")
if err := wc.Close(); err != nil && err != grpc.ErrClientConnClosing {
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
}
}()
if err := cli.ActiveConnection().Close(); err != nil {
if err := cli.Close(); err != nil {
t.Fatal(err)
}
clus.TakeClient(0)
@ -673,12 +629,8 @@ func TestWatchWithRequireLeader(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
// Put a key for the non-require leader watch to read as an event.
// The watchers will be on member[0]; put key through member[0] to
// ensure that it receives the update so watching after killing quorum
// is guaranteed to have the key.
liveClient := clus.Client(0)
if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
// something for the non-require leader watch to read as an event
if _, err := clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
}
@ -693,8 +645,8 @@ func TestWatchWithRequireLeader(t *testing.T) {
tickDuration := 10 * time.Millisecond
time.Sleep(time.Duration(3*clus.Members[0].ElectionTicks) * tickDuration)
chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))
chLeader := clus.Client(0).Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
chNoLeader := clus.Client(0).Watch(context.TODO(), "foo", clientv3.WithRev(1))
select {
case resp, ok := <-chLeader:
@ -721,262 +673,3 @@ func TestWatchWithRequireLeader(t *testing.T) {
t.Fatalf("expected response, got closed channel")
}
}
// TestWatchWithFilter checks that watch filtering works.
func TestWatchWithFilter(t *testing.T) {
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
ctx := context.Background()
wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut())
wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete())
if _, err := client.Put(ctx, "a", "abc"); err != nil {
t.Fatal(err)
}
if _, err := client.Delete(ctx, "a"); err != nil {
t.Fatal(err)
}
npResp := <-wcNoPut
if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete {
t.Fatalf("expected delete event, got %+v", npResp.Events)
}
ndResp := <-wcNoDel
if len(ndResp.Events) != 1 || ndResp.Events[0].Type != clientv3.EventTypePut {
t.Fatalf("expected put event, got %+v", ndResp.Events)
}
select {
case resp := <-wcNoPut:
t.Fatalf("unexpected event on filtered put (%+v)", resp)
case resp := <-wcNoDel:
t.Fatalf("unexpected event on filtered delete (%+v)", resp)
case <-time.After(100 * time.Millisecond):
}
}
// TestWatchWithCreatedNotification checks that createdNotification works.
func TestWatchWithCreatedNotification(t *testing.T) {
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
ctx := context.Background()
createC := client.Watch(ctx, "a", clientv3.WithCreatedNotify())
resp := <-createC
if !resp.Created {
t.Fatalf("expected created event, got %v", resp)
}
}
// TestWatchWithCreatedNotificationDropConn ensures that
// a watcher with created notify does not post duplicate
// created events from disconnect.
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify())
resp := <-wch
if !resp.Created {
t.Fatalf("expected created event, got %v", resp)
}
cluster.Members[0].DropConnections()
// try to receive from watch channel again
// ensure it doesn't post another createNotify
select {
case wresp := <-wch:
t.Fatalf("got unexpected watch response: %+v\n", wresp)
case <-time.After(time.Second):
// watcher may not reconnect by the time it hits the select,
// so it wouldn't have a chance to filter out the second create event
}
}
// TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
func TestWatchCancelOnServer(t *testing.T) {
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer cluster.Terminate(t)
client := cluster.RandClient()
for i := 0; i < 10; i++ {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
client.Watch(ctx, "a", clientv3.WithCreatedNotify())
cancel()
}
// wait for cancels to propagate
time.Sleep(time.Second)
watchers, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
if err != nil {
t.Fatal(err)
}
if watchers != "0" {
t.Fatalf("expected 0 watchers, got %q", watchers)
}
}
// TestWatchOverlapContextCancel stresses the watcher stream teardown path by
// creating/canceling watchers to ensure that new watchers are not taken down
// by a torn down watch stream. The sort of race that's being detected:
// 1. create w1 using a cancelable ctx with %v as "ctx"
// 2. cancel ctx
// 3. watcher client begins tearing down watcher grpc stream since no more watchers
// 4. start creating watcher w2 using a new "ctx" (not canceled), attaches to old grpc stream
// 5. watcher client finishes tearing down stream on "ctx"
// 6. w2 comes back canceled
func TestWatchOverlapContextCancel(t *testing.T) {
f := func(clus *integration.ClusterV3) {}
testWatchOverlapContextCancel(t, f)
}
func TestWatchOverlapDropConnContextCancel(t *testing.T) {
f := func(clus *integration.ClusterV3) {
clus.Members[0].DropConnections()
}
testWatchOverlapContextCancel(t, f)
}
func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
// each unique context "%v" has a unique grpc stream
n := 100
ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5)
for i := range ctxs {
// make "%v" unique
ctxs[i] = context.WithValue(context.TODO(), "key", i)
// limits the maximum number of outstanding watchers per stream
ctxc[i] = make(chan struct{}, 2)
}
// issue concurrent watches on "abc" with cancel
cli := clus.RandClient()
if _, err := cli.Put(context.TODO(), "abc", "def"); err != nil {
t.Fatal(err)
}
ch := make(chan struct{}, n)
for i := 0; i < n; i++ {
go func() {
defer func() { ch <- struct{}{} }()
idx := rand.Intn(len(ctxs))
ctx, cancel := context.WithCancel(ctxs[idx])
ctxc[idx] <- struct{}{}
wch := cli.Watch(ctx, "abc", clientv3.WithRev(1))
f(clus)
select {
case _, ok := <-wch:
if !ok {
t.Fatalf("unexpected closed channel %p", wch)
}
// may take a second or two to reestablish a watcher because of
// grpc backoff policies for disconnects
case <-time.After(5 * time.Second):
t.Errorf("timed out waiting for watch on %p", wch)
}
// randomize how cancel overlaps with watch creation
if rand.Intn(2) == 0 {
<-ctxc[idx]
cancel()
} else {
cancel()
<-ctxc[idx]
}
}()
}
// join on watches
for i := 0; i < n; i++ {
select {
case <-ch:
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for completed watch")
}
}
}
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())
wch := cli.Watch(ctx, "abc")
donec := make(chan struct{})
go func() {
defer close(donec)
select {
case wr, ok := <-wch:
if ok {
t.Fatalf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err())
}
case <-time.After(5 * time.Second):
t.Fatal("timed out waiting for closed channel")
}
}()
cancel()
if err := cli.Close(); err != nil {
t.Fatal(err)
}
<-donec
clus.TakeClient(0)
}
// TestWatchStressResumeClose establishes a bunch of watchers, disconnects
// to put them in resuming mode, cancels them so some resumes by cancel fail,
// then closes the watcher interface to ensure correct clean up.
func TestWatchStressResumeClose(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())
// add more watches than can be resumed before the cancel
wchs := make([]clientv3.WatchChan, 2000)
for i := range wchs {
wchs[i] = cli.Watch(ctx, "abc")
}
clus.Members[0].DropConnections()
cancel()
if err := cli.Close(); err != nil {
t.Fatal(err)
}
clus.TakeClient(0)
}
// TestWatchCancelDisconnected ensures canceling a watcher works when
// its grpc stream is disconnected / reconnecting.
func TestWatchCancelDisconnected(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
ctx, cancel := context.WithCancel(context.Background())
// add more watches than can be resumed before the cancel
wch := cli.Watch(ctx, "abc")
clus.Members[0].Stop(t)
cancel()
select {
case <-wch:
case <-time.After(time.Second):
t.Fatal("took too long to cancel disconnected watcher")
}
}
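
As a quick illustration of the watch filter options exercised by TestWatchWithFilter above, here is a minimal sketch (not part of the diff); the key name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// watchDeletesOnly watches key but filters out put events, reporting only deletions.
func watchDeletesOnly(cli *clientv3.Client, key string) {
	wch := cli.Watch(context.Background(), key, clientv3.WithFilterPut())
	for wr := range wch {
		if err := wr.Err(); err != nil {
			log.Fatal(err)
		}
		for _, ev := range wr.Events {
			fmt.Printf("%s %q at revision %d\n", ev.Type, ev.Kv.Key, ev.Kv.ModRevision)
		}
	}
}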

View File

@ -17,7 +17,6 @@ package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@ -82,11 +81,7 @@ type kv struct {
}
func NewKV(c *Client) KV {
return &kv{remote: RetryKVClient(c)}
}
func NewKVFromKVClient(remote pb.KVClient) KV {
return &kv{remote: remote}
return &kv{remote: pb.NewKVClient(c.conn)}
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
@ -125,7 +120,6 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
@ -142,20 +136,34 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
resp, err = kv.remote.Range(ctx, r)
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
resp, err = kv.remote.Put(ctx, r)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
resp, err = kv.remote.DeleteRange(ctx, r)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil

View File

@ -21,7 +21,6 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@ -44,21 +43,6 @@ type LeaseKeepAliveResponse struct {
TTL int64
}
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
type LeaseTimeToLiveResponse struct {
*pb.ResponseHeader
ID LeaseID `json:"id"`
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
TTL int64 `json:"ttl"`
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
GrantedTTL int64 `json:"granted-ttl"`
// Keys is the list of keys attached to this lease.
Keys [][]byte `json:"keys"`
}
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
@ -69,21 +53,6 @@ const (
NoLease LeaseID = 0
)
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
Reason error
}
func (e ErrKeepAliveHalted) Error() string {
s := "etcdclient: leases keep alive halted"
if e.Reason != nil {
s += ": " + e.Reason.Error()
}
return s
}
type Lease interface {
// Grant creates a new lease.
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
@ -91,9 +60,6 @@ type Lease interface {
// Revoke revokes the given lease.
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
// TimeToLive retrieves the lease information of the given lease ID.
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
// KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
@ -109,9 +75,8 @@ type Lease interface {
type lessor struct {
mu sync.Mutex // guards all fields
// donec is closed and loopErr is set when recvKeepAliveLoop stops
donec chan struct{}
loopErr error
// donec is closed when recvKeepAliveLoop stops
donec chan struct{}
remote pb.LeaseClient
@ -144,7 +109,7 @@ func NewLease(c *Client) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
remote: RetryLeaseClient(c),
remote: pb.NewLeaseClient(c.conn),
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
}
if l.firstKeepAliveTimeout == time.Second {
@ -175,7 +140,10 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(cctx, err)
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
@ -195,29 +163,8 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(cctx, err)
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
@ -226,15 +173,6 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
l.mu.Lock()
// ensure that recvKeepAliveLoop is still running
select {
case <-l.donec:
err := l.loopErr
l.mu.Unlock()
close(ch)
return ch, ErrKeepAliveHalted{Reason: err}
default:
}
ka, ok := l.keepAlives[id]
if !ok {
// create fresh keep alive
@ -274,6 +212,10 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
@ -319,7 +261,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
stream, err := l.remote.LeaseKeepAlive(cctx)
if err != nil {
return nil, toErr(ctx, err)
}
@ -342,11 +284,10 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
return karesp, nil
}
func (l *lessor) recvKeepAliveLoop() (gerr error) {
func (l *lessor) recvKeepAliveLoop() {
defer func() {
l.mu.Lock()
close(l.donec)
l.loopErr = gerr
for _, ka := range l.keepAlives {
ka.Close()
}
@ -359,35 +300,21 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
resp, err := stream.Recv()
if err != nil {
if isHaltErr(l.stopCtx, err) {
return err
return
}
stream, serr = l.resetRecv()
continue
}
l.recvKeepAlive(resp)
}
return serr
}
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
if err = toErr(sctx, err); err != nil {
cancel()
if err := l.newStream(); err != nil {
return nil, err
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.stream.CloseSend()
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
stream := l.getKeepAliveStream()
go l.sendKeepAliveLoop(stream)
return stream, nil
}
@ -416,7 +343,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
}
// send update to all channels
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
for _, ch := range ka.chs {
select {
@ -462,7 +389,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
return
}
var tosend []LeaseID
tosend := make([]LeaseID, 0)
now := time.Now()
l.mu.Lock()
@ -483,6 +410,32 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
}
}
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.stream
}
func (l *lessor) newStream() error {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx)
if err != nil {
cancel()
return toErr(sctx, err)
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.stream.CloseSend()
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
return nil
}
func (ka *keepAlive) Close() {
close(ka.donec)
for _, ch := range ka.chs {
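Taken together, the lease.go hunks above cover lease granting, the keep-alive stream (with renewals scheduled from the returned TTL, roughly every TTL/3 seconds per the nextKeepAlive computation), and, on one side of the diff only, TimeToLive and ErrKeepAliveHalted. The following is a rough sketch of how client code typically drives this API; the endpoint, key, value, and TTL are arbitrary placeholders, and TimeToLive is left out because only one side of the diff defines it.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a 10-second lease and attach a key to it (key/value are arbitrary).
	gresp, err := cli.Grant(context.TODO(), 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(context.TODO(), "service/instance", "10.0.0.1:8080",
		clientv3.WithLease(gresp.ID)); err != nil {
		log.Fatal(err)
	}

	// KeepAlive renews the lease over the stream managed by the lessor above;
	// renewals are scheduled from the returned TTL (roughly every TTL/3 seconds).
	ch, err := cli.KeepAlive(context.TODO(), gresp.ID)
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 3; i++ {
		ka, ok := <-ch
		if !ok {
			// The channel closes when the keep-alive loop halts; on the side of
			// the diff that defines it, later calls also see ErrKeepAliveHalted.
			log.Fatal("keep-alive channel closed")
		}
		log.Printf("lease %x kept alive, TTL=%ds", ka.ID, ka.TTL)
	}
}
```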

View File

@ -15,15 +15,13 @@
package clientv3
import (
"io/ioutil"
"log"
"os"
"sync"
"google.golang.org/grpc/grpclog"
)
// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger
var (
@ -36,36 +34,20 @@ type settableLogger struct {
}
func init() {
// disable client side logs by default
// use go's standard logger by default like grpc
logger.mu.Lock()
logger.l = log.New(ioutil.Discard, "", 0)
// logger has to override the grpclog at initialization so that
// any changes to the grpclog go through logger with locking
// instead of through SetLogger
//
// now updates only happen through settableLogger.set
logger.l = log.New(os.Stderr, "", log.LstdFlags)
grpclog.SetLogger(&logger)
logger.mu.Unlock()
}
// SetLogger sets client-side Logger. By default, logs are disabled.
func SetLogger(l Logger) {
logger.set(l)
}
// GetLogger returns the current logger.
func GetLogger() Logger {
return logger.get()
}
func (s *settableLogger) set(l Logger) {
func (s *settableLogger) Set(l Logger) {
s.mu.Lock()
logger.l = l
s.mu.Unlock()
}
func (s *settableLogger) get() Logger {
func (s *settableLogger) Get() Logger {
s.mu.RLock()
l := logger.l
s.mu.RUnlock()
@ -74,9 +56,9 @@ func (s *settableLogger) get() Logger {
// implement the grpclog.Logger interface
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) }
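The logger hunks switch between discarding client logs by default behind exported SetLogger/GetLogger helpers and writing to standard error through go's standard logger. Assuming the revision that exports clientv3.SetLogger (only one side of this diff does), redirecting client and gRPC logs might look like the sketch below; the prefix string is arbitrary.

```go
package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// *log.Logger provides the grpclog-style methods behind clientv3.Logger,
	// so the standard logger can be plugged in directly.
	clientv3.SetLogger(log.New(os.Stderr, "etcd-client: ", log.LstdFlags))

	// ... create a clientv3.Client and use it as usual ...
}
```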

View File

@ -20,14 +20,10 @@ import (
"strings"
"testing"
"github.com/coreos/etcd/auth"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/crypto/bcrypt"
)
func init() { auth.BcryptCost = bcrypt.MinCost }
// TestMain sets up an etcd cluster if running the examples.
func TestMain(m *testing.M) {
useCluster := true // default to running all tests

View File

@ -19,7 +19,6 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
@ -68,7 +67,7 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
resp, err := m.remote.Alarm(ctx, req)
if err == nil {
return (*AlarmResponse)(resp), nil
}
@ -101,7 +100,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
return &ret, nil
}
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
resp, err := m.remote.Alarm(ctx, req)
if err == nil {
return (*AlarmResponse)(resp), nil
}
@ -115,7 +114,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
if err != nil {
return nil, toErr(ctx, err)
}
@ -129,7 +128,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
resp, err := remote.Status(ctx, &pb.StatusRequest{})
if err != nil {
return nil, toErr(ctx, err)
}
@ -137,7 +136,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{})
if err != nil {
return nil, toErr(ctx, err)
}
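The maintenance hunks drop the grpc.FailFast(false) call option from Alarm, Defragment, Status, and Snapshot, while Defragment and Status still dial the named endpoint directly rather than going through the balanced connection. A hedged sketch of polling Status endpoint by endpoint follows; the addresses are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Placeholder endpoints; adjust to the cluster under test.
	endpoints := []string{"127.0.0.1:2379", "127.0.0.1:22379", "127.0.0.1:32379"}

	cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Status (like Defragment) is addressed to one endpoint at a time,
	// matching the per-endpoint dial in the hunks above.
	for _, ep := range endpoints {
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		resp, err := cli.Status(ctx, ep)
		cancel()
		if err != nil {
			log.Printf("status %s: %v", ep, err)
			continue
		}
		fmt.Printf("%s: version=%s dbSize=%d leader=%x\n", ep, resp.Version, resp.DbSize, resp.Leader)
	}
}
```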

View File

@ -78,7 +78,7 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha
// If len(s.prefix) != 0, we will sync key-value space with given prefix.
// We then range from the prefix to the next prefix if exists. Or we will
// range from the prefix to the end if the next prefix does not exists.
opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix)))
opts = append(opts, clientv3.WithPrefix())
key = s.prefix
}
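The syncer hunk swaps WithRange(GetPrefixRangeEnd(s.prefix)) for WithPrefix(); both make the base sync cover exactly the keys under the prefix, because the prefix option computes the same exclusive range end internally. The helper below is an illustrative re-derivation of that range-end rule, not the library's own function.

```go
package main

import "fmt"

// prefixRangeEnd illustrates the rule: increment the last byte that is not
// 0xff and truncate after it; keys in [prefix, end) are then exactly the keys
// beginning with prefix. clientv3 applies the same rule inside WithPrefix.
func prefixRangeEnd(prefix string) string {
	end := []byte(prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return string(end[:i+1])
		}
	}
	// Every byte is 0xff: fall back to ranging over the rest of the keyspace,
	// which the client encodes with its own sentinel end key.
	return "\x00"
}

func main() {
	// "foo/" ranges up to "foo0" ('/'+1 == '0'), covering every "foo/..." key.
	fmt.Println(prefixRangeEnd("foo/"))
}
```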

Some files were not shown because too many files have changed in this diff.