Compare commits

...

33 Commits

Author SHA1 Message Date
66722b1ada version: bump up to 3.2.0
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-09 10:59:09 -07:00
963339d265 rafthttp: permit very large v2 snapshots
v2 snapshots were hitting the 512MB message decode limit, causing
sending snapshots to new members to fail for being too big.
2017-06-09 10:49:51 -07:00
c87594f27c etcdserver: use same ReadView for read-only txns
A read-only txn isn't serialized by raft, but it uses a fresh
read txn for every mvcc access prior to executing its request ops.
If a write txn modifies the keys matching the read txn's comparisons,
the read txn may return inconsistent results.

To fix, use the same read-only mvcc txn for the duration of the etcd
txn. Probably gets a modest txn speedup as well since there are
fewer read txn allocations.
2017-06-09 09:50:43 -07:00
e72ad5dd2a mvcc: create TxnWrites from TxnRead with NewReadOnlyTxnWrite
Already used internally by mvcc, but needed by etcdserver txns.
2017-06-09 09:50:37 -07:00
3eb5d24cab integration: test txn comparison and concurrent put ordering 2017-06-09 09:50:30 -07:00
8b9041a938 Documentation/op-guide: do not use host network, fix indentation
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-09 09:14:21 -07:00
864ffec88c v2http: put back /v2/machines and mark as non-deprecated
This reverts commit 2bb33181b6. python-etcd
seems to depend on /v2/machines and the maintainer vanished. Plus, it is
prefixed with /v2/ so it probably can't be deprecated anyway.
2017-06-08 12:05:59 -07:00
12bc2bba36 etcdserver: add leaseExpired debugging metrics
Fix https://github.com/coreos/etcd/issues/8050.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-08 11:23:12 -07:00
3a43afce5a Documentation/op-guide: fix 'grpc_code' field in metrics
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-08 10:16:07 -07:00
0e56ea37e7 fileutil: return immediately if preallocating 0 bytes
fallocate will return EINVAL, causing zeroing to the end of a
0 byte file to fail.

Fixes #8045
2017-06-07 12:59:35 -07:00
743192aa3b *: clear rarer shellcheck errors on scripts
Clean up the tail of the warnings
2017-06-06 10:44:59 -07:00
e8b156578f travis: add shellcheck 2017-06-06 10:44:53 -07:00
61f3338ce7 test: shellcheck 2017-06-06 10:44:46 -07:00
effffdbdca test, osutil: disable setting SIG_DFL on linux if built with cov tag
Was causing etcd to terminate before finishing writing its
coverage profile.
2017-06-06 09:47:22 -07:00
9bac803bee Documentation/op-guide: fix typo in grafana.json
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-06 09:47:15 -07:00
9169ad0d7d *: fix go tool vet -all -shadow errors 2017-06-06 09:47:06 -07:00
482a7839d9 test: speedup and strengthen go vet checking
Was iterating over every file, reloading everything. Instead,
analyze the package directories. On my machine, the time for
vet checking goes from 34s to 3s. Scans more code too.
2017-06-06 09:46:54 -07:00
ba3058ca79 op-guide: document CN certs in security.md 2017-06-06 09:46:47 -07:00
0e90e504f5 scripts, Documentation: fix swagger generation
Changes to the genproto to support splitting out the grpc-gateway broke
swagger generation.
2017-06-02 11:05:21 -07:00
998fa0de76 Documentation, scripts: regen RPC docs
Was missing the new cancel_reason field. Also includes updated protodoc
sha to fix generating documentation for upcoming txn compare range patchset.
2017-06-02 10:27:49 -07:00
c273735729 op-guide: document configuration flags for gateway 2017-06-01 15:59:49 -07:00
c85f736522 mvcc: time restore in restore benchmark
This never worked.
2017-06-01 14:59:31 -07:00
a375ff172e mvcc: chunk reads for restoring
Loading all keys at once would cause etcd to use twice as much
memory as it would need to serve the keys, causing RSS to spike on
boot. Instead, load the keys into the mvcc by chunk. Uses pipelining
for some concurrency.

Fixes #7822
2017-06-01 14:59:27 -07:00
1893af9bbd integration: use unixs:// if client port configured for tls 2017-06-01 09:47:08 -07:00
b4c655677a clientv3: support unixs:// scheme
For using TLS without giving a TLSConfig to the client.
2017-06-01 09:47:03 -07:00
c2160adf1d clientv3/integration: test dialing to TLS without a TLS config times out
etcdctl was getting ctx errors from timing out trying to issue RPCs to
a TLS endpoint but without using TLS for transmission. Client should
immediately bail out with a time out error.
2017-06-01 09:46:57 -07:00
5ada311416 clientv3: use Endpoints[0] to initialize grpc creds
Dialing out without specifying TLS creds but giving https uses some
default behavior that depends on passing an endpoint with https to
Dial(), so it's not enough to completely rely on the balancer to supply
endpoints.

Fixes #8008

Also ctx-izes grpc.Dial
2017-06-01 09:46:48 -07:00
f042cd7d9c vendor: ghodss/yaml v1.0.0 2017-05-30 14:44:30 -07:00
f0a400a3a8 vendor: kr/pty v1.0.0 2017-05-30 14:44:23 -07:00
6066977280 op-guide: update performance.md
It's been a year, time to refresh with 3.2.0 data.
2017-05-30 10:16:19 -07:00
fc88eccc74 vendor: use v0.2.0 of go-semver 2017-05-30 10:15:23 -07:00
5cb28a7d83 Documentation: add 'yaml.NewConfig' change in 3.2
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-05-30 10:14:55 -07:00
de57e88643 Documentation: add FAQ entry for "database space exceeded" errors
Also moves miscategorized cluster id mismatch entry from "performance"
to "operation".
2017-05-26 09:13:13 -07:00
73 changed files with 949 additions and 492 deletions


@@ -41,10 +41,13 @@ matrix:
     addons:
       apt:
+        sources:
+        - debian-sid
         packages:
         - libpcap-dev
         - libaspell-dev
        - libhunspell-dev
+        - shellcheck

 before_install:
  - go get -v -u github.com/chzchzchz/goword


@@ -790,6 +790,7 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
 | created | created is set to true if the response is for a create watch request. The client should record the watch_id and expect to receive events for the created watcher from the same stream. All events sent to the created watcher will attach with the same watch_id. | bool |
 | canceled | canceled is set to true if the response is for a cancel watch request. No further events will be sent to the canceled watcher. | bool |
 | compact_revision | compact_revision is set to the minimum index if a watcher tries to watch at a compacted index. This happens when creating a watcher at a compacted revision or the watcher cannot catch up with the progress of the key-value store. The client should treat the watcher as canceled and should not try to create any watcher with the same start_revision again. | int64 |
+| cancel_reason | cancel_reason indicates the reason for canceling the watcher. | string |
 | events | | (slice of) mvccpb.Event |


@@ -2179,6 +2179,10 @@
           "format": "int64",
           "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store. \n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again."
         },
+        "cancel_reason": {
+          "type": "string",
+          "description": "cancel_reason indicates the reason for canceling the watcher."
+        },
         "events": {
           "type": "array",
           "items": {


@@ -78,10 +78,26 @@ On the other hand, if the downed member is removed from cluster membership first
 etcd sets `strict-reconfig-check` in order to reject reconfiguration requests that would cause quorum loss. Abandoning quorum is really risky (especially when the cluster is already unhealthy). Although it may be tempting to disable quorum checking if there's quorum loss to add a new member, this could lead to full fledged cluster inconsistency. For many applications, this will make the problem even worse ("disk geometry corruption" being a candidate for most terrifying).

-### Why does etcd lose its leader from disk latency spikes?
+#### Why does etcd lose its leader from disk latency spikes?

 This is intentional; disk latency is part of leader liveness. Suppose the cluster leader takes a minute to fsync a raft log update to disk, but the etcd cluster has a one second election timeout. Even though the leader can process network messages within the election interval (e.g., send heartbeats), it's effectively unavailable because it can't commit any new proposals; it's waiting on the slow disk. If the cluster frequently loses its leader due to disk latencies, try [tuning][tuning] the disk settings or etcd time parameters.

+#### What does the etcd warning "request ignored (cluster ID mismatch)" mean?
+
+Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster.
+
+Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint.
+
+#### What does "mvcc: database space exceeded" mean and how do I fix it?
+
+The [multi-version concurrency control][api-mvcc] data model in etcd keeps an exact history of the keyspace. Without periodically compacting this history (e.g., by setting `--auto-compaction`), etcd will eventually exhaust its storage space. If etcd runs low on storage space, it raises a space quota alarm to protect the cluster from further writes. So long as the alarm is raised, etcd responds to write requests with the error `mvcc: database space exceeded`.
+
+To recover from the low space quota alarm:
+
+1. [Compact][maintenance-compact] etcd's history.
+2. [Defragment][maintenance-defragment] every etcd endpoint.
+3. [Disarm][maintenance-disarm] the alarm.
+
 ### Performance

 #### How should I benchmark etcd?
@@ -112,12 +128,6 @@ A slow network can also cause this issue. If network metrics among the etcd mach
 If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information.

-#### What does the etcd warning "request ignored (cluster ID mismatch)" mean?
-
-Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster.
-
-Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint.
-
 #### What does the etcd warning "snapshotting is taking more than x seconds to finish ..." mean?

 etcd sends a snapshot of its complete key-value store to refresh slow followers and for [backups][backup]. Slow snapshot transfer times increase MTTR; if the cluster is ingesting data with high throughput, slow followers may livelock by needing a new snapshot before finishing receiving a snapshot. To catch slow snapshot performance, etcd warns when sending a snapshot takes more than thirty seconds and exceeds the expected transfer time for a 1Gbps connection.
@@ -135,3 +145,7 @@ etcd sends a snapshot of its complete key-value store to refresh slow followers
 [runtime reconfiguration]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md
 [benchmark]: https://github.com/coreos/etcd/tree/master/tools/benchmark
 [benchmark-result]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/performance.md
+[api-mvcc]: learning/api.md#revisions
+[maintenance-compact]: op-guide/maintenance.md#history-compaction
+[maintenance-defragment]: op-guide/maintenance.md#defragmentation
+[maintenance-disarm]: ../etcdctl/README.md#alarm-disarm
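
For reference, the three recovery steps in the added FAQ entry map onto `etcdctl` commands roughly as follows — a sketch based on the etcd maintenance docs, with an illustrative endpoint address:

```sh
# find the current revision (assumes the v3 API and a reachable endpoint)
rev=$(ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 endpoint status --write-out="json" \
      | egrep -o '"revision":[0-9]*' | egrep -o '[0-9].*')
# compact away all old revisions
ETCDCTL_API=3 etcdctl compact $rev
# defragment to reclaim the freed space
ETCDCTL_API=3 etcdctl defrag
# disarm the space quota alarm once space is reclaimed
ETCDCTL_API=3 etcdctl alarm disarm
```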


@@ -79,14 +79,16 @@ export NODE1=192.168.1.21
 Run the latest version of etcd:

 ```
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:latest \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name node1 \
   --initial-advertise-peer-urls http://${NODE1}:2380 --listen-peer-urls http://${NODE1}:2380 \
   --advertise-client-urls http://${NODE1}:2379 --listen-client-urls http://${NODE1}:2379 \
   --initial-cluster node1=http://${NODE1}:2380
 ```

 List the cluster member:
@@ -114,41 +116,47 @@ DATA_DIR=/var/lib/etcd
 # For node 1
 THIS_NAME=${NAME_1}
 THIS_IP=${HOST_1}
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name ${THIS_NAME} \
   --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
   --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
   --initial-cluster ${CLUSTER} \
   --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

 # For node 2
 THIS_NAME=${NAME_2}
 THIS_IP=${HOST_2}
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name ${THIS_NAME} \
   --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
   --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
   --initial-cluster ${CLUSTER} \
   --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

 # For node 3
 THIS_NAME=${NAME_3}
 THIS_IP=${HOST_3}
-docker run --net=host \
+docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=${DATA_DIR}:/etcd-data \
   --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
   /usr/local/bin/etcd \
   --data-dir=/etcd-data --name ${THIS_NAME} \
   --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
   --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
   --initial-cluster ${CLUSTER} \
   --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
 ```

 To run `etcdctl` using API version 3:
@@ -170,17 +178,19 @@ rkt run \
   --volume etcd-ssl-certs-bundle,kind=host,source=/etc/ssl/certs/ca-certificates.crt \
   --mount volume=etcd-ssl-certs-bundle,target=/etc/ssl/certs/ca-certificates.crt \
   quay.io/coreos/etcd:latest -- --name my-name \
   --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \
   --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \
   --discovery https://discovery.etcd.io/c11fbcdc16972e45253491a24fcf45e1
 ```

 ```
 docker run \
+  -p 2379:2379 \
+  -p 2380:2380 \
   --volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \
   quay.io/coreos/etcd:latest \
   /usr/local/bin/etcd --name my-name \
   --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \
   --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \
   --discovery https://discovery.etcd.io/86a9ff6c8cb8b4c4544c1a2f88f8b801
 ```


@@ -10,8 +10,7 @@ The gateway supports multiple etcd server endpoints and works on a simple round-
 Every application that accesses etcd must first have the address of an etcd cluster client endpoint. If multiple applications on the same server access the same etcd cluster, every application still needs to know the advertised client endpoints of the etcd cluster. If the etcd cluster is reconfigured to have different endpoints, every application may also need to update its endpoint list. This wide-scale reconfiguration is both tedious and error prone.

-etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has
-each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application.
+etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application.

 In summary, to automatically propagate cluster endpoint changes, the etcd gateway runs on every machine serving multiple applications accessing the same etcd cluster.
@@ -64,3 +63,43 @@ Start the etcd gateway to fetch the endpoints from the DNS SRV entries with the
 $ etcd gateway --discovery-srv=example.com
 2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...]
 ```

+## Configuration flags
+
+### etcd cluster
+
+#### --endpoints
+
+* Comma-separated list of etcd server targets for forwarding client connections.
+* Default: `127.0.0.1:2379`
+* Invalid example: `https://127.0.0.1:2379` (gateway does not terminate TLS)
+
+#### --discovery-srv
+
+* DNS domain used to bootstrap cluster endpoints through SRV records.
+* Default: (not set)
+
+### Network
+
+#### --listen-addr
+
+* Interface and port to bind for accepting client requests.
+* Default: `127.0.0.1:23790`
+
+#### --retry-delay
+
+* Duration of delay before retrying to connect to failed endpoints.
+* Default: 1m0s
+* Invalid example: "123" (a time unit such as "1m" is required)
+
+### Security
+
+#### --insecure-discovery
+
+* Accept SRV records that are insecure or susceptible to man-in-the-middle attacks.
+* Default: `false`
+
+#### --trusted-ca-file
+
+* Path to the client TLS CA file for the etcd cluster. Used to authenticate endpoints.
+* Default: (not set)
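
Putting the new flags together, a gateway invocation might look like the following — a sketch mirroring the invocation style shown earlier in this document; the endpoint hostnames are illustrative and the values are just the documented defaults:

```sh
etcd gateway \
  --endpoints=infra0.example.com:2379,infra1.example.com:2379,infra2.example.com:2379 \
  --listen-addr=127.0.0.1:23790 \
  --retry-delay=1m
```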


@@ -115,7 +115,7 @@
       "stack": false,
       "steppedLine": false,
       "targets": [{
-        "expr": "sum(rate({grpc_type=\"unary\",grpc_code!=\"OK\"} [1m]))",
+        "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"} [1m]))",
         "intervalFactor": 2,
         "legendFormat": "{{instance}} RPC Rate",
         "metric": "grpc_server_started_total",
@@ -123,7 +123,7 @@
         "step": 2
       },
       {
-        "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\",grpc_code!=\"OK\"} [1m])) - sum(rate(grpc_server_handled_total{grpc_type=\"unary\"} [1m]))",
+        "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"} [1m])) - sum(rate(grpc_server_handled_total{grpc_type=\"unary\",grpc_code!=\"OK\"} [1m]))",
         "intervalFactor": 2,
         "legendFormat": "{{instance}} RPC Failed Rate",
         "metric": "grpc_server_handled_total",
@@ -197,7 +197,7 @@
       "stack": true,
       "steppedLine": false,
       "targets": [{
-        "expr": "sum(grpc_server_started_total {grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\",grpc_code!=\"OK\"}) - sum(grpc_server_handled_total {grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})",
+        "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})",
         "intervalFactor": 2,
         "legendFormat": "Watch Streams",
         "metric": "grpc_server_handled_total",
@@ -205,7 +205,7 @@
         "step": 4
       },
       {
-        "expr": "sum(grpc_server_started_total {grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total {grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})",
+        "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})",
         "intervalFactor": 2,
         "legendFormat": "Lease Streams",
         "metric": "grpc_server_handled_total",


@@ -17,58 +17,54 @@ For some baseline performance numbers, we consider a three member etcd cluster w
 - Google Cloud Compute Engine
 - 3 machines of 8 vCPUs + 16GB Memory + 50GB SSD
 - 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
-- Ubuntu 15.10
-- etcd v3 master branch (commit SHA d8f325d), Go 1.6.2
+- Ubuntu 17.04
+- etcd 3.2.0, go 1.8.3

 With this configuration, etcd can approximately write:

-| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Memory |
-|----------------|-------------------|---------------------|-----------------------|-------------------|--------------------|-------------------|-----------------------------|--------|
-| 10,000 | 8 | 256 | 1 | 1 | leader only | 525 | 2ms | 35 MB |
-| 100,000 | 8 | 256 | 100 | 1000 | leader only | 25,000 | 30ms | 35 MB |
-| 100,000 | 8 | 256 | 100 | 1000 | all members | 33,000 | 25ms | 35 MB |
+| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Average server RSS |
+|---------------:|------------------:|--------------------:|----------------------:|------------------:|--------------------|------------------:|----------------------------:|-------------------:|
+| 10,000 | 8 | 256 | 1 | 1 | leader only | 583 | 1.6ms | 48 MB |
+| 100,000 | 8 | 256 | 100 | 1000 | leader only | 44,341 | 22ms | 124MB |
+| 100,000 | 8 | 256 | 100 | 1000 | all members | 50,104 | 20ms | 126MB |

 Sample commands are:

-```
-# assuming IP_1 is leader, write requests to the leader
-benchmark --endpoints={IP_1} --conns=1 --clients=1 \
+```sh
+# write to leader
+benchmark --endpoints=${HOST_1} --target-leader --conns=1 --clients=1 \
     put --key-size=8 --sequential-keys --total=10000 --val-size=256
-benchmark --endpoints={IP_1} --conns=100 --clients=1000 \
+benchmark --endpoints=${HOST_1} --target-leader --conns=100 --clients=1000 \
     put --key-size=8 --sequential-keys --total=100000 --val-size=256

 # write to all members
-benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \
     put --key-size=8 --sequential-keys --total=100000 --val-size=256
 ```

 Linearizable read requests go through a quorum of cluster members for consensus to fetch the most recent data. Serializable read requests are cheaper than linearizable reads since they are served by any single etcd member, instead of a quorum of members, in exchange for possibly serving stale data. etcd can read:

-| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average latency per request | Average read QPS |
-|--------------------|-------------------|---------------------|-----------------------|-------------------|-------------|-----------------------------|------------------|
-| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 2ms | 560 |
-| 10,000 | 8 | 256 | 1 | 1 | Serializable | 0.4ms | 7,500 |
-| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 15ms | 43,000 |
-| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 9ms | 93,000 |
+| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average read QPS | Average latency per request |
+|-------------------:|------------------:|--------------------:|----------------------:|------------------:|-------------|-----------------:|----------------------------:|
+| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 1,353 | 0.7ms |
+| 10,000 | 8 | 256 | 1 | 1 | Serializable | 2,909 | 0.3ms |
+| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 141,578 | 5.5ms |
+| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 185,758 | 2.2ms |

 Sample commands are:

-```
-# Linearizable read requests
-benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=1 --clients=1 \
+```sh
+# Single connection read requests
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=1 --clients=1 \
     range YOUR_KEY --consistency=l --total=10000
-benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \
-    range YOUR_KEY --consistency=l --total=100000
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=1 --clients=1 \
+    range YOUR_KEY --consistency=s --total=10000

-# Serializable read requests for each member and sum up the numbers
-for endpoint in {IP_1} {IP_2} {IP_3}; do
-    benchmark --endpoints=$endpoint --conns=1 --clients=1 \
-        range YOUR_KEY --consistency=s --total=10000
-done
-for endpoint in {IP_1} {IP_2} {IP_3}; do
-    benchmark --endpoints=$endpoint --conns=100 --clients=1000 \
-        range YOUR_KEY --consistency=s --total=100000
-done
+# Many concurrent read requests
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \
+    range YOUR_KEY --consistency=l --total=100000
+benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \
+    range YOUR_KEY --consistency=s --total=100000
 ```

 We encourage running the benchmark test when setting up an etcd cluster for the first time in a new environment to ensure the cluster achieves adequate performance; cluster latency and throughput can be sensitive to minor environment differences.


@@ -16,7 +16,7 @@ etcd takes several certificate related configuration options, either through com
 `--key-file=<path>`: Key for the certificate. Must be unencrypted.

-`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail.
+`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. If [authentication][auth] is enabled, the certificate provides credentials for the user name given by the Common Name field.

 `--trusted-ca-file=<path>`: Trusted certificate authority.
@@ -222,3 +222,4 @@ The certificate needs to be signed for the member's FQDN in its Subject Name, us
 [tls-setup]: ../../hack/tls-setup
 [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
 [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
+[auth]: authentication.md


@@ -30,6 +30,22 @@ resp.TTL == -1
 err == nil
 ```

+`clientv3.NewFromConfigFile` is moved to `yaml.NewConfig`.
+
+Before
+
+```go
+import "github.com/coreos/etcd/clientv3"
+clientv3.NewFromConfigFile
+```
+
+After
+
+```go
+import clientv3yaml "github.com/coreos/etcd/clientv3/yaml"
+clientv3yaml.NewConfig
+```
+
 ### Server upgrade checklists

 #### Upgrade requirements
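
For illustration, a minimal call site for the relocated function might look like this — a sketch assuming `NewConfig` reads a client configuration from a yaml file and returns a `*clientv3.Config` plus an error, as in the 3.2 tree; the file path is hypothetical:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	clientv3yaml "github.com/coreos/etcd/clientv3/yaml"
)

func main() {
	cfg, err := clientv3yaml.NewConfig("etcd-client.yaml") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```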


@@ -16,7 +16,7 @@ etcd takes several certificate related configuration options, either through com
 `--key-file=<path>`: Key for the certificate. Must be unencrypted.

-`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail.
+`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. If [authentication][auth] is enabled, the certificate provides credentials for the user name given by the Common Name field.

 `--trusted-ca-file=<path>`: Trusted certificate authority.
@@ -191,3 +191,4 @@ If you need your certificate to be signed for your member's FQDN in its Subject
 [tls-setup]: ../../hack/tls-setup
 [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
 [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
+[auth]: authentication.md


@@ -144,11 +144,6 @@
 			"license": "BSD 2-clause \"Simplified\" License",
 			"confidence": 0.963
 		},
-		{
-			"project": "github.com/shurcooL/sanitized_anchor_name",
-			"license": "MIT License",
-			"confidence": 1
-		},
 		{
 			"project": "github.com/spf13/cobra",
 			"license": "Apache License 2.0",

build

@@ -3,9 +3,7 @@
 # set some environment variables
 ORG_PATH="github.com/coreos"
 REPO_PATH="${ORG_PATH}/etcd"
-export GO15VENDOREXPERIMENT="1"
-
-eval $(go env)
 GIT_SHA=`git rev-parse --short HEAD || echo "GitNotFound"`
 if [ ! -z "$FAILPOINTS" ]; then
 	GIT_SHA="$GIT_SHA"-FAILPOINTS
@@ -17,11 +15,7 @@ GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=$
 # enable/disable failpoints
 toggle_failpoints() {
 	FAILPKGS="etcdserver/ mvcc/backend/"
-	mode="disable"
-	if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
-	if [ ! -z "$1" ]; then mode="$1"; fi
+	mode="$1"
 	if which gofail >/dev/null 2>&1; then
 		gofail "$mode" $FAILPKGS
 	elif [ "$mode" != "disable" ]; then
@@ -30,19 +24,26 @@ toggle_failpoints() {
 	fi
 }

+toggle_failpoints_default() {
+	mode="disable"
+	if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
+	toggle_failpoints "$mode"
+}
+
 etcd_build() {
 	out="bin"
 	if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
-	toggle_failpoints
+	toggle_failpoints_default
 	# Static compilation is useful when etcd is run in a container
 	CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcd ${REPO_PATH}/cmd/etcd || return
 	CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl || return
 }

 etcd_setup_gopath() {
-	CDIR=$(cd `dirname "$0"` && pwd)
+	d=$(dirname "$0")
+	CDIR=$(cd "$d" && pwd)
 	cd "$CDIR"
-	etcdGOPATH=${CDIR}/gopath
+	etcdGOPATH="${CDIR}/gopath"
 	# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
 	if [ -n "$GOPATH" ]; then
 		GOPATH=":$GOPATH"
@@ -53,7 +54,7 @@ etcd_setup_gopath() {
 	ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src
 }

-toggle_failpoints
+toggle_failpoints_default

 # only build when called directly, not sourced
 if echo "$0" | grep "build$" >/dev/null; then


@@ -182,7 +182,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
 	host = url.Host
 	switch url.Scheme {
 	case "http", "https":
-	case "unix":
+	case "unix", "unixs":
 		proto = "unix"
 		host = url.Host + url.Path
 	default:
@@ -197,7 +197,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
 	case "unix":
 	case "http":
 		creds = nil
-	case "https":
+	case "https", "unixs":
 		if creds != nil {
 			break
 		}
@@ -322,7 +322,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
 	opts = append(opts, c.cfg.DialOptions...)

-	conn, err := grpc.Dial(host, opts...)
+	conn, err := grpc.DialContext(c.ctx, host, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -367,7 +367,9 @@ func newClient(cfg *Config) (*Client, error) {
 	}

 	client.balancer = newSimpleBalancer(cfg.Endpoints)
-	conn, err := client.dial("", grpc.WithBalancer(client.balancer))
+	// use Endpoints[0] so that for https:// without any tls config given, then
+	// grpc will assume the ServerName is in the endpoint.
+	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
 	if err != nil {
 		client.cancel()
 		client.balancer.Close()
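
Taken together with the `unixs://` commits above, the client can now be pointed at a TLS endpoint without hand-building a `tls.Config`; a minimal sketch, with an illustrative socket address:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// "unixs://" marks a TLS connection over a unix domain socket, so
	// processCreds derives transport credentials the same way as "https".
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"unixs://localhost:12345"}, // illustrative address
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```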


@@ -30,7 +30,7 @@ import (
 	"google.golang.org/grpc"
 )

-func ExampleMetrics_range() {
+func ExampleClient_metrics() {
 	cli, err := clientv3.New(clientv3.Config{
 		Endpoints: endpoints,
 		DialOptions: []grpc.DialOption{


@@ -66,6 +66,22 @@ func TestDialTLSExpired(t *testing.T) {
 	}
 }

+// TestDialTLSNoConfig ensures the client fails to dial / times out
+// when TLS endpoints (https, unixs) are given but no tls config.
+func TestDialTLSNoConfig(t *testing.T) {
+	defer testutil.AfterTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
+	defer clus.Terminate(t)
+	// expect 'signed by unknown authority'
+	_, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{clus.Members[0].GRPCAddr()},
+		DialTimeout: time.Second,
+	})
+	if err != grpc.ErrClientConnTimeout {
+		t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
+	}
+}
+
 // TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
 func TestDialSetEndpointsBeforeFail(t *testing.T) {
 	testDialSetEndpoints(t, true)


@@ -1,3 +1,18 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Semantic Versions http://semver.org
 package semver

 import (
@@ -29,35 +44,21 @@ func splitOff(input *string, delim string) (val string) {
 	return val
 }

+func New(version string) *Version {
+	return Must(NewVersion(version))
+}
+
 func NewVersion(version string) (*Version, error) {
 	v := Version{}

-	dotParts := strings.SplitN(version, ".", 3)
-
-	if len(dotParts) != 3 {
-		return nil, errors.New(fmt.Sprintf("%s is not in dotted-tri format", version))
+	if err := v.Set(version); err != nil {
+		return nil, err
 	}

-	v.Metadata = splitOff(&dotParts[2], "+")
-	v.PreRelease = PreRelease(splitOff(&dotParts[2], "-"))
-
-	parsed := make([]int64, 3, 3)
-
-	for i, v := range dotParts[:3] {
-		val, err := strconv.ParseInt(v, 10, 64)
-		parsed[i] = val
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	v.Major = parsed[0]
-	v.Minor = parsed[1]
-	v.Patch = parsed[2]
-
 	return &v, nil
 }

+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
 func Must(v *Version, err error) *Version {
 	if err != nil {
 		panic(err)
@@ -65,45 +66,99 @@ func Must(v *Version, err error) *Version {
 	return v
 }

-func (v *Version) String() string {
+// Set parses and updates v from the given version string. Implements flag.Value
+func (v *Version) Set(version string) error {
+	metadata := splitOff(&version, "+")
+	preRelease := PreRelease(splitOff(&version, "-"))
+	dotParts := strings.SplitN(version, ".", 3)
+
+	if len(dotParts) != 3 {
+		return fmt.Errorf("%s is not in dotted-tri format", version)
+	}
+
+	parsed := make([]int64, 3, 3)
+
+	for i, v := range dotParts[:3] {
+		val, err := strconv.ParseInt(v, 10, 64)
+		parsed[i] = val
+		if err != nil {
+			return err
+		}
+	}
+
+	v.Metadata = metadata
+	v.PreRelease = preRelease
+	v.Major = parsed[0]
+	v.Minor = parsed[1]
+	v.Patch = parsed[2]
+	return nil
+}
+
+func (v Version) String() string {
 	var buffer bytes.Buffer

-	base := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
-	buffer.WriteString(base)
+	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)

 	if v.PreRelease != "" {
-		buffer.WriteString(fmt.Sprintf("-%s", v.PreRelease))
+		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
 	}

 	if v.Metadata != "" {
-		buffer.WriteString(fmt.Sprintf("+%s", v.Metadata))
+		fmt.Fprintf(&buffer, "+%s", v.Metadata)
 	}

 	return buffer.String()
 }

-func (v *Version) LessThan(versionB Version) bool {
-	versionA := *v
-	cmp := recursiveCompare(versionA.Slice(), versionB.Slice())
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var data string
+	if err := unmarshal(&data); err != nil {
+		return err
+	}
+	return v.Set(data)
+}

-	if cmp == 0 {
-		cmp = preReleaseCompare(versionA, versionB)
-	}
+func (v Version) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + v.String() + `"`), nil
+}

-	if cmp == -1 {
-		return true
-	}
+func (v *Version) UnmarshalJSON(data []byte) error {
+	l := len(data)
+	if l == 0 || string(data) == `""` {
+		return nil
+	}
+	if l < 2 || data[0] != '"' || data[l-1] != '"' {
+		return errors.New("invalid semver string")
+	}
+	return v.Set(string(data[1 : l-1]))
+}

-	return false
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+		return cmp
+	}
+	return preReleaseCompare(v, versionB)
 }

-/* Slice converts the comparable parts of the semver into a slice of strings */
-func (v *Version) Slice() []int64 {
+// Equal tests if v is equal to versionB.
+func (v Version) Equal(versionB Version) bool {
+	return v.Compare(versionB) == 0
+}
+
+// LessThan tests if v is less than versionB.
+func (v Version) LessThan(versionB Version) bool {
+	return v.Compare(versionB) < 0
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
 	return []int64{v.Major, v.Minor, v.Patch}
 }

-func (p *PreRelease) Slice() []string {
-	preRelease := string(*p)
+func (p PreRelease) Slice() []string {
+	preRelease := string(p)
 	return strings.Split(preRelease, ".")
 }
@@ -119,7 +174,7 @@ func preReleaseCompare(versionA Version, versionB Version) int {
 		return -1
 	}

-	// If there is a prelease, check and compare each part.
+	// If there is a prerelease, check and compare each part.
 	return recursivePreReleaseCompare(a.Slice(), b.Slice())
 }

@@ -141,9 +196,12 @@ func recursiveCompare(versionA []int64, versionB []int64) int {
 }

 func recursivePreReleaseCompare(versionA []string, versionB []string) int {
-	// Handle slice length disparity.
+	// A larger set of pre-release fields has a higher precedence than a smaller set,
+	// if all of the preceding identifiers are equal.
 	if len(versionA) == 0 {
-		// Nothing to compare too, so we return 0
+		if len(versionB) > 0 {
+			return -1
+		}
 		return 0
 	} else if len(versionB) == 0 {
 		// We're longer than versionB so return 1.
@@ -153,7 +211,8 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int {
 	a := versionA[0]
 	b := versionB[0]

-	aInt := false; bInt := false
+	aInt := false
+	bInt := false
 	aI, err := strconv.Atoi(versionA[0])
 	if err == nil {
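
A short usage sketch of the refreshed go-semver API, using only identifiers visible in the diff above (import path per the vendor directory):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// New panics on malformed input; NewVersion returns an error instead.
	a := semver.New("3.2.0")
	b := semver.New("3.2.0-rc.1")

	fmt.Println(a.Compare(*b))  // 1: a release sorts after its release candidate
	fmt.Println(b.LessThan(*a)) // true

	// Set implements flag.Value, so a Version can be parsed in place.
	var v semver.Version
	if err := v.Set("1.2.3-alpha+build1"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(v.Major, v.PreRelease, v.Metadata) // 1 alpha build1
}
```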


@@ -1,3 +1,17 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package semver

 import (


@@ -45,7 +45,11 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te
 			break
 		}
 		if v.IsNil() {
-			v.Set(reflect.New(v.Type().Elem()))
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
 		}
 		if v.Type().NumMethod() > 0 {
 			if u, ok := v.Interface().(json.Unmarshaler); ok {
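
The `CanSet` guard matters because `reflect.Value.Set` panics on values that were not reached through a pointer; a tiny standalone demonstration (not part of the diff):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// A field reached from a non-pointer value is not addressable.
	v := reflect.ValueOf(struct{ P *int }{}).Field(0)
	fmt.Println(v.CanSet()) // false
	// v.Set(reflect.New(v.Type().Elem())) would panic: unaddressable value.
	// The patched indirect() falls back to a freshly allocated value instead:
	if !v.CanSet() {
		v = reflect.New(v.Type().Elem())
	}
	fmt.Println(v.Type()) // *int
}
```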


@@ -15,12 +15,12 @@ import (
 func Marshal(o interface{}) ([]byte, error) {
 	j, err := json.Marshal(o)
 	if err != nil {
-		return nil, fmt.Errorf("error marshaling into JSON: ", err)
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
 	}

 	y, err := JSONToYAML(j)
 	if err != nil {
-		return nil, fmt.Errorf("error converting JSON to YAML: ", err)
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
 	}

 	return y, nil
@@ -48,7 +48,7 @@ func JSONToYAML(j []byte) ([]byte, error) {
 	var jsonObj interface{}

 	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
 	// Go JSON library doesn't try to pick the right number type (int, float,
-	// etc.) when unmarshling to interface{}, it just picks float64
+	// etc.) when unmarshalling to interface{}, it just picks float64
 	// universally. go-yaml does go through the effort of picking the right
 	// number type, so we can preserve number type throughout this process.
 	err := yaml.Unmarshal(j, &jsonObj)
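
Since this package round-trips values through JSON, `json` tags drive the field names; a small usage sketch (the struct and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Config struct {
	Name string `json:"name"`
	Size int    `json:"size"`
}

func main() {
	y, err := yaml.Marshal(Config{Name: "etcd", Size: 3})
	if err != nil {
		fmt.Println(err) // now prints the cause, thanks to the %v fix above
		return
	}
	fmt.Print(string(y))
	// Output:
	// name: etcd
	// size: 3
}
```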


@@ -1,3 +1,5 @@
+// +build !windows
+
 package pty

 import "syscall"

cmd/vendor/github.com/kr/pty/pty_dragonfly.go (new file)

@ -0,0 +1,76 @@
package pty
import (
"errors"
"os"
"strings"
"syscall"
"unsafe"
)
// same code as pty_darwin.go
func open() (pty, tty *os.File, err error) {
p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
if err != nil {
return nil, nil, err
}
sname, err := ptsname(p)
if err != nil {
return nil, nil, err
}
err = grantpt(p)
if err != nil {
return nil, nil, err
}
err = unlockpt(p)
if err != nil {
return nil, nil, err
}
t, err := os.OpenFile(sname, os.O_RDWR, 0)
if err != nil {
return nil, nil, err
}
return p, t, nil
}
func grantpt(f *os.File) error {
_, err := isptmaster(f.Fd())
return err
}
func unlockpt(f *os.File) error {
_, err := isptmaster(f.Fd())
return err
}
func isptmaster(fd uintptr) (bool, error) {
err := ioctl(fd, syscall.TIOCISPTMASTER, 0)
return err == nil, err
}
var (
emptyFiodgnameArg fiodgnameArg
ioctl_FIODNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg))
)
func ptsname(f *os.File) (string, error) {
name := make([]byte, _C_SPECNAMELEN)
fa := fiodgnameArg{Name: (*byte)(unsafe.Pointer(&name[0])), Len: _C_SPECNAMELEN, Pad_cgo_0: [4]byte{0, 0, 0, 0}}
err := ioctl(f.Fd(), ioctl_FIODNAME, uintptr(unsafe.Pointer(&fa)))
if err != nil {
return "", err
}
for i, c := range name {
if c == 0 {
s := "/dev/" + string(name[:i])
return strings.Replace(s, "ptm", "pts", -1), nil
}
}
return "", errors.New("TIOCPTYGNAME string not NUL-terminated")
}
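
The lowercase helpers above back the package's exported entry points; a brief usage sketch of the public API, assuming the upstream `pty.Start` signature:

```go
package main

import (
	"io"
	"os"
	"os/exec"

	"github.com/kr/pty"
)

func main() {
	// Start runs the command with its stdio attached to a new pty,
	// allocated through the platform-specific open() shown above.
	c := exec.Command("ls")
	f, err := pty.Start(c)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	io.Copy(os.Stdout, f) // relay the child's output; EOF errors ignored here
}
```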


@@ -1,4 +1,4 @@
-// +build !linux,!darwin,!freebsd
+// +build !linux,!darwin,!freebsd,!dragonfly

 package pty


@@ -1,3 +1,5 @@
+// +build !windows
+
 package pty

 import (

cmd/vendor/github.com/kr/pty/types_dragonfly.go (new file)

@ -0,0 +1,17 @@
// +build ignore
package pty
/*
#define _KERNEL
#include <sys/conf.h>
#include <sys/param.h>
#include <sys/filio.h>
*/
import "C"
const (
_C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */
)
type fiodgnameArg C.struct_fiodname_args


@@ -1,3 +1,5 @@
+// +build !windows
+
 package pty

 import (

cmd/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go (new file)

@ -0,0 +1,14 @@
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs types_dragonfly.go
package pty
const (
_C_SPECNAMELEN = 0x3f
)
type fiodgnameArg struct {
Name *byte
Len uint32
Pad_cgo_0 [4]byte
}

cmd/vendor/github.com/kr/pty/ztypes_mipsx.go (new file)

@ -0,0 +1,12 @@
// Created by cgo -godefs - DO NOT EDIT
// cgo -godefs types.go
// +build linux
// +build mips mipsle mips64 mips64le
package pty
type (
_C_int int32
_C_uint uint32
)


@@ -15,8 +15,7 @@ package blackfriday
 import (
 	"bytes"
-
-	"github.com/shurcooL/sanitized_anchor_name"
+	"unicode"
 )

 // Parse block-level data.
@@ -243,7 +242,7 @@ func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
 	}
 	if end > i {
 		if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
-			id = sanitized_anchor_name.Create(string(data[i:end]))
+			id = SanitizedAnchorName(string(data[i:end]))
 		}
 		work := func() bool {
 			p.inline(out, data[i:end])
@@ -1364,7 +1363,7 @@ func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
 			id := ""
 			if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
-				id = sanitized_anchor_name.Create(string(data[prev:eol]))
+				id = SanitizedAnchorName(string(data[prev:eol]))
 			}

 			p.r.Header(out, work, level, id)
@@ -1428,3 +1427,24 @@ func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
 	p.renderParagraph(out, data[:i])
 	return i
 }
+
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
+//
+// It implements the algorithm specified in the package comment.
+func SanitizedAnchorName(text string) string {
+	var anchorName []rune
+	futureDash := false
+	for _, r := range text {
+		switch {
+		case unicode.IsLetter(r) || unicode.IsNumber(r):
+			if futureDash && len(anchorName) > 0 {
+				anchorName = append(anchorName, '-')
+			}
+			futureDash = false
+			anchorName = append(anchorName, unicode.ToLower(r))
+		default:
+			futureDash = true
+		}
+	}
+	return string(anchorName)
+}
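
The exported function behaves per the algorithm described in the package comment; a brief sketch, with outputs shown in comments:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// Letters and numbers are lower-cased; every other run of characters
	// between them collapses to a single dash, and edges are trimmed.
	fmt.Println(blackfriday.SanitizedAnchorName("Hello, World!"))       // hello-world
	fmt.Println(blackfriday.SanitizedAnchorName(" 1.2 Release Notes ")) // 1-2-release-notes
}
```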

cmd/vendor/github.com/russross/blackfriday/doc.go (new file)

@ -0,0 +1,32 @@
// Package blackfriday is a Markdown processor.
//
// It translates plain text with simple formatting rules into HTML or LaTeX.
//
// Sanitized Anchor Names
//
// Blackfriday includes an algorithm for creating sanitized anchor names
// corresponding to a given input text. This algorithm is used to create
// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
// algorithm is specified below, so that other packages can create
// compatible anchor names and links to those anchors.
//
// The algorithm iterates over the input text, interpreted as UTF-8,
// one Unicode code point (rune) at a time. All runes that are letters (category L)
// or numbers (category N) are considered valid characters. They are mapped to
// lower case, and included in the output. All other runes are considered
// invalid characters. Invalid characters that precede the first valid character,
// as well as invalid characters that follow the last valid character,
// are dropped completely. All other sequences of invalid characters
// between two valid characters are replaced with a single dash character '-'.
//
// SanitizedAnchorName exposes this functionality, and can be used to
// create compatible links to the anchor names generated by blackfriday.
// This algorithm is also implemented in a small standalone package at
// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
// that want a small package and don't need full functionality of blackfriday.
package blackfriday
// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
// github.com/shurcooL/sanitized_anchor_name.
// Otherwise, users of sanitized_anchor_name will get anchor names
// that are incompatible with those generated by blackfriday.
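A quick illustration of the algorithm specified above, using the exported function added in this change:

package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	// Letters and numbers are lowercased; each run of other characters
	// between valid ones collapses to a single dash; leading and
	// trailing invalid characters are dropped.
	fmt.Println(blackfriday.SanitizedAnchorName("Hello, World!")) // hello-world
	fmt.Println(blackfriday.SanitizedAnchorName("  étcd 3.2  ")) // étcd-3-2
}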

View File

@ -13,9 +13,6 @@
//
//
-// Blackfriday markdown processor.
-//
-// Translates plain text with simple formatting rules into HTML or LaTeX.
package blackfriday
import (

View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2015 Dmitri Shuralyov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,29 +0,0 @@
// Package sanitized_anchor_name provides a func to create sanitized anchor names.
//
// Its logic can be reused by multiple packages to create interoperable anchor names
// and links to those anchors.
//
// At this time, it does not try to ensure that generated anchor names
// are unique, that responsibility falls on the caller.
package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name"
import "unicode"
// Create returns a sanitized anchor name for the given text.
func Create(text string) string {
var anchorName []rune
var futureDash = false
for _, r := range []rune(text) {
switch {
case unicode.IsLetter(r) || unicode.IsNumber(r):
if futureDash && len(anchorName) > 0 {
anchorName = append(anchorName, '-')
}
futureDash = false
anchorName = append(anchorName, unicode.ToLower(r))
default:
futureDash = true
}
}
return string(anchorName)
}

View File

@ -90,16 +90,20 @@ func (in *input) charinfoNFKC(p int) (uint16, int) {
}
func (in *input) hangul(p int) (r rune) {
+	var size int
	if in.bytes == nil {
		if !isHangulString(in.str[p:]) {
			return 0
		}
-		r, _ = utf8.DecodeRuneInString(in.str[p:])
+		r, size = utf8.DecodeRuneInString(in.str[p:])
	} else {
		if !isHangul(in.bytes[p:]) {
			return 0
		}
-		r, _ = utf8.DecodeRune(in.bytes[p:])
+		r, size = utf8.DecodeRune(in.bytes[p:])
+	}
+	if size != hangulUTF8Size {
+		return 0
	}
	return r
}
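The new size check matters for input that ends mid-rune: utf8.DecodeRune reports utf8.RuneError with size 1 on a truncated sequence, which the old code would have passed through as a decoded rune. A small demonstration of the standard library behavior (hangulUTF8Size is 3):

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	full := "가"       // U+AC00, a 3-byte Hangul syllable
	trunc := full[:2] // cut off mid-rune

	r, size := utf8.DecodeRuneInString(full)
	fmt.Printf("%q %d\n", r, size) // '가' 3

	r, size = utf8.DecodeRuneInString(trunc)
	fmt.Printf("%q %d\n", r, size) // '\ufffd' 1 — size != 3, so hangul() now returns 0
}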

View File

@ -288,14 +288,11 @@ func (rc *raftNode) startRaft() {
	rc.node = raft.StartNode(c, startPeers)
}
-	ss := &stats.ServerStats{}
-	ss.Initialize()
rc.transport = &rafthttp.Transport{
	ID:          types.ID(rc.id),
	ClusterID:   0x1000,
	Raft:        rc,
-	ServerStats: ss,
+	ServerStats: stats.NewServerStats("", ""),
	LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
	ErrorC:      make(chan error),
}

View File

@ -80,7 +80,7 @@ func testElect(cx ctlCtx) {
	if err = blocked.Signal(os.Interrupt); err != nil {
		cx.t.Fatal(err)
	}
-	if err := closeWithTimeout(blocked, time.Second); err != nil {
+	if err = closeWithTimeout(blocked, time.Second); err != nil {
		cx.t.Fatal(err)
	}
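The := to = change avoids declaring a second err that shadows the outer one, the class of bug that `go tool vet -shadow` (enabled elsewhere in this patchset) reports. A minimal sketch of the hazard:

package main

import (
	"errors"
	"fmt"
)

func run() error {
	var err error
	if true {
		err := errors.New("boom") // := shadows the outer err
		_ = err
	}
	return err // nil: the inner assignment never escaped the block
}

func main() {
	fmt.Println(run()) // <nil>
}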

View File

@ -150,8 +150,8 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
	}
	go func() {
-		cctx, _ := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
+		cctx, ccancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
+		defer ccancel()
		for limit.Wait(cctx) == nil {
			binary.PutVarint(k, int64(rand.Int63n(math.MaxInt64)))
			requests <- v3.OpPut(checkPerfPrefix+string(k), v)
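The original code discarded the CancelFunc from context.WithTimeout, which leaks the context's resources until the timer fires. The corrected pattern in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel() // releases the context's timer even on early return

	select {
	case <-time.After(time.Second):
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println("timed out:", ctx.Err())
	}
}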

View File

@ -46,15 +46,16 @@ import (
)
const (
	authPrefix     = "/v2/auth"
	keysPrefix     = "/v2/keys"
+	machinesPrefix = "/v2/machines"
	membersPrefix  = "/v2/members"
	statsPrefix    = "/v2/stats"
	varsPath       = "/debug/vars"
	metricsPath    = "/metrics"
	healthPath     = "/health"
	versionPath    = "/version"
	configPath     = "/config"
)
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
@ -83,6 +84,8 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
		clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
	}
+	mah := &machinesHandler{cluster: server.Cluster()}
	sech := &authHandler{
		sec:     sec,
		cluster: server.Cluster(),
@ -103,6 +106,7 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
	mux.Handle(metricsPath, prometheus.Handler())
	mux.Handle(membersPrefix, mh)
	mux.Handle(membersPrefix+"/", mh)
+	mux.Handle(machinesPrefix, mah)
	handleAuth(mux, sech)
	return requestLogger(mux)
@ -164,6 +168,18 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	}
}
+type machinesHandler struct {
+	cluster api.Cluster
+}
+func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if !allowMethod(w, r.Method, "GET", "HEAD") {
+		return
+	}
+	endpoints := h.cluster.ClientURLs()
+	w.Write([]byte(strings.Join(endpoints, ", ")))
+}
type membersHandler struct {
	sec    auth.Store
	server etcdserver.Server
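With the endpoint restored, a client can fetch the member list as a comma-separated string of client URLs; a hypothetical probe against a local listener (the URL is an assumption):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:2379/v2/machines")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(b)) // e.g. "http://127.0.0.1:2379" (comma-separated client URLs)
}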

View File

@ -1220,6 +1220,56 @@ func TestWriteEvent(t *testing.T) {
	}
}
func TestV2DMachinesEndpoint(t *testing.T) {
tests := []struct {
method string
wcode int
}{
{"GET", http.StatusOK},
{"HEAD", http.StatusOK},
{"POST", http.StatusMethodNotAllowed},
}
m := &machinesHandler{cluster: &fakeCluster{}}
s := httptest.NewServer(m)
defer s.Close()
for _, tt := range tests {
req, err := http.NewRequest(tt.method, s.URL+machinesPrefix, nil)
if err != nil {
t.Fatal(err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != tt.wcode {
t.Errorf("StatusCode = %d, expected %d", resp.StatusCode, tt.wcode)
}
}
}
func TestServeMachines(t *testing.T) {
cluster := &fakeCluster{
clientURLs: []string{"http://localhost:8080", "http://localhost:8081", "http://localhost:8082"},
}
writer := httptest.NewRecorder()
req, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Fatal(err)
}
h := &machinesHandler{cluster: cluster}
h.ServeHTTP(writer, req)
w := "http://localhost:8080, http://localhost:8081, http://localhost:8082"
if g := writer.Body.String(); g != w {
t.Errorf("body = %s, want %s", g, w)
}
if writer.Code != http.StatusOK {
t.Errorf("code = %d, want %d", writer.Code, http.StatusOK)
}
}
func TestGetID(t *testing.T) {
	tests := []struct {
		path string

View File

@ -318,33 +318,36 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang
	}
}
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
-	ok := true
-	for _, c := range rt.Compare {
-		if _, ok = a.applyCompare(c); !ok {
-			break
-		}
-	}
-
-	var reqs []*pb.RequestOp
-	if ok {
-		reqs = rt.Success
-	} else {
-		reqs = rt.Failure
-	}
-	if err := a.checkRequestPut(reqs); err != nil {
-		return nil, err
-	}
-	if err := a.checkRequestRange(reqs); err != nil {
-		return nil, err
-	}
+	isWrite := !isTxnReadonly(rt)
+	txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
+
+	reqs, ok := a.compareToOps(txn, rt)
+	if isWrite {
+		if err := a.checkRequestPut(txn, reqs); err != nil {
+			txn.End()
+			return nil, err
+		}
+	}
+	if err := checkRequestRange(txn, reqs); err != nil {
+		txn.End()
+		return nil, err
+	}
	resps := make([]*pb.ResponseOp, len(reqs))
+	txnResp := &pb.TxnResponse{
+		Responses: resps,
+		Succeeded: ok,
+		Header:    &pb.ResponseHeader{},
+	}
-	// When executing the operations of txn, etcd must hold the txn lock so
-	// readers do not see any intermediate results.
-	// TODO: use Read txn if only Ranges
-	txn := a.s.KV().Write()
+	// When executing mutable txn ops, etcd must hold the txn lock so
+	// readers do not see any intermediate results. Since writes are
+	// serialized on the raft loop, the revision in the read view will
+	// be the revision of the write txn.
+	if isWrite {
+		txn.End()
+		txn = a.s.KV().Write()
+	}
	for i := range reqs {
		resps[i] = a.applyUnion(txn, reqs[i])
	}
@ -354,23 +357,25 @@
	}
	txn.End()
-	txnResp := &pb.TxnResponse{}
-	txnResp.Header = &pb.ResponseHeader{}
	txnResp.Header.Revision = rev
-	txnResp.Responses = resps
-	txnResp.Succeeded = ok
	return txnResp, nil
}
-// applyCompare applies the compare request.
-// It returns the revision at which the comparison happens. If the comparison
-// succeeds, the it returns true. Otherwise it returns false.
-func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
-	rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{})
-	rev := rr.Rev
+func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) {
+	for _, c := range rt.Compare {
+		if !applyCompare(rv, c) {
+			return rt.Failure, false
+		}
+	}
+	return rt.Success, true
+}
+
+// applyCompare applies the compare request.
+// If the comparison succeeds, it returns true. Otherwise, returns false.
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
+	rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{})
	if err != nil {
-		return rev, false
+		return false
	}
	var ckv mvccpb.KeyValue
	if len(rr.KVs) != 0 {
@ -382,7 +387,7 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
		// We can treat non-existence as the empty set explicitly, such that
		// even a key with a value of length 0 bytes is still a real key
		// that was written that way
-		return rev, false
+		return false
	}
}
@ -414,23 +419,15 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) {
	switch c.Result {
	case pb.Compare_EQUAL:
-		if result != 0 {
-			return rev, false
-		}
+		return result == 0
	case pb.Compare_NOT_EQUAL:
-		if result == 0 {
-			return rev, false
-		}
+		return result != 0
	case pb.Compare_GREATER:
-		if result != 1 {
-			return rev, false
-		}
+		return result > 0
	case pb.Compare_LESS:
-		if result != -1 {
-			return rev, false
-		}
+		return result < 0
	}
-	return rev, true
+	return true
}
func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp {
@ -770,7 +767,7 @@ func (s *kvSortByValue) Less(i, j int) bool {
	return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
}
-func (a *applierV3backend) checkRequestPut(reqs []*pb.RequestOp) error {
+func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
		if !ok {
@ -782,7 +779,7 @@ func (a *applierV3backend) checkRequestPut(reqs []*pb.RequestOp) error {
		}
		if preq.IgnoreValue || preq.IgnoreLease {
			// expects previous key-value, error if not exist
-			rr, err := a.s.KV().Range(preq.Key, nil, mvcc.RangeOptions{})
+			rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
			if err != nil {
				return err
			}
@ -800,7 +797,7 @@ func (a *applierV3backend) checkRequestPut(reqs []*pb.RequestOp) error {
	return nil
}
-func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
+func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
		if !ok {
@ -811,10 +808,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error {
			continue
		}
-		if greq.Revision > a.s.KV().Rev() {
+		if greq.Revision > rv.Rev() {
			return mvcc.ErrFutureRev
		}
-		if greq.Revision < a.s.KV().FirstRev() {
+		if greq.Revision < rv.FirstRev() {
			return mvcc.ErrCompacted
		}
	}
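For reference, this is the server path behind client transactions like the following clientv3 sketch; the If comparisons choose between the Then and Else ops, now all evaluated against a single read view (the endpoint is an assumption):

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// If "k" has never been written (version 0), create it; otherwise read it.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Version("k"), "=", 0)).
		Then(clientv3.OpPut("k", "v")).
		Else(clientv3.OpGet("k")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("succeeded:", resp.Succeeded, "revision:", resp.Header.Revision)
}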

View File

@ -58,6 +58,12 @@ var (
Name: "proposals_failed_total", Name: "proposals_failed_total",
Help: "The total number of failed proposals seen.", Help: "The total number of failed proposals seen.",
}) })
leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "etcd_debugging",
Subsystem: "server",
Name: "lease_expired_total",
Help: "The total number of expired leases.",
})
) )
func init() { func init() {
@ -67,6 +73,7 @@ func init() {
prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsApplied)
prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsPending)
prometheus.MustRegister(proposalsFailed) prometheus.MustRegister(proposalsFailed)
prometheus.MustRegister(leaseExpired)
} }
func monitorFileDescriptor(done <-chan struct{}) { func monitorFileDescriptor(done <-chan struct{}) {

View File

@ -395,11 +395,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
		return nil, fmt.Errorf("cannot access member directory: %v", terr)
	}
-	sstats := &stats.ServerStats{
-		Name: cfg.Name,
-		ID:   id.String(),
-	}
-	sstats.Initialize()
+	sstats := stats.NewServerStats(cfg.Name, id.String())
	lstats := stats.NewLeaderStats(id.String())
	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
@ -743,6 +739,7 @@ func (s *EtcdServer) run() {
				lid := lease.ID
				s.goAttach(func() {
					s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
+					leaseExpired.Inc()
					<-c
				})
			}

View File

@ -24,25 +24,30 @@ import (
// LeaderStats is used by the leader in an etcd cluster, and encapsulates
// statistics about communication with its followers
type LeaderStats struct {
+	leaderStats
+	sync.Mutex
+}
+
+type leaderStats struct {
	// Leader is the ID of the leader in the etcd cluster.
	// TODO(jonboulle): clarify that these are IDs, not names
	Leader    string                    `json:"leader"`
	Followers map[string]*FollowerStats `json:"followers"`
-	sync.Mutex
}
// NewLeaderStats generates a new LeaderStats with the given id as leader
func NewLeaderStats(id string) *LeaderStats {
	return &LeaderStats{
-		Leader:    id,
-		Followers: make(map[string]*FollowerStats),
+		leaderStats: leaderStats{
+			Leader:    id,
+			Followers: make(map[string]*FollowerStats),
+		},
	}
}
func (ls *LeaderStats) JSON() []byte {
	ls.Lock()
-	stats := *ls
+	stats := ls.leaderStats
	ls.Unlock()
	b, err := json.Marshal(stats)
	// TODO(jonboulle): appropriate error handling?

View File

@ -26,6 +26,26 @@ import (
// ServerStats encapsulates various statistics about an EtcdServer and its
// communication with other members of the cluster
type ServerStats struct {
+	serverStats
+	sync.Mutex
+}
+
+func NewServerStats(name, id string) *ServerStats {
+	ss := &ServerStats{
+		serverStats: serverStats{
+			Name: name,
+			ID:   id,
+		},
+	}
+	now := time.Now()
+	ss.StartTime = now
+	ss.LeaderInfo.StartTime = now
+	ss.sendRateQueue = &statsQueue{back: -1}
+	ss.recvRateQueue = &statsQueue{back: -1}
+	return ss
+}
+
+type serverStats struct {
	Name string `json:"name"`
	// ID is the raft ID of the node.
	// TODO(jonboulle): use ID instead of name?
@ -49,17 +69,15 @@ type serverStats struct {
	sendRateQueue *statsQueue
	recvRateQueue *statsQueue
-	sync.Mutex
}
func (ss *ServerStats) JSON() []byte {
	ss.Lock()
-	stats := *ss
+	stats := ss.serverStats
	ss.Unlock()
	stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
-	stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates()
-	stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates()
+	stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
+	stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
	b, err := json.Marshal(stats)
	// TODO(jonboulle): appropriate error handling?
	if err != nil {
@ -68,32 +86,6 @@ func (ss *ServerStats) JSON() []byte {
	return b
}
-// Initialize clears the statistics of ServerStats and resets its start time
-func (ss *ServerStats) Initialize() {
-	if ss == nil {
-		return
-	}
-	now := time.Now()
-	ss.StartTime = now
-	ss.LeaderInfo.StartTime = now
-	ss.sendRateQueue = &statsQueue{
-		back: -1,
-	}
-	ss.recvRateQueue = &statsQueue{
-		back: -1,
-	}
-}
-
-// RecvRates calculates and returns the rate of received append requests
-func (ss *ServerStats) RecvRates() (float64, float64) {
-	return ss.recvRateQueue.Rate()
-}
-
-// SendRates calculates and returns the rate of sent append requests
-func (ss *ServerStats) SendRates() (float64, float64) {
-	return ss.sendRateQueue.Rate()
-}
// RecvAppendReq updates the ServerStats in response to an AppendRequest
// from the given leader being received
func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
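The embedding refactor exists because the old `stats := *ss` copied a struct containing a sync.Mutex, which is both racy and a `go vet` copylocks violation. The pattern in miniature, with illustrative names rather than etcd's:

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

type counterData struct {
	N int `json:"n"`
}

// Counter keeps the data in an embedded struct next to the lock,
// so snapshotting never copies the Mutex itself.
type Counter struct {
	counterData
	sync.Mutex
}

func (c *Counter) JSON() []byte {
	c.Lock()
	snapshot := c.counterData // copies only the data, not the lock
	c.Unlock()
	b, _ := json.Marshal(snapshot)
	return b
}

func main() {
	c := &Counter{}
	c.Lock()
	c.N = 42
	c.Unlock()
	fmt.Println(string(c.JSON())) // {"n":42}
}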

glide.lock generated
View File

@ -1,5 +1,5 @@
-hash: 4248f4a610b399df10cab942b0b3ef8a6d7db9c942bafd115f25d05293571658
-updated: 2017-04-24T16:15:17.066493631-07:00
+hash: cee1f2629857e9c2384ad89ff6014db09498c9af53771e5144ad3a4b510ff00e
+updated: 2017-05-30T10:29:08.22609283-07:00
imports:
- name: github.com/beorn7/perks
  version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
@ -12,7 +12,7 @@ imports:
- name: github.com/cockroachdb/cmux
  version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
- name: github.com/coreos/go-semver
-  version: 568e959cd89871e61434c1143528d9162da89ef2
+  version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
  subpackages:
  - semver
- name: github.com/coreos/go-systemd
@ -27,7 +27,7 @@ imports:
  - capnslog
  - dlopen
- name: github.com/cpuguy83/go-md2man
-  version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
+  version: bcc0a711c5e6bbe72c7cb13d81c7109b45267fd2
  subpackages:
  - md2man
- name: github.com/dgrijalva/jwt-go
@ -35,7 +35,7 @@ imports:
- name: github.com/dustin/go-humanize
  version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
- name: github.com/ghodss/yaml
-  version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
+  version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
- name: github.com/gogo/protobuf
  version: 909568be09de550ed094403c2bf8a261b5bb730a
  subpackages:
@ -64,7 +64,7 @@ imports:
- name: github.com/jonboulle/clockwork
  version: 2eee05ed794112d45db504eb05aa693efd2b8b09
- name: github.com/kr/pty
-  version: f7ee69f31298ecbe5d2b349c711e2547a617d398
+  version: 2c10821df3c3cf905230d078702dfbe9404c9b23
- name: github.com/mattn/go-runewidth
  version: 9e777a8366cce605130a531d2cd6363d07ad7317
  subpackages:
@ -94,9 +94,7 @@ imports:
  subpackages:
  - xfs
- name: github.com/russross/blackfriday
-  version: b253417e1cb644d645a0a3bb1fa5034c8030127c
+  version: 0ba0f2b6ed7c475a92e4df8641825cb7a11d1fa3
-- name: github.com/shurcooL/sanitized_anchor_name
-  version: 79c90efaf01eddc01945af5bc1797859189b830b
- name: github.com/spf13/cobra
  version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
- name: github.com/spf13/pflag
@ -129,7 +127,7 @@ imports:
  subpackages:
  - unix
- name: golang.org/x/text
-  version: a9a820217f98f7c8a207ec1e45a874e1fe12c478
+  version: 4ee4af566555f5fbe026368b75596286a312663a
  subpackages:
  - secure/bidirule
  - transform

View File

@ -7,7 +7,7 @@ import:
- package: github.com/cockroachdb/cmux
  version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
- package: github.com/coreos/go-semver
-  version: 568e959cd89871e61434c1143528d9162da89ef2
+  version: v0.2.0
  subpackages:
  - semver
- package: github.com/coreos/go-systemd
@ -23,7 +23,7 @@ import:
- package: github.com/dustin/go-humanize
  version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
- package: github.com/ghodss/yaml
-  version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
+  version: v1.0.0
- package: github.com/gogo/protobuf
  version: v0.3
  subpackages:
@ -48,7 +48,7 @@ import:
- package: github.com/jonboulle/clockwork
  version: v0.1.0
- package: github.com/kr/pty
-  version: f7ee69f31298ecbe5d2b349c711e2547a617d398
+  version: v1.0.0
- package: github.com/olekukonko/tablewriter
  version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
- package: github.com/mattn/go-runewidth

View File

@ -554,7 +554,7 @@ func (m *member) listenGRPC() error {
		l.Close()
		return err
	}
-	m.grpcAddr = m.grpcBridge.URL()
+	m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr
	m.grpcListener = l
	return nil
}

View File

@ -328,6 +328,58 @@ func TestV3TxnRevision(t *testing.T) {
	}
}
// TestV3TxnCmpHeaderRev tests that the txn header revision is set as expected
// when compared to the Succeeded field in the txn response.
func TestV3TxnCmpHeaderRev(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
kvc := toGRPC(clus.RandClient()).KV
for i := 0; i < 10; i++ {
// Concurrently put a key with a txn comparing on it.
revc := make(chan int64, 1)
go func() {
defer close(revc)
pr := &pb.PutRequest{Key: []byte("k"), Value: []byte("v")}
presp, err := kvc.Put(context.TODO(), pr)
if err != nil {
t.Fatal(err)
}
revc <- presp.Header.Revision
}()
// The read-only txn uses the optimized readindex server path.
txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{
RequestRange: &pb.RangeRequest{Key: []byte("k")}}}
txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}}
// i = 0 /\ Succeeded => put followed txn
cmp := &pb.Compare{
Result: pb.Compare_EQUAL,
Target: pb.Compare_VERSION,
Key: []byte("k"),
TargetUnion: &pb.Compare_Version{Version: int64(i)},
}
txn.Compare = append(txn.Compare, cmp)
tresp, err := kvc.Txn(context.TODO(), txn)
if err != nil {
t.Fatal(err)
}
prev := <-revc
// put followed txn; should eval to false
if prev > tresp.Header.Revision && !tresp.Succeeded {
t.Errorf("#%d: got else but put rev %d followed txn rev (%+v)", i, prev, tresp)
}
// txn follows put; should eval to true
if tresp.Header.Revision >= prev && tresp.Succeeded {
t.Errorf("#%d: got then but put rev %d preceded txn (%+v)", i, prev, tresp)
}
}
}
// TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair.
func TestV3PutIgnoreValue(t *testing.T) {
	defer testutil.AfterTest(t)
@ -1402,9 +1454,9 @@ func TestTLSReloadAtomicReplace(t *testing.T) {
	defer os.RemoveAll(certsDirExp)
	cloneFunc := func() transport.TLSInfo {
-		tlsInfo, err := copyTLSFiles(testTLSInfo, certsDir)
-		if err != nil {
-			t.Fatal(err)
+		tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
+		if terr != nil {
+			t.Fatal(terr)
		}
		if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil {
			t.Fatal(err)
@ -1448,9 +1500,9 @@ func TestTLSReloadCopy(t *testing.T) {
	defer os.RemoveAll(certsDir)
	cloneFunc := func() transport.TLSInfo {
-		tlsInfo, err := copyTLSFiles(testTLSInfo, certsDir)
-		if err != nil {
-			t.Fatal(err)
+		tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir)
+		if terr != nil {
+			t.Fatal(terr)
		}
		return tlsInfo
	}

View File

@ -93,7 +93,9 @@ func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("un
func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
	panic("unexpected Put")
}
-func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { panic("unexpected Changes") }
+func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
+
+func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
type KV interface {
	ReadView
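NewReadOnlyTxnWrite lets a read-only txn flow through code written against TxnWrite: reads work, Changes reports nothing, and a real write panics. The adapter idea in a generic, simplified form (the interfaces here are placeholders, not mvcc's):

package main

import "fmt"

type Reader interface{ Get(k string) string }

type ReadWriter interface {
	Reader
	Put(k, v string)
}

type mapReader map[string]string

func (m mapReader) Get(k string) string { return m[k] }

// readOnlyRW adapts a Reader to the ReadWriter interface;
// writes are programming errors and panic loudly.
type readOnlyRW struct{ Reader }

func (readOnlyRW) Put(k, v string) { panic("unexpected Put on read-only txn") }

func NewReadOnlyRW(r Reader) ReadWriter { return readOnlyRW{r} }

func main() {
	rw := NewReadOnlyRW(mapReader{"k": "v"})
	fmt.Println(rw.Get("k")) // v
	// rw.Put("k", "x")      // would panic
}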

View File

@ -33,13 +33,6 @@ var (
	keyBucketName  = []byte("key")
	metaBucketName = []byte("meta")
-	// markedRevBytesLen is the byte length of marked revision.
-	// The first `revBytesLen` bytes represents a normal revision. The last
-	// one byte is the mark.
-	markedRevBytesLen       = revBytesLen + 1
-	markBytePosition        = markedRevBytesLen - 1
-	markTombstone      byte = 't'
	consistentIndexKeyName  = []byte("consistent_index")
	scheduledCompactKeyName = []byte("scheduledCompactRev")
	finishedCompactKeyName  = []byte("finishedCompactRev")
@ -52,6 +45,17 @@ var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc")
)
+const (
+	// markedRevBytesLen is the byte length of marked revision.
+	// The first `revBytesLen` bytes represents a normal revision. The last
+	// one byte is the mark.
+	markedRevBytesLen = revBytesLen + 1
+	markBytePosition  = markedRevBytesLen - 1
+
+	markTombstone byte = 't'
+
+	restoreChunkKeys = 10000
+)
// ConsistentIndexGetter is an interface that wraps the Get method.
// Consistent index is the offset of an entry in a consistent replicated log.
type ConsistentIndexGetter interface {
@ -247,11 +251,6 @@ func (s *store) restore() error {
	keyToLease := make(map[string]lease.LeaseID)
-	// use an unordered map to hold the temp index data to speed up
-	// the initial key index recovery.
-	// we will convert this unordered map into the tree index later.
-	unordered := make(map[string]*keyIndex, 100000)
	// restore index
	tx := s.b.BatchTx()
	tx.Lock()
@ -260,48 +259,41 @@ func (s *store) restore() error {
		s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
		plog.Printf("restore compact to %d", s.compactMainRev)
	}
+	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
+	scheduledCompact := int64(0)
+	if len(scheduledCompactBytes) != 0 {
+		scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
+	}
-	// TODO: limit N to reduce max memory usage
-	keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0)
-	for i, key := range keys {
-		var kv mvccpb.KeyValue
-		if err := kv.Unmarshal(vals[i]); err != nil {
-			plog.Fatalf("cannot unmarshal event: %v", err)
-		}
-		rev := bytesToRev(key[:revBytesLen])
-		s.currentRev = rev.main
-		// restore index
-		switch {
-		case isTombstone(key):
-			if ki, ok := unordered[string(kv.Key)]; ok {
-				ki.tombstone(rev.main, rev.sub)
-			}
-			delete(keyToLease, string(kv.Key))
-		default:
-			ki, ok := unordered[string(kv.Key)]
-			if ok {
-				ki.put(rev.main, rev.sub)
-			} else {
-				ki = &keyIndex{key: kv.Key}
-				ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version)
-				unordered[string(kv.Key)] = ki
-			}
-			if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease {
-				keyToLease[string(kv.Key)] = lid
-			} else {
-				delete(keyToLease, string(kv.Key))
-			}
-		}
-	}
-	// restore the tree index from the unordered index.
-	for _, v := range unordered {
-		s.kvindex.Insert(v)
-	}
+	// index keys concurrently as they're loaded in from tx
+	unorderedc, donec := make(chan map[string]*keyIndex), make(chan struct{})
+	go func() {
+		defer close(donec)
+		for unordered := range unorderedc {
+			// restore the tree index from the unordered index.
+			for _, v := range unordered {
+				s.kvindex.Insert(v)
+			}
+		}
+	}()
+	for {
+		keys, vals := tx.UnsafeRange(keyBucketName, min, max, restoreChunkKeys)
+		if len(keys) == 0 {
+			break
+		}
+		// unbuffered so keys don't pile up in memory
+		unorderedc <- s.restoreChunk(keys, vals, keyToLease)
+		if len(keys) < restoreChunkKeys {
+			// partial set implies final set
+			break
+		}
+		// next set begins after where this one ended
+		newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
+		newMin.sub++
+		revToBytes(newMin, min)
+	}
+	close(unorderedc)
+	<-donec
	// keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
	// the correct revision should be set to compaction revision in the case, not the largest revision
@ -309,6 +301,9 @@ func (s *store) restore() error {
	if s.currentRev < s.compactMainRev {
		s.currentRev = s.compactMainRev
	}
+	if scheduledCompact <= s.compactMainRev {
+		scheduledCompact = 0
+	}
	for key, lid := range keyToLease {
		if s.le == nil {
@ -320,15 +315,6 @@ func (s *store) restore() error {
		}
	}
-	_, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
-	scheduledCompact := int64(0)
-	if len(scheduledCompactBytes) != 0 {
-		scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
-		if scheduledCompact <= s.compactMainRev {
-			scheduledCompact = 0
-		}
-	}
	tx.Unlock()
	if scheduledCompact != 0 {
@ -339,6 +325,40 @@ func (s *store) restore() error {
	return nil
}
func (s *store) restoreChunk(keys, vals [][]byte, keyToLease map[string]lease.LeaseID) map[string]*keyIndex {
// assume half of keys are overwrites
unordered := make(map[string]*keyIndex, len(keys)/2)
for i, key := range keys {
var kv mvccpb.KeyValue
if err := kv.Unmarshal(vals[i]); err != nil {
plog.Fatalf("cannot unmarshal event: %v", err)
}
rev := bytesToRev(key[:revBytesLen])
s.currentRev = rev.main
kstr := string(kv.Key)
if isTombstone(key) {
if ki, ok := unordered[kstr]; ok {
ki.tombstone(rev.main, rev.sub)
}
delete(keyToLease, kstr)
continue
}
if ki, ok := unordered[kstr]; ok {
ki.put(rev.main, rev.sub)
} else {
ki = &keyIndex{key: kv.Key}
ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version)
unordered[kstr] = ki
}
if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease {
keyToLease[kstr] = lid
} else {
delete(keyToLease, kstr)
}
}
return unordered
}
func (s *store) Close() error {
	close(s.stopc)
	s.fifoSched.Stop()
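The restore rewrite bounds memory by pulling keys in fixed-size chunks and indexing each chunk on a separate goroutine through an unbuffered channel, so backend reads and tree inserts overlap without chunks piling up. The shape of that pipeline, reduced to its essentials (types are placeholders):

package main

import "fmt"

const chunkSize = 3

func main() {
	data := []int{1, 2, 3, 4, 5, 6, 7}

	chunkc, donec := make(chan []int), make(chan struct{})
	go func() { // consumer: "index" each chunk as it arrives
		defer close(donec)
		for chunk := range chunkc {
			fmt.Println("indexed", chunk)
		}
	}()

	for off := 0; off < len(data); off += chunkSize { // producer
		end := off + chunkSize
		if end > len(data) {
			end = len(data)
		}
		chunkc <- data[off:end] // unbuffered: at most one chunk in flight
	}
	close(chunkc)
	<-donec
}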

View File

@ -89,7 +89,8 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{}, &i)
-	defer cleanup(s, be, tmpPath)
+	// use closure to capture 's' to pick up the reassignment
+	defer func() { cleanup(s, be, tmpPath) }()
	// arbitrary number of bytes
	bytesN := 64
@ -103,7 +104,11 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
			txn.End()
		}
	}
+	s.Close()
+
+	b.ReportAllocs()
	b.ResetTimer()
+	s = NewStore(be, &lease.FakeLessor{}, &i)
}
func BenchmarkStoreRestoreRevs1(b *testing.B) { func BenchmarkStoreRestoreRevs1(b *testing.B) {

View File

@ -373,9 +373,11 @@ func TestStoreRestore(t *testing.T) {
		t.Fatal(err)
	}
	b.tx.rangeRespc <- rangeResp{[][]byte{finishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
-	b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
	b.tx.rangeRespc <- rangeResp{[][]byte{scheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
+	b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
+	b.tx.rangeRespc <- rangeResp{nil, nil}
	s.restore()
	if s.compactMainRev != 3 {
@ -386,8 +388,8 @@ func TestStoreRestore(t *testing.T) {
	}
	wact := []testutil.Action{
		{"range", []interface{}{metaBucketName, finishedCompactKeyName, []byte(nil), int64(0)}},
-		{"range", []interface{}{keyBucketName, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(0)}},
		{"range", []interface{}{metaBucketName, scheduledCompactKeyName, []byte(nil), int64(0)}},
+		{"range", []interface{}{keyBucketName, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}},
	}
	if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
		t.Errorf("tx actions = %+v, want %+v", g, wact)

View File

@ -127,6 +127,11 @@ func TestZeroToEnd(t *testing.T) {
	}
	defer f.Close()
+	// Ensure 0 size is a nop so zero-to-end on an empty file won't give EINVAL.
+	if err = ZeroToEnd(f); err != nil {
+		t.Fatal(err)
+	}
	b := make([]byte, 1024)
	for i := range b {
		b[i] = 12

View File

@ -25,6 +25,10 @@ import (
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+	if sizeInBytes == 0 {
+		// fallocate will return EINVAL if length is 0; skip
+		return nil
+	}
	if extendFile {
		return preallocExtend(f, sizeInBytes)
	}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build !linux
+// +build !linux cov
package osutil

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
+// +build linux,!cov
package osutil
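In Go build constraints a comma means AND and a space means OR, so the two files above now partition cleanly: `linux,!cov` selects Linux builds without the cov tag, while `!linux cov` selects everything else, including Linux coverage builds. A file skeleton showing the constraint syntax:

// +build linux,!cov

// Compiled only for plain Linux builds; `go test -tags cov` deselects
// this file and picks up its sibling tagged `// +build !linux cov`,
// whose constraint reads "not Linux OR cov".
package osutil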

View File

@ -140,9 +140,7 @@ func TestSendMessageWhenStreamIsBroken(t *testing.T) {
	}
}
func newServerStats() *stats.ServerStats {
-	ss := &stats.ServerStats{}
-	ss.Initialize()
-	return ss
+	return stats.NewServerStats("", "")
}
func waitStreamWorking(p *peer) bool {

View File

@ -183,7 +183,8 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	}
	dec := &messageDecoder{r: r.Body}
-	m, err := dec.decode()
+	// let snapshots be very large since they can exceed 512MB for large installations
+	m, err := dec.decodeLimit(uint64(1 << 63))
	if err != nil {
		msg := fmt.Sprintf("failed to decode raft message (%v)", err)
		plog.Errorf(msg)

View File

@ -48,12 +48,16 @@ var (
)
func (dec *messageDecoder) decode() (raftpb.Message, error) {
+	return dec.decodeLimit(readBytesLimit)
+}
+
+func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) {
	var m raftpb.Message
	var l uint64
	if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil {
		return m, err
	}
-	if l > readBytesLimit {
+	if l > numBytes {
		return m, ErrExceedSizeLimit
	}
	buf := make([]byte, int(l))
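decodeLimit generalizes decode: read a big-endian length prefix, reject anything above a caller-chosen bound, then read exactly that many bytes. The same pattern in standalone form (names here are illustrative):

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

var errExceedSizeLimit = errors.New("message exceeds size limit")

func decodeLimit(r io.Reader, limit uint64) ([]byte, error) {
	var l uint64
	if err := binary.Read(r, binary.BigEndian, &l); err != nil {
		return nil, err
	}
	if l > limit {
		return nil, errExceedSizeLimit
	}
	buf := make([]byte, int(l))
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	var b bytes.Buffer
	binary.Write(&b, binary.BigEndian, uint64(5))
	b.WriteString("hello")
	msg, err := decodeLimit(&b, 512)
	fmt.Println(string(msg), err) // hello <nil>
}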

View File

@ -30,12 +30,10 @@ import (
// TestTransportSend tests that transport can send messages using correct
// underlying peer, and drop local or unknown-target messages.
func TestTransportSend(t *testing.T) {
-	ss := &stats.ServerStats{}
-	ss.Initialize()
	peer1 := newFakePeer()
	peer2 := newFakePeer()
	tr := &Transport{
-		ServerStats: ss,
+		ServerStats: stats.NewServerStats("", ""),
		peers:       map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2},
	}
	wmsgsIgnored := []raftpb.Message{
@ -67,12 +65,10 @@ func TestTransportSend(t *testing.T) {
}
func TestTransportCutMend(t *testing.T) {
-	ss := &stats.ServerStats{}
-	ss.Initialize()
	peer1 := newFakePeer()
	peer2 := newFakePeer()
	tr := &Transport{
-		ServerStats: ss,
+		ServerStats: stats.NewServerStats("", ""),
		peers:       map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2},
	}

View File

@ -71,7 +71,7 @@ acbuild --debug port add peer tcp 2380
acbuild --debug copy "$TMPHOSTS" /etc/hosts
-acbuild --debug label add arch $(go2aci ${GOARCH})
+acbuild --debug label add arch "$(go2aci ${GOARCH})"
# mkdir default data-dir
mkdir -p .acbuild/currentaci/rootfs/var/lib/etcd

View File

@ -17,8 +17,7 @@ if [ -z ${BINARYDIR} ]; then
	BINARYDIR="${RELEASE}"
	TARFILE="${RELEASE}.tar.gz"
	TARURL="https://github.com/coreos/etcd/releases/download/${1}/${TARFILE}"
-	curl -f -L -o ${TARFILE} ${TARURL}
-	if [ $? != 0 ]; then
+	if ! curl -f -L -o ${TARFILE} ${TARURL} ; then
		echo "Failed to download ${TARURL}."
		exit 1
	fi

View File

@ -57,7 +57,7 @@ popd
for dir in ${DIRS}; do
	pushd ${dir}
-		protoc --gofast_out=plugins=grpc,import_prefix=github.com/coreos/:. -I=.:"${GOGOPROTO_PATH}":"${COREOS_ROOT}":"${GRPC_GATEWAY_ROOT}/third_party/googleapis" *.proto
+		protoc --gofast_out=plugins=grpc,import_prefix=github.com/coreos/:. -I=".:${GOGOPROTO_PATH}:${COREOS_ROOT}:${GRPC_GATEWAY_ROOT}/third_party/googleapis" *.proto
		sed -i.bak -E "s/github\.com\/coreos\/(gogoproto|github\.com|golang\.org|google\.golang\.org)/\1/g" *.pb.go
		sed -i.bak -E 's/github\.com\/coreos\/(errors|fmt|io)/\1/g' *.pb.go
		sed -i.bak -E 's/import _ \"gogoproto\"//g' *.pb.go
@ -68,6 +68,8 @@ for dir in ${DIRS}; do
	popd
done
+# remove old swagger files so it's obvious whether the files fail to generate
+rm -rf Documentation/dev-guide/apispec/swagger/*json
for pb in etcdserverpb/rpc api/v3lock/v3lockpb/v3lock api/v3election/v3electionpb/v3election; do
	protobase="etcdserver/${pb}"
	protoc -I. \
@ -93,8 +95,9 @@ for pb in etcdserverpb/rpc api/v3lock/v3lockpb/v3lock api/v3election/v3electionp
	go fmt ${gwfile}
	mv ${gwfile} ${pkgpath}/gw/
	rm -f ./etcdserver/${pb}*.bak
+	swaggerName=`basename ${pb}`
	mv Documentation/dev-guide/apispec/swagger/etcdserver/${pb}.swagger.json \
-		Documentation/dev-guide/apispec/swagger/${name}.swagger.json
+		Documentation/dev-guide/apispec/swagger/${swaggerName}.swagger.json
done
rm -rf Documentation/dev-guide/apispec/swagger/etcdserver/
@ -107,7 +110,7 @@ rm -rf Documentation/dev-guide/apispec/swagger/etcdserver/
if [ "$1" = "-g" ]; then
	echo "protodoc is auto-generating grpc API reference documentation..."
	go get -v -u github.com/coreos/protodoc
-	SHA_PROTODOC="e901a6d4fa64c815acf117864521a8dadb11390f"
+	SHA_PROTODOC="4372ee725035a208404e2d5465ba921469decc32"
	PROTODOC_PATH="${GOPATH}/src/github.com/coreos/protodoc"
	pushd "${PROTODOC_PATH}"
	git reset --hard "${SHA_PROTODOC}"

View File

@ -21,7 +21,7 @@ if ! command -v docker >/dev/null; then
	exit 1
fi
-ETCD_ROOT=$(dirname "${BASH_SOURCE}")/..
+ETCD_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
pushd ${ETCD_ROOT} >/dev/null
echo Building etcd binary...

View File

@ -43,7 +43,7 @@ pushd "${GLIDE_VC_ROOT}"
popd
if [ -n "$1" ]; then
-	echo "glide get on $(echo $1)"
+	echo "glide get on $1"
	matches=`grep "name: $1" glide.lock`
	if [ ! -z "$matches" ]; then
		echo "glide update on $1"

test
View File

@ -23,6 +23,8 @@ if [ -z "$PASSES" ]; then
	PASSES="fmt bom dep compile build unit"
fi
+USERPKG=${PKG:-}
# Invoke ./cover for HTML output
COVER=${COVER:-"-cover"}
@ -31,14 +33,31 @@ IGNORE_PKGS="(cmd/|etcdserverpb|rafttest|gopath.proto|v3lockpb|v3electionpb)"
INTEGRATION_PKGS="(integration|e2e|contrib|functional-tester)"
# all github.com/coreos/etcd/whatever pkgs that are not auto-generated / tools
-PKGS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | egrep -v "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g"`
+PKGS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | egrep -v "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g" | xargs echo`
# pkg1,pkg2,pkg3
-PKGS_COMMA=`echo ${PKGS} | sed 's/ /,/g'`
+PKGS_COMMA=${PKGS// /,}
TEST_PKGS=`find . -name \*_test.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"`
-FORMATTABLE=`find . -name \*.go | while read a; do echo $(dirname $a)/"*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"`
+FORMATTABLE=`find . -name \*.go | while read a; do echo "$(dirname $a)/*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"`
TESTABLE_AND_FORMATTABLE=`echo "$TEST_PKGS" | egrep -v "$INTEGRATION_PKGS"`
+# check if user provided PKG override
+if [ -z "${USERPKG}" ]; then
+	TEST=$TESTABLE_AND_FORMATTABLE
+	FMT=$FORMATTABLE
+else
+	# strip out leading dotslashes and trailing slashes from PKG=./foo/
+	TEST=${USERPKG/#./}
+	TEST=${TEST/#\//}
+	TEST=${TEST/%\//}
+	# only run gofmt on packages provided by user
+	FMT="$TEST"
+fi
+
+# split TEST into an array and prepend REPO_PATH to each local package
+split=(${TEST// / })
+TEST=${split/#/${REPO_PATH}/}
# TODO: 'client' pkg fails with gosimple from generated files
# TODO: 'rafttest' is failing with unused
STATIC_ANALYSIS_PATHS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'`
@ -47,25 +66,6 @@ if [ -z "$GOARCH" ]; then
	GOARCH=$(go env GOARCH);
fi
-# user has not provided PKG override
-if [ -z "$PKG" ]; then
-	TEST=$TESTABLE_AND_FORMATTABLE
-	FMT=$FORMATTABLE
-# user has provided PKG override
-else
-	# strip out leading dotslashes and trailing slashes from PKG=./foo/
-	TEST=${PKG/#./}
-	TEST=${TEST/#\//}
-	TEST=${TEST/%\//}
-	# only run gofmt on packages provided by user
-	FMT="$TEST"
-fi
-
-# split TEST into an array and prepend REPO_PATH to each local package
-split=(${TEST// / })
-TEST=${split[@]/#/${REPO_PATH}/}
# determine whether target supports race detection
if [ "$GOARCH" == "amd64" ]; then
@ -213,21 +213,33 @@ function fmt_pass {
	exit 255
fi
-echo "Checking 'go tool vet -shadow'..."
-for path in $FMT; do
-	if [ "${path##*.}" != "go" ]; then
-		path="${path}/*.go"
-	fi
-	vetRes=$(go tool vet -shadow ${path})
-	if [ -n "${vetRes}" ]; then
-		echo -e "govet -shadow checking ${path} failed:\n${vetRes}"
-		exit 255
-	fi
-done
+echo "Checking 'go tool vet -all -shadow'..."
+fmtpkgs=$(echo $FMT | xargs dirname | sort | uniq | sed '/\./d')
+vetRes=$(go tool vet -all -shadow ${fmtpkgs} 2>&1 | grep -v '/gw/' || true)
+if [ -n "${vetRes}" ]; then
+	echo -e "govet -all -shadow checking failed:\n${vetRes}"
+	exit 255
+fi
+
+if which shellcheck >/dev/null; then
+	echo "Checking shellcheck..."
+	shellcheckResult=$(shellcheck -fgcc build test scripts/* 2>&1 || true)
+	if [ -n "${shellcheckResult}" ]; then
+		# mask the most common ones; fix later
+		SHELLCHECK_MASK="SC(2086|2006|2068|2196|2035|2162|2076)"
+		errs=$(echo "${shellcheckResult}" | egrep -v "${SHELLCHECK_MASK}" || true)
+		if [ -n "${errs}" ]; then
+			echo -e "shellcheck checking failed:\n${shellcheckResult}\n===\nFailed:\n${errs}"
+			exit 255
+		fi
+		suppressed=$(echo "${shellcheckResult}" | cut -f4- -d':' | sort | uniq -c | sort -n)
+		echo -e "shellcheck suppressed warnings:\n${suppressed}"
+	fi
+fi
echo "Checking documentation style..."
# eschew you
-yous=`find . -name \*.md | xargs egrep --color "[Yy]ou[r]?[ '.,;]" | grep -v /v2/ || true`
+yous=`find . -name \*.md -exec egrep --color "[Yy]ou[r]?[ '.,;]" {} + | grep -v /v2/ || true`
if [ ! -z "$yous" ]; then
	echo -e "found 'you' in documentation:\n${yous}"
	exit 255
@ -268,7 +280,7 @@ function fmt_pass {
	# TODO: resolve these after go1.8 migration
	SIMPLE_CHECK_MASK="S(1024)"
	if echo "${gosimpleResult}" | egrep -v "$SIMPLE_CHECK_MASK"; then
-		echo -e "gosimple checking ${path} failed:\n${gosimpleResult}"
+		echo -e "gosimple checking failed:\n${gosimpleResult}"
		exit 255
	else
		echo -e "gosimple warning:\n${gosimpleResult}"
@ -297,7 +309,7 @@ function fmt_pass {
	# See https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck
	STATIC_CHECK_MASK="SA(1019|2002)"
	if echo "${staticcheckResult}" | egrep -v "$STATIC_CHECK_MASK"; then
-		echo -e "staticcheck checking ${path} failed:\n${staticcheckResult}"
+		echo -e "staticcheck checking failed:\n${staticcheckResult}"
		exit 255
	else
		suppressed=`echo "${staticcheckResult}" | sed 's/ /\n/g' | grep "(SA" | sort | uniq -c`
@ -309,16 +321,20 @@ function fmt_pass {
	fi
	echo "Checking for license header..."
-	licRes=$(for file in $(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*'); do
-		head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e "  ${file}"
-	done;)
+	licRes=""
+	files=$(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*')
+	for file in $files; do
+		if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" ; then
+			licRes="${licRes}"$(echo -e "  ${file}")
+		fi
+	done
	if [ -n "${licRes}" ]; then
		echo -e "license header checking failed:\n${licRes}"
		exit 255
	fi
	echo "Checking commit titles..."
-	git log --oneline `git merge-base HEAD master`...HEAD | while read l; do
+	git log --oneline "$(git merge-base HEAD master)"...HEAD | while read l; do
		commitMsg=`echo "$l" | cut -f2- -d' '`
		if [[ "$commitMsg" == Merge* ]]; then
			# ignore "Merge pull" commits
@ -350,7 +366,7 @@ function bom_pass {
		--override-file bill-of-materials.override.json \
		github.com/coreos/etcd github.com/coreos/etcd/etcdctl >bom-now.json || true
	if ! diff bill-of-materials.json bom-now.json; then
-		echo vendored licenses do not match given bill of materials
+		echo "vendored licenses do not match given bill of materials"
		exit 255
	fi
	rm bom-now.json
@ -372,7 +388,7 @@ function dep_pass {
function build_cov_pass {
	out="bin"
	if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
-	go test -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcd_test
+	go test -tags cov -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcd_test
	go test -tags cov -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcdctl_test ${REPO_PATH}/etcdctl
}

View File

@ -44,9 +44,8 @@ func mustFindLeaderEndpoints(c *clientv3.Client) {
	leaderId := uint64(0)
	for _, ep := range c.Endpoints() {
-		resp, serr := c.Status(context.TODO(), ep)
-		if serr == nil {
-			leaderId = resp.Leader
+		if sresp, serr := c.Status(context.TODO(), ep); serr == nil {
+			leaderId = sresp.Leader
			break
		}
	}

View File

@ -49,7 +49,6 @@ func runElectionFunc(cmd *cobra.Command, args []string) {
	// nextc closes when election is ready for next round.
	nextc := make(chan struct{})
	eps := endpointsFromFlag(cmd)
-	dialTimeout := dialTimeoutFromCmd(cmd)
	for i := range rcs {
		v := fmt.Sprintf("%d", i)
View File

@ -112,11 +112,3 @@ func endpointsFromFlag(cmd *cobra.Command) []string {
	}
	return endpoints
}
-
-func dialTimeoutFromCmd(cmd *cobra.Command) time.Duration {
-	dialTimeout, err := cmd.Flags().GetDuration("dial-timeout")
-	if err != nil {
-		ExitWithError(ExitError, err)
-	}
-	return dialTimeout
-}

View File

@ -49,7 +49,6 @@ func runLeaseRenewerFunc(cmd *cobra.Command, args []string) {
	}
	eps := endpointsFromFlag(cmd)
-	dialTimeout := dialTimeoutFromCmd(cmd)
	c := newClient(eps, dialTimeout)
	ctx := context.Background()

View File

@ -53,7 +53,6 @@ func runRacerFunc(cmd *cobra.Command, args []string) {
	cnt := 0
	eps := endpointsFromFlag(cmd)
-	dialTimeout := dialTimeoutFromCmd(cmd)
	for i := range rcs {
		var (

View File

@ -73,7 +73,6 @@ func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int)
	roundPrefix := fmt.Sprintf("%16x", round)
	eps := endpointsFromFlag(cmd)
-	dialTimeout := dialTimeoutFromCmd(cmd)
	var (
		revision int64

View File

@ -26,7 +26,7 @@ import (
var (
	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
	MinClusterVersion = "3.0.0"
-	Version           = "3.2.0-rc.1+git"
+	Version           = "3.2.0"
	APIVersion        = "unknown"
	// Git SHA Value will be set during build