Compare commits


688 Commits

SHA1 Message Date
ffd1fa6f52 Merge pull request #5288 from gyuho/version_bump
*: bump to 3.0.0-beta.0
2016-05-06 13:29:19 -07:00
faca29fc3b Merge pull request #5287 from xiang90/l_metrics
*: add leader changes to metrics
2016-05-06 13:27:08 -07:00
76d073a2b5 *: add leader changes to metrics 2016-05-06 13:12:20 -07:00
74ea9ea5cd *: bump to 3.0.0-beta.0 2016-05-06 13:09:50 -07:00
d17aaae714 Merge pull request #5265 from gyuho/fix_5246
v2http: allow empty role for GET '/users'
2016-05-06 11:58:21 -07:00
3c2d0a229c v2http: allow empty role for GET /users
Fix https://github.com/coreos/etcd/issues/5246.
2016-05-06 11:39:38 -07:00
879cfe7666 Merge pull request #5278 from heyitsanthony/fix-clientv3-disconnects
clientv3: fix disconnect breakage
2016-05-05 19:53:08 -07:00
712090fc09 clientv3: keep watcher client active if reconnect has network error
Otherwise watchers created after a long disconnect period will always
close immediately.
2016-05-05 19:30:11 -07:00
22c3a439bc clientv3: do not stop lease client on lost receive stream
Fixes #5242
2016-05-05 19:30:11 -07:00
cdc8f99658 clientv3: rework reconnection logic
Avoids a goroutine flood from tight loops on a dead connection.
Now uses the request ctx when reconnecting, for immediate retry.
2016-05-05 19:30:11 -07:00
cc37632003 Merge pull request #5285 from heyitsanthony/fix-windows-sha
build: set git sha on windows builds
2016-05-05 18:39:36 -07:00
5d86525230 build: set git sha on windows builds 2016-05-05 18:18:07 -07:00
93d84b9076 Merge pull request #5284 from xiang90/perf_doc
doc: add performance.md
2016-05-05 16:02:39 -07:00
b033167094 doc: add performance.md 2016-05-05 14:58:34 -07:00
98031a3b6e Merge pull request #5249 from xiang90/metrics
*: add metrics for grpc api
2016-05-05 14:19:46 -07:00
063307ec0a *: add metrics for grpc api 2016-05-05 13:45:52 -07:00
61add11b05 Merge pull request #5259 from gyuho/functional-test
etcd-tester: refactor
2016-05-05 11:15:18 -07:00
cc7dd9b729 etcd-tester: refactor 2016-05-05 10:55:42 -07:00
3bcd2b5b9f Merge pull request #5271 from heyitsanthony/fix-rafthttp-active-race
rafthttp: fix race on peer status activeSince
2016-05-04 13:49:58 -07:00
c5af1d7a88 rafthttp: fix race on peer status activeSince 2016-05-04 11:48:16 -07:00
b24d0032d2 Merge pull request #5269 from heyitsanthony/fix-httpproxy-race
httpproxy: fix race on getting close notifier channel
2016-05-04 09:49:19 -07:00
a76f5f5ed2 httpproxy: fix race on getting close notifier channel
Fixes #5267
2016-05-04 09:32:26 -07:00
53ed8750ce Merge pull request #5266 from maciej/scala_etcd_client
libraries-and-tools.md: add Scala-based maciej/etcd-client
2016-05-04 09:15:18 -07:00
aeff5507e6 libraries-and-tools.md: add Scala-based maciej/etcd-client 2016-05-04 02:56:17 +02:00
b7761530e1 Merge pull request #5251 from heyitsanthony/fix-watch-panic
clientv3: gracefully handle watcher resume on compacted revision
2016-05-03 15:00:39 -07:00
b53aaf4c82 Merge pull request #5262 from gyuho/more_logging
*: more detailed timeout logging
2016-05-03 14:13:46 -07:00
9bf601a921 etcdserver: log timeout 2016-05-03 13:39:31 -07:00
0f5b8c39b4 Merge pull request #5263 from gyuho/autotls-flag
etcdmain: add auto-tls flag to help.go
2016-05-03 13:14:23 -07:00
56dd991b4e etcdmain: add auto-tls flag to help.go 2016-05-03 12:40:02 -07:00
864cbd36bf Merge pull request #5261 from gyuho/typo
*: typo, remove string type assertions
2016-05-03 11:29:13 -07:00
1a0d1ab4ab Merge pull request #5260 from glevand/for-merge-build
build: Simplify host detection
2016-05-03 11:28:46 -07:00
a288188001 *: typo, remove string type assertions 2016-05-03 10:59:57 -07:00
5d8d684a91 Merge pull request #5257 from gyuho/proto_fix
*: fix protodoc, re-run genproto script, typos in proto files
2016-05-03 10:46:46 -07:00
fd27f9cd28 Merge pull request #5256 from gyuho/fix_build
build: set GitSHA version in cmd directory
2016-05-03 10:13:07 -07:00
4ecb560604 build: Simplify host detection
Signed-off-by: Geoff Levand <geoff@infradead.org>
2016-05-03 09:54:44 -07:00
8b52fd0d2d clientv3: gracefully handle watcher resume on compacted revision
Fixes #5239
2016-05-03 09:30:53 -07:00
b7639b00e0 Merge pull request #5252 from xiang90/client-tls
*: support auto tls on client side
2016-05-03 09:22:11 -07:00
c5bf6a9d9e e2e: add test for auto client tls 2016-05-03 08:35:02 -07:00
015acabdbb *: rerun genproto -g 2016-05-02 23:02:31 -07:00
6222d46233 scripts/genproto.sh: update protodoc git SHA
To use protodoc with the fix
58fed2ed06.

This correctly parses the order of values in the 'directories' flag.
2016-05-02 23:00:40 -07:00
36acde620e build: set GitSHA version in cmd directory
Fix https://github.com/coreos/etcd/issues/5255.
2016-05-02 22:16:40 -07:00
1f5c5abe6d Merge pull request #5253 from xiang90/fix_raft_test
raft: fix flaky test
2016-05-02 21:33:51 -07:00
2fa5b913fe raft: fix flaky test
We recently changed the randomized election timeout from (et, 2*et-1] to
[et, 2*et-2], where et is the user-set election timeout.

So 2*et might trigger two elections instead of one. We need to fix the test
code accordingly.

Thanks to the TiKV guys for finding this issue. We probably need to randomize
etcd/raft tests more.
2016-05-02 21:08:19 -07:00
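
A minimal Go illustration of the [et, 2*et-2] range described above, assuming et is the configured election timeout in ticks and et > 1:

```
package main

import (
	"fmt"
	"math/rand"
)

// randomizedElectionTimeout sketches the range above: rand.Intn(et-1)
// yields a value in [0, et-2], so the result lands in [et, 2*et-2].
func randomizedElectionTimeout(et int) int {
	return et + rand.Intn(et-1)
}

func main() {
	fmt.Println(randomizedElectionTimeout(10)) // some value in [10, 18]
}
```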
973ad5aa7c *: support auto tls on client side 2016-05-02 16:17:49 -07:00
fee71b18a3 Merge pull request #5248 from gyuho/hash_with_revision
functional-tester: use revision from hash method
2016-05-02 15:30:26 -07:00
064c1ff0f3 etcdserver/api/v3rpc: use Revision from Hash API 2016-05-02 15:06:39 -07:00
7a6d9ea01a mvcc: Hash to return Revision 2016-05-02 15:04:24 -07:00
a8139e2b0e Merge pull request #5247 from joshix/faqhead
Documentation/v2: Add newline before heading in faq.md
2016-05-02 11:43:58 -07:00
92d673ea59 Documentation/v2: Add newline before heading in faq.md
Minor rewrite to the heading text for clarity.

Matches downstream coreos-inc/coreos-pages#648 and
coreos-inc/coreos-pages#649.
2016-05-02 11:15:18 -07:00
b9ea5f6d90 Merge pull request #5241 from claws/avoid_differences_in_gnu_and_bsd_cut
use sed instead of cut to accommodate GNU and BSD differences
2016-04-30 20:58:57 -07:00
c071104fc4 script: fix build script regression to work on OSX
Use sed instead of cut to accommodate GNU and BSD differences

Fixes: #5240
2016-05-01 13:06:07 +09:30
28f3cb0f14 Merge pull request #5171 from xiang90/runner
etcd-runner: initial commit
2016-04-30 19:39:53 -07:00
73ecb61ff4 etcd-runner: initial commit 2016-04-30 17:24:03 -07:00
262de75a7e Merge pull request #5238 from xiang90/bench_watch_put
mvcc: add benchmark for watch put and improve it
2016-04-30 10:46:38 -07:00
ad327e01d0 mvcc: add benchmark for watch put and improve it 2016-04-29 19:58:37 -07:00
b58f8dd64b Merge pull request #5237 from brian-brazil/master
Improve some debug metrics.
2016-04-29 17:53:54 -07:00
ea1d0f3e0d etcdserver: Improve some debug metrics.
The _total suffix is by convention for counters;
don't use it on a gauge. Clarify the help string.
Tweak the metric name so it'll sort with related metrics
and be a little more understandable.

Remove open file descriptor metric, as Prometheus client_golang
provides that out of the box as process_open_fds which is also
more up to date. Both only support Linux, so there's no loss of
platform support.

Fixes #5229
2016-04-30 01:29:13 +01:00
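
For reference, a hedged client_golang sketch of the naming convention this commit applies; the metric names here are illustrative, not etcd's actual ones:

```
package main

import "github.com/prometheus/client_golang/prometheus"

var (
	// Counters only ever go up, so they carry the _total suffix by convention.
	proposalsCommittedTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_server_proposals_committed_total",
		Help: "The total number of consensus proposals committed.",
	})
	// Gauges report a value that can go up or down, so no _total suffix.
	proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_server_proposals_pending",
		Help: "The current number of pending proposals.",
	})
)

func init() {
	prometheus.MustRegister(proposalsCommittedTotal, proposalsPending)
}

func main() {}
```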
c89e348fbc Merge pull request #5232 from glevand/master
Add arm64 travis builds
2016-04-29 16:41:09 -07:00
552a5af10f Merge pull request #5236 from gyuho/wait_purge
pkg/fileutil: wait up to 300ms for purge test
2016-04-29 16:37:09 -07:00
b79bb6f164 travis: Enable arm64 builds
Setup a travis test matrix on a new variable 'TARGET', which specifies the CI
target.  Update the script section with a conditional that runs the needed
commands for each target.

Also, set go_import_path to make cloned repos work, enable the trusty VM, and
enable verbose builds when testing.

Signed-off-by: Geoff Levand <geoff@infradead.org>
2016-04-29 15:31:30 -07:00
4ab1500a6d pkg/fileutil: wait up to 300ms for purge test
Fix https://github.com/coreos/etcd/issues/5231.

The issue shows that slow CI can take more than 200ms
for purging. This increases the loop iterations to wait
up to 300ms in case the disk is being slow.
2016-04-29 15:24:44 -07:00
00d6f104b5 Merge pull request #5235 from ronabop/get_put_typo
simple typo in README docs for getting started
2016-04-29 14:24:21 -07:00
c97b74a72f doc: fix etcdctl example in README
repeated put rather than put followed by get
2016-04-29 21:20:14 +00:00
634a9e833e Merge pull request #5233 from gyuho/client-doc
clientv3: fix README, add error handling example
2016-04-29 13:56:37 -07:00
0c5bcd5d80 clientv3: fix README, add error handling example 2016-04-29 13:34:16 -07:00
33968059e9 Merge pull request #5222 from gyuho/error_interface
rpctypes: error interface
2016-04-29 13:01:54 -07:00
ec1fdd3938 integration: test with new server errors 2016-04-29 12:00:26 -07:00
b3ebe66c97 clientv3/integration: tests with new errors 2016-04-29 12:00:26 -07:00
6049c95dc9 clientv3: auth with rpctypes.Error 2016-04-29 12:00:26 -07:00
506cf1f03f etcdserver/api/v3rpc: use new errors 2016-04-29 12:00:26 -07:00
2b361cf06b rpctypes: define a new error interface 2016-04-29 12:00:22 -07:00
d893a78c38 test: add v3rpc, rpctypes 2016-04-29 11:00:02 -07:00
8e099ab713 Merge pull request #5225 from heyitsanthony/local-tester
local-tester: procfile, faults, and network bridge
2016-04-29 10:27:56 -07:00
b8850cec93 Merge pull request #5228 from xiang90/fix_d
mvcc: fix watch deleteRange
2016-04-29 10:23:46 -07:00
29eca4eb88 Merge pull request #5223 from heyitsanthony/kv-less-reconnect
clientv3: better serialization for kv and txn connection retry
2016-04-29 10:02:17 -07:00
c0ff77e809 local-tester: procfile, faults, and network bridge
Creates a local fault-injected cluster and stresser for etcd.

Usage: goreman -f tools/local-tester/Procfile start
2016-04-29 09:57:02 -07:00
3ddcc21179 mvcc: fix watch deleteRange 2016-04-29 09:40:28 -07:00
c26eb3f241 clientv3: better serialization for kv and txn connection retry
If the grpc connection is restored between an rpc network failure
and trying to reestablish the connection, the connection retry would
end up resetting good connections if many operations were
in-flight at the time of network failure.
2016-04-29 09:26:32 -07:00
60425de0ff Merge pull request #5227 from raoofm/patch-3
Doc: Update production-users.md
2016-04-29 08:25:07 -07:00
db8588ab93 Doc: Update production-users.md
Update the Backups policy
2016-04-29 11:23:51 -04:00
51ad5f00bf Merge pull request #5226 from raoofm/patch-2
Doc: Update production-users.md
2016-04-29 08:01:07 -07:00
419ae757d2 Update production-users.md 2016-04-29 10:58:24 -04:00
4480eb6d49 Merge pull request #5217 from gyuho/rpc_types
*: return rpctypes.Err in clientv3
2016-04-28 15:58:47 -07:00
f148f4b2b9 clientv3/integration: tests error types (rpctypes) 2016-04-28 15:42:27 -07:00
2e3d79a7bf clientv3: convert errors to rpctypes on returning
For https://github.com/coreos/etcd/issues/5211.
2016-04-28 15:39:37 -07:00
f613052435 rpctypes: Error function to convert clientv3 error 2016-04-28 12:16:13 -07:00
bef5be42b5 integration: add quota backend bytes option 2016-04-28 12:15:31 -07:00
11ec94b7e8 Merge pull request #5218 from heyitsanthony/fix-issue-3699
integration: wait for ReadyNotify in Issue3699 test
2016-04-28 10:48:08 -07:00
7c666b533a Merge pull request #5221 from heyitsanthony/parallel-e2e-integration
test: run e2e and integration tests in parallel
2016-04-28 10:30:40 -07:00
85edd66c65 test: run e2e and integration tests in parallel 2016-04-28 10:17:40 -07:00
8291110049 rafthttp: do not create new connections after stopping transport 2016-04-28 10:10:52 -07:00
d1e11842df Merge pull request #5219 from xiang90/req_timeout
etcdserver: add timeout for processing v3 request
2016-04-28 09:25:08 -07:00
6ee5f9c677 etcdserver: add timeout for processing v3 request 2016-04-28 08:52:17 -07:00
d814e9dc35 integration: wait for ReadyNotify in Issue3699 test
Fixes #5147
2016-04-27 22:04:07 -07:00
8df52dc6fa Merge pull request #5216 from heyitsanthony/lease-header-err
v3rpc: only fill lease grant header if no error
2016-04-27 16:51:16 -07:00
06ea8aee11 v3rpc: only fill lease grant header if no error
Was panicking under cluster fault injection.
2016-04-27 16:28:40 -07:00
ca83793876 Merge pull request #5169 from xiang90/ready
etcdserver: do not serve requests before finishing the first internal proposal
2016-04-27 16:05:12 -07:00
434f2c356d etcdserver: do not serve requests before finishing the first internal proposal 2016-04-27 15:46:31 -07:00
e50df7c19b Merge pull request #5215 from gyuho/finish_doc
Finish v2 documentation cleaning
2016-04-27 14:07:59 -07:00
c697aa7c60 Documentation: remove the rest
Remove:
1. auth_api.md
2. docker_guide.md
3. faq.md
4. implementation-faq.md
5. internal-protocol-versioning.md
2016-04-27 13:48:11 -07:00
8b3d1562f9 Documentation: remove admin_guide out of v2 2016-04-27 13:48:07 -07:00
c25c8573ac Merge pull request #5212 from gyuho/doc_fix
v2 documentation link fix
2016-04-27 13:18:39 -07:00
954535c2b4 Documentation: move members_api.md 2016-04-27 11:49:41 -07:00
42c09a95a0 Documentation: remove other_apis from v3 2016-04-27 11:40:48 -07:00
a2ab18fce5 Documentation: move api.md to v2 2016-04-27 11:40:48 -07:00
5464665107 Documentation: del backward_compatibility from v3 2016-04-27 11:40:48 -07:00
04fda9d25f Documentation: fix proxy link and delete from v3 2016-04-27 11:40:44 -07:00
95bac2dc3c Documentation: remove v2 snapshot migration doc 2016-04-27 11:31:44 -07:00
01927cc26a *: remove v2 specific authentication doc 2016-04-27 11:30:51 -07:00
f4b8e878ed Documentation: delete upgrade_2_* from v3 doc dir 2016-04-27 11:29:36 -07:00
63c5725fef Documentation: fix errorcode link to v2 2016-04-27 11:28:48 -07:00
afd2cc7373 Merge pull request #5206 from xiang90/lease_header
v3rpc: fill lease header
2016-04-27 11:18:00 -07:00
08f6c0775a Merge pull request #5199 from heyitsanthony/safe-lock-retry
clientv3/concurrency: use session lease id for mutex keys
2016-04-27 11:10:46 -07:00
07daa9fdc0 Merge pull request #5201 from gyuho/auth_test
auth: add basic tests
2016-04-27 10:57:20 -07:00
c3de53c23c v3rpc: fill lease header 2016-04-27 10:30:23 -07:00
14415c2187 auth: add tests 2016-04-27 10:13:36 -07:00
81ac766bb4 Merge pull request #5174 from gyuho/restart
etcd-tester: match more grpc errors
2016-04-27 09:47:55 -07:00
de7c18909f etcd-tester: match more grpc errors
To prevent stressers from returning from failure injections
2016-04-27 09:34:05 -07:00
8a4c9c9da1 Merge pull request #5205 from clearbit/rh-error-newline
etcdctl: Add a newline so that errors don't bleed into each other.
2016-04-27 07:31:08 -07:00
a00be40db2 etcdctl: Add a newline so that errors don't bleed into each other. 2016-04-27 14:25:57 +01:00
ecb0e2bd38 Merge pull request #5203 from heyitsanthony/fix-lease-leak
clientv3: check stream context in lease keep alive send loop
2016-04-26 20:42:31 -07:00
30a9229f38 clientv3: check stream context in lease keep alive send loop
If no leases are being kept alive, a connection reset would leak
the send routine since it would only test the stream when sending
keep alives.

Fixes #5200
2016-04-26 20:10:09 -07:00
22797c7185 clientv3/concurrency: use session lease id for mutex keys
With randomized keys, if the connection goes down, but the session remains,
the client would need complicated recovery logic to avoid deadlock.
Instead, bind the session's lease id to the lock entry; if a session tries
to reacquire the lock it will reassume its old place in the wait list.
2016-04-26 17:37:26 -07:00
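
A minimal sketch of the idea, using a hypothetical lockKey helper rather than etcd's actual concurrency code:

```
package main

import "fmt"

// lockKey derives the mutex entry from the session's lease ID, so a
// session that retries Lock lands on the key it already held instead
// of enqueueing a fresh random key behind other waiters.
func lockKey(pfx string, leaseID int64) string {
	return fmt.Sprintf("%s/%x", pfx, leaseID)
}

func main() {
	fmt.Println(lockKey("/my-lock", 0x694d7e)) // /my-lock/694d7e
}
```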
c8ab6c348a Merge pull request #5196 from gyuho/password_check
etcdserver/auth: check empty password
2016-04-26 15:56:17 -07:00
bba08f6f79 e2e: add tests for issue 5182
For https://github.com/coreos/etcd/issues/5182.
2016-04-26 15:37:19 -07:00
07685bcf97 etcdserver/auth: check empty password in merge
Fix https://github.com/coreos/etcd/issues/5182.
2016-04-26 15:37:15 -07:00
78c96e893e Merge pull request #5198 from heyitsanthony/readme-3.0
doc: focus on v3 in README
2016-04-26 14:47:22 -07:00
dc55c312b0 doc: focus on v3 in README and clone old v2 docs
Fixes #5192
2016-04-26 14:41:59 -07:00
ce76c28805 Merge pull request #5197 from heyitsanthony/fix-lease-revoke-keepalive
etcdserver: respond with ttl=0 for revoked lease keep alive
2016-04-26 14:13:54 -07:00
af1a0b60e2 etcdserver: respond with ttl=0 for revoked lease keep alive
Fixes #5172
2016-04-26 13:53:20 -07:00
26e52d2bce Merge pull request #5190 from xiang90/deb_metrics
*: add debugging metrics
2016-04-26 10:27:05 -07:00
67645095e9 *: add debugging metrics 2016-04-26 09:52:56 -07:00
7161eeed8b Merge pull request #5191 from xiang90/github-folder
.github: add pull request and issue template
2016-04-25 16:22:48 -07:00
cc27c3a1e6 .github: add pull request and issue template 2016-04-25 16:22:13 -07:00
d923b59190 Merge pull request #5189 from heyitsanthony/storage-to-mvcc
*: rename storage package to mvcc
2016-04-25 15:52:08 -07:00
b7ac758969 *: rename storage package to mvcc 2016-04-25 15:25:51 -07:00
1440007608 Merge pull request #5187 from xiang90/doc_security
doc: add link to security
2016-04-25 14:32:12 -07:00
1d5bfd95dc Merge pull request #5188 from gyuho/gogoproto-dependency
Update gogo/proto, grpc dependency
2016-04-25 14:29:42 -07:00
12d01bb1eb vendor: update grpc, gogo/protobuf 2016-04-25 14:10:58 -07:00
4b31acf0e0 *: update generated Proto 2016-04-25 14:08:33 -07:00
82ef33a8d3 scripts: update genproto with new gogoproto hash 2016-04-25 14:07:40 -07:00
4b296bf51c doc: add link to security 2016-04-25 13:54:38 -07:00
9ec176a9b0 Merge pull request #5176 from xiang90/lease_client
clientv3: keepaliveonce should have a per call ctx
2016-04-25 11:45:58 -07:00
6de5b45b2f Merge pull request #5185 from joshix/dochds
Documentation/doc.md: Make headings boring :)
2016-04-25 11:45:20 -07:00
2a38cb5ad8 Documentation/doc.md: Make headings boring :)
Make the heading sentences that introduce each list of documents
a little more standard language, remove implied 2nd person, reduce
exclamations.
2016-04-25 10:58:25 -07:00
8a82ddadb9 Merge pull request #5181 from xiang90/cluster_doc
docs: move clustering doc
2016-04-25 10:50:29 -07:00
cbd79c666e clientv3: keepaliveonce should have a per call ctx
KeepAliveOnce should have a per-call ctx. Right now we have a per-API
ctx, but we might make rpc calls multiple times in a for loop.

To avoid unnecessary routine leaks, use a per-call ctx.
2016-04-25 10:46:47 -07:00
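
A hedged sketch of the per-call ctx pattern described above; keepAliveLoop and once are illustrative names, not clientv3's API:

```
package main

import (
	"context"
	"time"
)

// keepAliveLoop gives every retry its own cancelable ctx, so goroutines
// tied to one attempt are released when that attempt ends, rather than
// living as long as the API-wide ctx.
func keepAliveLoop(apiCtx context.Context, once func(context.Context) error) {
	for {
		ctx, cancel := context.WithTimeout(apiCtx, time.Second)
		err := once(ctx)
		cancel() // release per-call resources before the next attempt
		if err == nil {
			return
		}
	}
}

func main() {
	keepAliveLoop(context.Background(), func(context.Context) error { return nil })
}
```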
1b98074897 docs: move clustering doc 2016-04-25 10:35:29 -07:00
1378e72bc2 Merge pull request #5184 from gyuho/typo
*: fix flag location, minor typo
2016-04-25 09:59:24 -07:00
3ae956eb89 Merge pull request #5179 from xiang90/doc_di
doc: link to recovery.md
2016-04-25 09:47:30 -07:00
3ad8e91e00 *: fix flag location, minor typo 2016-04-25 09:41:11 -07:00
663aca701d Merge pull request #5177 from xiang90/lease_client_2
clientv3: retry on switchRemoteAndStream
2016-04-25 09:36:06 -07:00
736e1d6c33 doc: link to recovery.md 2016-04-23 22:40:35 -07:00
844208d7dd clientv3: retry on switchRemoteAndStream
If switchRemoteAndStream fails, the whole lease API fails since
the internal routine exits. We should only fail the whole API when
there is a fatal error; for example, we should fail if we fail to
connect to all the endpoints the user provided.

If we connect to an endpoint but fail to create a stream, we should
retry instead of returning an error that fails the entire API.
2016-04-23 21:55:34 -07:00
f8673b5f60 Merge pull request #5170 from gyuho/tester
etcd-tester: flag consistency-check
2016-04-22 22:26:17 -07:00
151d0d3831 etcd-tester: flag consistency-check 2016-04-22 22:22:12 -07:00
90f91ac8ac Merge pull request #5162 from heyitsanthony/disaster-doc
doc: v3 disaster recovery doc
2016-04-22 19:48:45 -07:00
579c1342e6 doc: v3 disaster recovery doc 2016-04-22 19:49:39 -07:00
50471d0c5c Merge pull request #5168 from heyitsanthony/fix-pipeline-leak
etcdserver: stop raft after stopping apply scheduler
2016-04-22 19:11:34 -07:00
08d879341d etcdserver: stop raft after stopping apply scheduler
Was causing a pipeline leak.
2016-04-22 17:15:13 -07:00
e51e146a19 Merge pull request #5167 from xiang90/doc_reorg
docs: update docs.md and create subdirs
2016-04-22 17:03:16 -07:00
bfd6465ea3 docs: update docs.md and create subdirs 2016-04-22 16:58:03 -07:00
45bf7fb960 Merge pull request #5165 from xiang90/race
raft: fix detected race in node.go
2016-04-22 16:12:33 -07:00
59c5110b73 raft: fix detected race in node.go 2016-04-22 15:45:33 -07:00
0dd9c2520b Merge pull request #5164 from gyuho/sleep_for_slow_network
etcd-tester: wait more for slow network recovery
2016-04-22 15:36:50 -07:00
6a0664d701 etcd-tester: wait more for slow network recovery
For https://github.com/coreos/etcd/issues/5121.
2016-04-22 15:24:47 -07:00
da1138f8de Merge pull request #5160 from gyuho/close_db
etcdctl/ctlv3: close bolt.DB in snapshot status
2016-04-22 12:10:28 -07:00
d49c044666 Merge pull request #5135 from heyitsanthony/maintenance-doc
doc: v3 maintenance
2016-04-22 12:02:39 -07:00
53abaf86c6 etcdctl/ctlv3: close bolt.DB in snapshot status 2016-04-22 11:43:52 -07:00
3ffbc4c8dd Merge pull request #5158 from gyuho/functional-test-check
etcd-tester: reset success var for every case
2016-04-22 09:37:27 -07:00
0feb88cee1 etcd-tester: change var success->failed
A previous success would overwrite a later failure.
Make it simpler by changing the variable to 'failed'.
2016-04-22 09:27:37 -07:00
af30795752 Merge pull request #5157 from mitake/5155
etcdserver: remove a data race of ServerStat
2016-04-22 09:19:47 -07:00
24077fb3f6 etcdserver: remove a data race of ServerStat
It seems that ServerStats.BecomeLeader() is missing a lock.

Fix https://github.com/coreos/etcd/issues/5155
2016-04-22 23:41:38 +09:00
69bc0f76bc Merge pull request #5152 from heyitsanthony/fix-quota-test
integration: wait for alarm in TestV3StorageQuotaApply
2016-04-21 21:26:46 -07:00
2927c90fae integration: wait for alarm in TestV3StorageQuotaApply
Fixes #4974
2016-04-21 20:53:43 -07:00
f73cdf4035 Merge pull request #5153 from gyuho/api_doc
*: change Protocol Buffer documentation title
2016-04-21 20:03:37 -07:00
2751a10db6 *: change Protocol Buffer documentation title 2016-04-21 19:58:41 -07:00
fdf6335416 Merge pull request #5117 from gyuho/proto_gen
*: Protocol Buffer docs auto-generate script
2016-04-21 19:33:41 -07:00
753630dc37 *: Protocol Buffer docs auto-generate script 2016-04-21 19:14:21 -07:00
b8c35e3af8 doc: v3 maintenance 2016-04-21 17:02:46 -07:00
d32113a0e5 Merge pull request #5150 from xiang90/doc_f
doc: front page of etcd3 doc
2016-04-21 16:49:18 -07:00
e38710b5f9 doc: front page of etcd3 doc 2016-04-21 16:42:16 -07:00
0c191b71ec Merge pull request #5146 from gyuho/help
etcdmain: quota-backend-bytes in help.go
2016-04-21 13:24:20 -07:00
fa61bf86d7 etcdmain: add quota-backend-bytes to help.go 2016-04-21 13:05:54 -07:00
79a91b3450 Merge pull request #5145 from gyuho/skip_compact
etcd-tester: skip compaction after different hash
2016-04-21 11:09:19 -07:00
4e175a98c3 Merge pull request #5144 from xiang90/l
*: fix invalid access to backend struct
2016-04-21 10:14:39 -07:00
c0cf44f134 backend: protect backend access with lock 2016-04-21 09:34:31 -07:00
4991cda202 etcdserver: fix the leaky snapshot routine issue 2016-04-21 08:48:11 -07:00
8684d96914 Merge pull request #5124 from mitake/auth-v3-authenticate
*: support authenticate in v3 auth
2016-04-20 21:07:09 -07:00
131e3806bb *: support authenticate in v3 auth
This commit implements the Authenticate() API of the auth package. It
does authentication based on its authUsers bucket and generates a token
for succeeding RPCs.
2016-04-21 12:32:19 +09:00
e835d24bea etcd-tester: skip compaction after different hash
When hashes don't match, some nodes could be falling
behind, and the compact request can then fail
with 'future revision compact'.
2016-04-20 17:13:51 -07:00
05d5459b1d Merge pull request #5143 from gyuho/mirror-make-e2e
e2e: make-mirror
2016-04-20 15:33:41 -07:00
6eb25751ec e2e: make-mirror 2016-04-20 15:13:45 -07:00
29dfca883f Merge pull request #5141 from gyuho/alarm_test
e2e: test alarm
2016-04-20 12:09:43 -07:00
d976121e35 e2e: test alarm 2016-04-20 11:38:53 -07:00
20db51bfb2 Merge pull request #5138 from heyitsanthony/v2api-refactor
etcdserver: v2api refactor
2016-04-20 11:07:37 -07:00
b37a0ad9e7 Merge pull request #5137 from gyuho/member_add_test
e2e: add member add/update test
2016-04-20 10:38:43 -07:00
f1440f1d63 Merge pull request #5140 from xiang90/fix_d
backend: update db.size after defrag
2016-04-20 10:31:37 -07:00
0fe24e7ffc etcdserver: rename v3demo_server to v3_server
Not much of a demo any more.
2016-04-20 10:29:22 -07:00
ebace2eb1b etcdserver: split out v2 Do() API from core server code 2016-04-20 10:29:22 -07:00
41382bc3f0 etcdserver: split out v2 raft apply interface 2016-04-20 10:29:22 -07:00
1fe4c34398 Merge pull request #5131 from heyitsanthony/etcdctl-get-json
etcdctl: print full json response for Get
2016-04-20 10:21:48 -07:00
0893dbf7c1 e2e: add member add/update test 2016-04-20 10:05:55 -07:00
bfc6309222 Merge pull request #5129 from xiang90/pipe_test
make TestPipelineKeepSendingWhenPostError reliable
2016-04-20 10:02:17 -07:00
74d50884bb backend: update db.size after defrag 2016-04-20 10:01:38 -07:00
d2a58cbb0a etcdctl: print full json response for Get
Otherwise parsing get/txn output with json is somewhat complicated
because sometimes there's a json message and sometimes not.
Likewise, a get on an absent key has to return the current revision for
some algorithms to work.
2016-04-20 09:56:32 -07:00
fb137f11c5 rafthttp: make TestPipelineKeepSendingWhenPostError reliable 2016-04-20 09:38:47 -07:00
0c40f4a7e3 Merge pull request #5136 from heyitsanthony/test-display-gosimple
test: display failure output for gosimple
2016-04-19 23:25:12 -07:00
46dfa682e7 test: display failure output for gosimple 2016-04-19 22:58:37 -07:00
32a486b462 Merge pull request #5127 from xiang90/down_build
doc: build
2016-04-19 13:38:04 -07:00
d1067d39c7 doc: build 2016-04-19 13:37:50 -07:00
8af9c88377 Merge pull request #5122 from xiang90/lease_doc
doc: add lease section to interacting doc
2016-04-19 13:31:16 -07:00
16630529f7 Merge pull request #5125 from xiang90/dev_cluster
doc: add local_cluster doc
2016-04-19 10:51:16 -07:00
531ee93878 doc: add local_cluster doc 2016-04-19 10:50:54 -07:00
6d06c060b4 doc: add lease section to interacting doc 2016-04-19 08:18:59 -07:00
668ea89980 Merge pull request #5126 from judwhite/patch-2
raft/doc.go: add missing }
2016-04-19 07:25:31 -07:00
a9cfbd5414 raft/doc.go: add missing } 2016-04-19 04:21:33 -05:00
bf9cccfc34 Merge pull request #5118 from ajityagaty/fsync_osx
fileutil: Sync on HFS/OSX needs to be handled differently.
2016-04-18 22:22:53 -07:00
8b6de5f85d fileutil: Sync on HFS/OSX needs to be handled differently.
A call to file.Sync on OS X doesn't guarantee actual persistence to
the physical drive media, as the data can be cached in the physical
drive's buffers. Hence calls to file.Sync need to be replaced with
fcntl(F_FULLFSYNC).
2016-04-18 21:49:04 -07:00
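
A darwin-only sketch of such a replacement, close to but not guaranteed to match the commit's code:

```
//go:build darwin

package fileutil

import (
	"os"
	"syscall"
)

// Fsync on OS X: fsync(2) alone may leave data in the drive's write
// cache, so issue F_FULLFSYNC to flush it to the physical medium.
func Fsync(f *os.File) error {
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
	if errno != 0 {
		return errno
	}
	return nil
}
```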
d16628bf50 Merge pull request #5120 from magicwang-cn/master
etcdserver: close response body when getting cluster information
2016-04-18 19:44:19 -07:00
97c71f44fd etcdserver: close response body when getting cluster information 2016-04-19 10:03:40 +08:00
c4892c7f51 Merge pull request #5105 from xiang90/get_started
doc: add write/read example for interact doc
2016-04-18 14:27:53 -07:00
a2ac639176 doc: add write/read example for interact doc 2016-04-18 13:42:12 -07:00
8a0fa5622e Merge pull request #5114 from gyuho/snapshot_test
*: add Snapshot e2e test
2016-04-18 09:27:07 -07:00
b494ad3a0d Merge pull request #5112 from heyitsanthony/protobuf-comments
storagepb, etcdserverpb: improve documentation for RPC message fields
2016-04-17 23:53:53 -07:00
42245a5518 storagepb, etcdserverpb: improve documentation for RPC message fields 2016-04-17 23:33:00 -07:00
ea6a747fc1 Merge pull request #5116 from ajityagaty/typo_fix
etcdctlv3: Fix for typo in alarm command handling.
2016-04-17 20:30:22 -07:00
68dd22d93d etcdctlv3: Fix for typo in alarm command handling. 2016-04-17 19:31:39 -07:00
9504df2917 Merge pull request #5115 from gyuho/gc
v3rpc: bytes-key map look-up gc optimization
2016-04-17 13:21:47 -07:00
86f580fa8f v3rpc: bytes-key map look-up gc optimization
This change,
f5f5a8b620,
just got merged into go1.6.1, where Go does a special optimization for
x = m[string(k)] when k is []byte.
2016-04-17 10:52:19 -07:00
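
A small illustration of the optimized form:

```
package main

import "fmt"

func main() {
	m := map[string]int{"foo": 42}
	k := []byte("foo")
	// Since go1.6.1 the compiler recognizes m[string(k)] and skips the
	// string allocation for the lookup; s := string(k) followed by m[s]
	// would still allocate.
	v := m[string(k)]
	fmt.Println(v)
}
```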
a2afb513dd *: add snapshot e2e test 2016-04-16 13:27:10 -07:00
d4ff9364d4 Merge pull request #4861 from heyitsanthony/nfs-lock
pkg/fileutil: fix linux file locks over NFS
2016-04-16 08:59:10 -07:00
11e8d01035 Merge pull request #5113 from ajityagaty/remove_lease_id_casts
clientv3: Remove superfluous LeaseID casts in integration tests.
2016-04-16 07:22:06 -07:00
f15b5aa4e6 Merge pull request #5034 from ZhuPeng/proxy-http2
Enable http2 support between proxy and member
2016-04-16 07:04:41 -07:00
da5bd04a1a clientv3: Remove superfluous LeaseID casts in integration tests.
The integration tests under clientv3 have superfluous LeaseID casts
that are no longer needed, as the ID field of the lease responses is
of type LeaseID now.
2016-04-15 17:48:20 -07:00
73b48dd8eb Merge pull request #5111 from Amit-PivotalLabs/fix-etcdctl-unset-env
etcdctl: unset ETCDCTL_API env var properly
2016-04-15 16:32:42 -07:00
c629a30f1f etcdctl: unset ETCDCTL_API env var properly 2016-04-15 15:43:00 -07:00
4ed5f66a7a Merge pull request #5109 from gyuho/member_remove_test
e2e: add member remove test
2016-04-15 15:04:00 -07:00
caf0e9b9b1 Merge pull request #5110 from gyuho/error_when_db_not_exist
etcdctl: snapshot status error for non-existent file
2016-04-15 14:44:25 -07:00
59a88d1cf6 e2e: add member remove test 2016-04-15 14:43:32 -07:00
a78ece4ac2 etcdctl: snapshot status error for non-existent file 2016-04-15 14:15:16 -07:00
3ee99a496f Merge pull request #5096 from heyitsanthony/clientv3-run-examples
test, clientv3: run examples as integration tests
2016-04-15 12:42:44 -07:00
9bfa0172f5 test, clientv3: run examples as integration tests 2016-04-15 11:51:30 -07:00
d4dae7e9e9 Merge pull request #5101 from gyuho/watch_bench_fix
benchmark: ensure all watcher receivers to finish
2016-04-15 11:49:24 -07:00
ad226f2020 benchmark: ensure all watcher receivers to finish
Fix https://github.com/coreos/etcd/issues/5099.
2016-04-15 11:11:14 -07:00
c1455a4f10 Merge pull request #5090 from ajityagaty/lease_id
clientv3: Use LeaseID in all the client APIs.
2016-04-15 10:48:29 -07:00
da153d3f3c Merge pull request #5091 from xiang90/r_h
doc: add response header doc into api
2016-04-15 09:57:48 -07:00
3b72c3da53 doc: add response header doc into api 2016-04-15 09:54:30 -07:00
81a5fc16ef Merge pull request #5095 from gyuho/govet_fix
*: fix govet -shadow in go tip
2016-04-15 09:41:24 -07:00
376234f196 Merge pull request #5094 from gyuho/watch_range_example
*: add more examples to clientv3, pkg/adt
2016-04-15 09:10:25 -07:00
641a1a66e1 *: fix govet -shadow in go tip 2016-04-15 07:39:52 -07:00
ae27b991b1 *: add more examples to clientv3, pkg/adt 2016-04-14 23:46:50 -07:00
06a4086bf9 clientv3: Use LeaseID in all the client APIs.
In order to use the LeaseID type instead of int64 we have to convert
the protobuf lease responses into client lease responses.
2016-04-14 23:09:46 -07:00
4ee7cad116 Merge pull request #5093 from gyuho/fix_test
functional-tester/etcd-tester: fix error check
2016-04-14 21:45:44 -07:00
8515ae30fb functional-tester/etcd-tester: fix error check 2016-04-14 21:31:12 -07:00
67db28f979 proxy: enable http2 for connecting to members
enable http2 when the transport specifies a custom TLS config, for
which it was not automatically enabled.

Issue 5033
2016-04-15 10:16:26 +08:00
6c1cc1d4ea Merge pull request #5089 from heyitsanthony/fix-func-tester-timeout
etcd-tester: return error if first compaction times out
2016-04-14 17:24:22 -07:00
21233416e8 etcd-tester: return error if first compaction times out
Fixes #5081
2016-04-14 17:11:53 -07:00
74153ffa45 Merge pull request #5082 from xiang90/kv_d
doc: add doc for kv message
2016-04-14 15:17:04 -07:00
df37c75bb9 doc: add doc for kv message 2016-04-14 15:16:23 -07:00
f2e915f56e Merge pull request #5086 from heyitsanthony/test-race-rafthttp
test: check races on rafthttp
2016-04-14 14:21:20 -07:00
57448622d9 Merge pull request #5085 from heyitsanthony/hide-yaml
clientv3: make YamlConfig struct private
2016-04-14 14:10:20 -07:00
01be6933c6 test: check races on rafthttp
The data race in net/http has been fixed for a while.
2016-04-14 13:45:31 -07:00
cfbb8a71db Merge pull request #5084 from gyuho/typo
clientv3: fix example code format, more examples
2016-04-14 12:30:44 -07:00
04ef861c3d clientv3: make YamlConfig struct private 2016-04-14 12:26:01 -07:00
81e344bef9 clientv3: fix example code format, more examples 2016-04-14 12:13:07 -07:00
6bbdebb281 Merge pull request #5076 from gyuho/more_e2e
*: add, clean up e2e tests
2016-04-14 11:59:13 -07:00
6a3b5fe70c Merge pull request #5083 from ajityagaty/role_grant_test
e2e: Test case for the etcdctlv3 'role grant' command.
2016-04-14 11:53:21 -07:00
fefb58dc90 e2e: clean up, add more tests 2016-04-14 11:42:57 -07:00
4495559ad6 e2e: Test case for the etcdctlv3 'role grant' command.
Adding a test case to test the 'role grant' sub-command.
2016-04-14 11:31:07 -07:00
ba1c0a2b12 Merge pull request #5080 from xiang90/up
proxy: initial userspace tcp proxy
2016-04-14 10:41:46 -07:00
4a913ae60a proxy: initial userspace tcp proxy 2016-04-14 10:14:30 -07:00
da1132662a Merge pull request #5078 from ajityagaty/role_cmd_tests
e2e: Test case for the etcdctlv3 role command.
2016-04-14 09:44:07 -07:00
27844a6aef Merge pull request #5079 from mitake/auth-fix
auth: remove index out of range in role grant
2016-04-14 08:07:18 -07:00
a016220648 auth: remove index out of range in role grant
Fixes https://github.com/coreos/etcd/issues/5077
2016-04-14 22:02:10 +09:00
3b7c8d752c e2e: Test case for the etcdctlv3 role command.
New test cases have been added to test the 'role' and 'user'
sub-commands of etcdctlv3 utility.
2016-04-14 01:54:22 -07:00
ac95cc32ef Merge pull request #5075 from xiang90/p
proxy: move http related thing to httpproxy
2016-04-13 22:44:29 -07:00
e913792d0f Merge pull request #5073 from heyitsanthony/etcdctl-docs
doc: document many etcdctl commands
2016-04-13 22:08:22 -07:00
cd05ac4217 doc: document many etcdctl commands
documents defrag, compaction, lease, snapshot status, member, endpoint
2016-04-13 21:50:59 -07:00
b20d171ee1 Merge pull request #5074 from heyitsanthony/fix-compact-current-rev
storage: have Range on rev=0 work even if compacted to current revision
2016-04-13 21:15:55 -07:00
66d2ae7a39 proxy: move http related thing to httpproxy 2016-04-13 21:09:26 -07:00
d72bcdc156 storage: have Range on rev=0 work even if compacted to current revision 2016-04-13 21:00:35 -07:00
e6ff5a38e1 Merge pull request #5072 from heyitsanthony/fix-ep-json
etcdctl: respect --write-out=json for endpoint status command
2016-04-13 19:12:26 -07:00
793fb2cf64 Merge pull request #4673 from gyuho/slow
functional-tester: add latency test (simulate slow network)
2016-04-13 17:07:30 -07:00
f07350735d etcdctl: respect --write-out=json for endpoint status command 2016-04-13 17:04:31 -07:00
6af40ea1e1 functional-tester: add latency test (simulate slow network)
Fix https://github.com/coreos/etcd/issues/4666.
2016-04-13 17:00:09 -07:00
e9aa8ff235 Merge pull request #5071 from gyuho/member_api_change
*: Member api change
2016-04-13 16:45:10 -07:00
3dcfe79cc0 Merge pull request #5070 from heyitsanthony/member-doc
etcdctl: display required arguments for member commands in usage
2016-04-13 16:40:16 -07:00
7a2ef3eb00 *: regenerate proto buffers 2016-04-13 16:24:07 -07:00
2c6176b5f2 *: remove MemberLeader API in client side (fix examples) 2016-04-13 16:23:57 -07:00
b78886239e *: remove IsLeader field in Member API server side 2016-04-13 16:23:33 -07:00
90df7fd738 etcdctl: display required arguments for member commands in usage 2016-04-13 16:18:00 -07:00
22812badc2 Merge pull request #5069 from heyitsanthony/fix-snapshot-status-json
etcdctl: respect -write-out=json for snapshot status
2016-04-13 15:57:39 -07:00
b90e30b28e etcdctl: respect -write-out=json for snapshot status 2016-04-13 13:37:32 -07:00
a553ea8ba7 Merge pull request #5068 from heyitsanthony/lease-fixups
etcdctl: improve lease command documentation and exit codes
2016-04-13 13:20:06 -07:00
993f25f055 Merge pull request #5065 from heyitsanthony/errexit-defrag
etcdctl: return non-zero exit code if defrag fails on any endpoint
2016-04-13 13:19:43 -07:00
721ed6ba2b etcdctl: return non-zero exit code if defrag fails on any endpoint 2016-04-13 12:39:43 -07:00
855a5116a2 etcdctl: improve lease command documentation and exit codes 2016-04-13 12:38:21 -07:00
c0971a6ebc Merge pull request #5066 from gyuho/compaction_test
e2e: compaction test
2016-04-13 12:35:20 -07:00
3f0863a1e9 e2e: compact test 2016-04-13 12:07:48 -07:00
c8e860c4fa Merge pull request #5055 from gyuho/get_rev
*: add rev flag to get command
2016-04-13 12:05:48 -07:00
3fef0eb0d8 Merge pull request #5061 from xiang90/grpc_d
*:update dependencies
2016-04-13 11:40:14 -07:00
5157b713ed Merge pull request #5064 from raoofm/patch-1
Documentation: v3 mem benchmark total watch value
2016-04-13 11:35:23 -07:00
60548b85c4 *: add rev flag to get command 2016-04-13 11:32:29 -07:00
15e865e024 Merge pull request #5062 from gyuho/govet-mutex
etcd-tester: fix govet
2016-04-13 11:19:20 -07:00
cb280bae91 etcd-tester: fix govet 2016-04-13 11:12:31 -07:00
61cfe68247 Documentation: v3 mem benchmark total watch value
Updating Documentation/benchmarks/etcd-3-watch-memory-benchmark.md with the correct 'total watching' value
2016-04-13 14:12:10 -04:00
52c4595899 Merge pull request #5060 from gyuho/ineffassign
*: fixes based on ineffassign
2016-04-13 10:59:58 -07:00
7c5ec417c3 *:update dependencies 2016-04-13 10:47:24 -07:00
89f8e66682 *: fixes based on ineffassign 2016-04-13 10:41:58 -07:00
35d2d7b23e Merge pull request #5059 from gyuho/elect_e2e_test
e2e: add elect command test
2016-04-13 10:25:28 -07:00
1224044553 e2e: add elect command test 2016-04-13 10:00:56 -07:00
228e772b3a Merge pull request #5056 from heyitsanthony/expect-signal
pkg/expect, e2e: support sending Signals to expect process, test etcdctl lock
2016-04-13 09:42:41 -07:00
8763bd1e97 e2e: etcdctlv3 lock test 2016-04-13 09:26:16 -07:00
604a73c833 e2e: remove sh in spawnCmd
Certain shells claim the ppid for expect processes, which interferes
with signals.
2016-04-13 09:12:40 -07:00
fcb5ba98d0 pkg/expect: support sending Signals to expect process 2016-04-13 09:11:57 -07:00
18992bac4f Merge pull request #5057 from heyitsanthony/e2e-v3-cleanup
e2e: cleanup error and prefix arg handling for ctlv3 tests
2016-04-13 09:09:13 -07:00
209f573083 e2e: cleanup error and prefix arg handling for ctlv3 tests 2016-04-12 23:48:13 -07:00
2985396768 Merge pull request #5053 from xiang90/ctl_i
etcdctl: move endpoint-health and status into endpoint command
2016-04-12 16:50:03 -07:00
ae9b251d99 etcdctl: move endpoint-health and status into endpoint command 2016-04-12 16:30:26 -07:00
0ca949ce90 Merge pull request #5051 from heyitsanthony/fix-user-list
etcdctl: don't panic on ListUser with roles
2016-04-12 14:24:08 -07:00
c9ce92f635 client: accept roles in response for ListUser
Fixes #5046
2016-04-12 12:48:43 -07:00
a8b7d0b63c Merge pull request #5050 from xiang90/b_v
etcdserver: save cluster version into backend
2016-04-12 12:05:02 -07:00
e9735b7bd0 etcdserver: save cluster version into backend 2016-04-12 11:37:22 -07:00
f13e558ab4 e2e: test etcdtl user list on root user 2016-04-12 11:15:06 -07:00
095a755e4d Merge pull request #5049 from heyitsanthony/fix-grant-roles-existing
etcdctl: don't crash on duplicate role in user grant
2016-04-12 11:05:08 -07:00
a12fd9cc92 etcdctl: print grant/revoke error instead of scanning roles for changes
Fixes #5045
2016-04-12 10:49:05 -07:00
a0d653b630 e2e: test etcdctl v2 double user grant
Crashes in 2.3.1
2016-04-12 10:49:05 -07:00
040f7b90c7 Merge pull request #5048 from xiang90/fix_c
rafthttp: fix comment in msgappv2
2016-04-12 10:20:04 -07:00
f2d558644d rafthttp: fix comment in msgappv2 2016-04-12 10:14:06 -07:00
ef0d5c3d7d Merge pull request #4957 from mqliang/memberStatus
etcdctlv3: expose store size and raft status in 'etcdctl status' command
2016-04-12 08:46:45 -07:00
ff311ba0a7 etcdctlv3: print db size and raft status in 'etcdctl status' command 2016-04-12 22:58:22 +08:00
a9a06438f9 etcdctlv3: expose db size and raft status in server side 2016-04-12 22:49:15 +08:00
1044fbce2c etcdctlv3: update auto-generated files 2016-04-12 22:48:47 +08:00
c3da2631bf etcdctlv3: add db size and raft status in protobuf 2016-04-12 22:47:27 +08:00
17e32b6aa9 Merge pull request #5041 from xiang90/snap_info
snapshot status
2016-04-11 23:13:08 -07:00
50219d4def Merge pull request #5042 from gyuho/ts
benchmark: return time series with missing periods filled in
2016-04-11 23:09:36 -07:00
8e3d99cd3e Merge pull request #5043 from mitake/auth-trivial
little cleaning of v3 auth
2016-04-11 23:09:22 -07:00
2aab6ff2eb benchmark: return time series with missing periods filled in 2016-04-11 23:07:45 -07:00
94d436c5d1 vendor: add go-humanize 2016-04-11 22:55:47 -07:00
b5292f6fce etcdctl: add snapshot status support 2016-04-11 22:55:47 -07:00
0b4749ea65 auth: remove needless logging during creating a new user 2016-04-12 14:52:31 +09:00
bfd49023a1 auth: sort key permissions of role struct for effective searching 2016-04-12 14:52:31 +09:00
b1c3e7edbf Merge pull request #4982 from heyitsanthony/godep-update-script
scripts: updatedep.sh to update vendored dependencies
2016-04-11 19:47:18 -07:00
4481e54017 Merge pull request #5040 from heyitsanthony/fix-txn-rev
etcdserver: set txn header revision to store revision following txn
2016-04-11 19:41:54 -07:00
c5b8e8dc88 etcdserver: set txn header revision to store revision following txn 2016-04-11 17:03:05 -07:00
8c2225f251 Merge pull request #5038 from heyitsanthony/sshot-docs
doc: document etcdctl snapshot command
2016-04-11 16:21:09 -07:00
195100a769 Merge pull request #5039 from heyitsanthony/fix-write-out
etcdctl: respect --write-out
2016-04-11 16:19:43 -07:00
3a695a82a3 Merge pull request #5036 from xiang90/r_t
raft: add a test case for TestSlice
2016-04-11 16:02:13 -07:00
e5a2bd58ec etcdctl: respect --write-out
Support got clobbered about a month ago.
2016-04-11 16:01:38 -07:00
6e8d01f956 doc: document etcdctl snapshot command 2016-04-11 15:58:20 -07:00
0a684c10ad Merge pull request #5025 from xiang90/no_dup_resp
etcdserver: do not send out out of date appResp
2016-04-11 14:41:52 -07:00
3bad47d691 Merge pull request #5018 from xiang90/b
etcdserver: set backend to cluster
2016-04-11 13:02:57 -07:00
be822b05d2 Merge pull request #5012 from heyitsanthony/snap-api
*: snapshot RPC
2016-04-11 13:00:18 -07:00
e838c26f8a etcdctl: use snapshot RPC in snapshot command 2016-04-11 12:32:53 -07:00
b97b5843a3 Merge pull request #5035 from heyitsanthony/fix-unused-output
test: display unused output if unused source found
2016-04-11 11:36:23 -07:00
174a996c37 Merge pull request #5032 from mitake/auth-user-grant
*: support granting a role to a user in v3 auth
2016-04-11 11:10:10 -07:00
9423125ce1 raft: add a test case for TestSlice 2016-04-11 10:04:03 -07:00
2113b77635 test: display unused output if unused source found
unused exits non-zero if it finds unused source, which causes the
test script's 'set -e' to abort the run
2016-04-11 09:55:22 -07:00
d5766eab3e clientv3: add Snapshot to Maintenance 2016-04-11 09:51:17 -07:00
a6b6fcf1c4 etcdserverpb, v3rpc: add Snapshot to Maintenance RPC service 2016-04-11 09:51:16 -07:00
7ba2646d37 *: support granting a role to a user in v3 auth 2016-04-11 15:53:30 +09:00
af1b3f061a Merge pull request #5024 from ajityagaty/user_cmd_tests
e2e: Test cases for the etcdctlv3 user commands.
2016-04-10 23:50:12 -07:00
6c8428c393 Merge pull request #5031 from gyuho/cleanup
*: clean up from go vet, misspell
2016-04-10 23:32:46 -07:00
9108af9046 *: clean up from go vet, misspell 2016-04-10 23:16:56 -07:00
1f4f3667a4 Merge pull request #5021 from gyuho/vendor_doc
*: client vendoring README
2016-04-10 22:15:36 -07:00
935999a80e Merge pull request #5030 from mitake/auth-trivial
trivial updates for v3 auth
2016-04-10 21:47:08 -07:00
53bb79f240 auth: remove needless field from protobuf definition
The field tombstone won't be used in the future because of the design
change.
2016-04-11 13:02:34 +09:00
097cec8194 etcdctl: let some v3 auth related functions be private
They don't need to be public.
2016-04-11 13:01:19 +09:00
27480f9ea4 Merge pull request #4966 from mitake/auth-role-grant
*: support granting key permission to role in v3 auth
2016-04-10 20:31:05 -07:00
02033b4c47 *: support granting key permission to role in v3 auth 2016-04-11 12:23:19 +09:00
f5f0280a63 Merge pull request #5028 from heyitsanthony/etcdmain-unsupported-envvar
etcdmain: start on unsupported arch when ETCD_UNSUPPORTED_ARCH is set
2016-04-10 19:55:34 -07:00
c4caa65c51 etcdmain: start on unsupported arch when ETCD_UNSUPPORTED_ARCH is set 2016-04-10 19:36:04 -07:00
130567832f Merge pull request #4734 from luxas/32bit_alignments
etcdserver: align 64-bit atomics on 8-byte boundary
2016-04-10 19:18:15 -07:00
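
A minimal illustration of the alignment rule behind this PR; the struct and field names are made up:

```
package main

import "sync/atomic"

// On 32-bit platforms, 64-bit sync/atomic operations require 8-byte
// alignment, which Go guarantees only for the first word of an
// allocated struct. Keeping int64 fields first keeps them safe.
type counters struct {
	requests int64 // first: 8-byte aligned even on 32-bit ARM
	flags    int32 // narrower fields follow the 64-bit ones
}

func main() {
	var c counters
	atomic.AddInt64(&c.requests, 1)
	_ = c.flags
}
```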
603c14db9d e2e: Test cases for the etcdctlv3 user commands.
New test cases have been added to test the "user" sub-commands of
the etcdctlv3 utility.
2016-04-10 17:46:04 -07:00
0d9039f192 Merge pull request #5026 from lodevil/master
KeepAliveOnce error fix (when the lease is not found)
2016-04-10 17:35:52 -07:00
e3fd246414 clientv3: fix KeepAliveOnce return error message 2016-04-11 08:13:36 +08:00
de7692b2b2 etcdserver: do not send out out of date appResp 2016-04-09 23:30:00 -07:00
3c0ac9d600 etcdserver: set backend to cluster 2016-04-08 21:46:45 -07:00
78554c6de6 *: client vendoring README 2016-04-08 19:48:17 -07:00
345bdc3db6 Merge pull request #5017 from xiang90/member
membership: save/update the whole member information into backend
2016-04-08 13:30:35 -07:00
a406c9fa3d membership: save/update the whole member information into backend 2016-04-08 13:14:37 -07:00
fe810e7b43 Merge pull request #5015 from gyuho/semaphore-ci-badge
README: change semaphore CI status badge
2016-04-08 12:10:05 -07:00
9bb99b5f72 Merge pull request #5016 from gyuho/lease_simple
*: clean up from gosimple
2016-04-08 12:09:56 -07:00
953a08d841 *: clean up from gosimple 2016-04-08 11:55:03 -07:00
4997ed36b4 Merge pull request #5011 from xiang90/r_c
raft: fix issues reported by golint
2016-04-08 11:46:12 -07:00
97730778e5 README: change semaphore CI status badge
We just reset Semaphore CI, and the badge URL has changed.
2016-04-08 11:24:30 -07:00
b70e6a6bf1 Merge pull request #4916 from es-chow/transfer-leader
raft: transfer leader feature
2016-04-08 08:01:24 -07:00
ac059eb8cb raft: transfer leader feature 2016-04-08 16:56:32 +08:00
4041bbe571 Merge pull request #5008 from gyuho/gosimple_unused
clean up with gosimple and unused
2016-04-07 23:31:21 -07:00
fb85da92e8 *: fix based on gosimple and unused 2016-04-07 23:16:37 -07:00
9aec045fce test, travis: integrate gosimple and unused 2016-04-07 23:16:33 -07:00
1b41ee9c99 raft: fix issues reported by golint 2016-04-07 22:14:56 -07:00
49f9b5470e Merge pull request #5009 from xiang90/sp
*: fix misspell
2016-04-07 22:09:41 -07:00
9c7fb9c360 *: fix misspell 2016-04-07 21:57:06 -07:00
71a492e59e Merge pull request #5005 from xiang90/clu_storage
membership: update attr in membership pkg
2016-04-07 21:40:32 -07:00
b13b77f362 membership: update attr in membership pkg 2016-04-07 21:25:32 -07:00
2fe3e1e850 Merge pull request #5007 from heyitsanthony/hush-caps
v2http: only report capabilities on update
2016-04-07 20:31:37 -07:00
2b7ad35fa0 v2http: only report capabilities on update 2016-04-07 20:14:30 -07:00
1f5794c117 Merge pull request #4997 from heyitsanthony/fix-race-consistent
etcdserver: fix race on consistent index
2016-04-07 20:12:22 -07:00
4d2d2cabb9 etcdserver: fix race on consistent index 2016-04-07 19:53:08 -07:00
004ff3d4f0 Merge pull request #5006 from gyuho/watch_type
clientv3/integration: use clientv3.Event type
2016-04-07 19:48:18 -07:00
a9f1d5dfa6 clientv3/integration: use clientv3 event types
Fix https://github.com/coreos/etcd/issues/5001.
2016-04-07 19:29:32 -07:00
8b320e7c55 Merge pull request #4999 from gyuho/test
*: log, expect by capability check
2016-04-07 19:06:15 -07:00
1c12b66e35 Merge pull request #5000 from xiang90/clu_storage
membership: save/update/delete member when backend is provided
2016-04-07 18:00:11 -07:00
868a3e279d Merge pull request #5002 from gyuho/agent_test
etcd-agent: fix etcd agent tests, remove unused listener
2016-04-07 17:19:44 -07:00
d78345244b *: log, expect by capability check 2016-04-07 17:18:51 -07:00
139f23fd13 etcd-agent: fix etcd agent tests, remove unused listener 2016-04-07 17:04:24 -07:00
29623cccb2 membership: save/update/delete member when backend is provided 2016-04-07 16:34:43 -07:00
c91c7ca3bf Merge pull request #4961 from heyitsanthony/rename-lease-create
*: rename lease Create to Grant
2016-04-07 14:51:22 -07:00
f31105bc08 Merge pull request #4994 from xiang90/clu
etcdserver: move membership related code to membership pkg
2016-04-07 14:39:18 -07:00
bf2289ae00 etcdserver: move membership related code to membership pkg 2016-04-07 14:21:37 -07:00
5d4ee7ac5f Merge pull request #4995 from gyuho/proxy-clean
proxy: simplify channel receive, add missing function call
2016-04-07 12:32:24 -07:00
dc17eaace7 *: rename Lease Create to Grant
Creating a lease through the client API interface union looked like
"c.Create(...)"-- the method name wasn't very descriptive.
2016-04-07 12:28:14 -07:00
6abbdcdc06 proxy: simplify channel receive, add missing function call 2016-04-07 12:24:17 -07:00
ee4ff1e448 Merge pull request #4976 from gyuho/lease_testing
e2e: lease tests, fix minor format string
2016-04-07 11:25:51 -07:00
84bf6e7462 e2e: lease tests, fix minor format string 2016-04-07 11:18:49 -07:00
a38617d93a Merge pull request #4992 from gyuho/e2e_clean
e2e: clean up, return all lines in error
2016-04-07 10:59:08 -07:00
2779341250 e2e: clean up, return all lines in error
1. change file names
2. now if sub-command errors, the test will receive all
lines from stdout and stderr.

Expected output:

```
read /dev/ptmx: input/output error (expected key2, got ["key1\r\n" "val1\r\n" ""])
```

3. change how we check GRPC timeout (only bypass timeout error when we give 0
timeout)
2016-04-07 10:41:56 -07:00
ac232ac9a7 scripts: updatedep.sh to update vendored dependencies
Running godep in the vendored cmd directory will try to pull etcd in
as a dependency. As a fix, this script safely vendors into cmd.
2016-04-07 10:28:33 -07:00
2e5ee26300 Merge pull request #4987 from hongchaodeng/ev
expose APIs to recognize event type
2016-04-07 10:18:25 -07:00
21eda79451 Merge pull request #4991 from endocode/kayrus/faq_doc
docs: fixed markdown formatting in faq.md
2016-04-07 09:59:35 -07:00
5c782a2086 docs: fixed markdown formatting in faq.md 2016-04-07 18:51:33 +02:00
aa11dafaf8 clientv3: expose event type in user API
- add another layer of abstraction in clientv3 for the user, not exposing internal storagepb types
- provide commonly used routines IsCreate(), IsModify() on event
2016-04-07 09:47:04 -07:00
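
A hedged usage sketch of the new helpers; the endpoint and key are placeholders:

```
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// IsCreate/IsModify classify events without touching storagepb internals.
	for resp := range cli.Watch(context.Background(), "foo") {
		for _, ev := range resp.Events {
			switch {
			case ev.IsCreate():
				fmt.Println("created:", string(ev.Kv.Key))
			case ev.IsModify():
				fmt.Println("modified:", string(ev.Kv.Key))
			}
		}
	}
}
```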
a5f341e886 Merge pull request #4989 from xiang90/clu
*: move Cluster interface to api
2016-04-07 08:33:52 -07:00
030865abe3 *: move Cluster interface to api 2016-04-07 08:05:47 -07:00
b137df77f1 Merge pull request #4985 from gyuho/unused
*: clean up unused vars, functions
2016-04-06 21:49:58 -07:00
6e6d64fb9b *: clean up unused vars, functions
With help from https://github.com/dominikh/go-unused.
IsNetTimeoutError seems useful, so moved to pkg/netutil.
2016-04-06 21:33:55 -07:00
79a09e6857 Merge pull request #4984 from gyuho/watch-range
clientv3/integration: fix watch range test typo
2016-04-06 21:32:30 -07:00
e72591b4a2 clientv3/integration: fix watch range test typo 2016-04-06 21:12:07 -07:00
7408bc2504 Merge pull request #4948 from heyitsanthony/update-grpc
vendor: update grpc
2016-04-06 17:55:53 -07:00
82e58e602d Merge pull request #4983 from gyuho/expect_line
pkg/expect: ExpectFunc, LineCount
2016-04-06 16:10:02 -07:00
679e5e379b pkg/expect: ExpectFunc, LineCount
ExpectFunc makes expect more extensible. LineCount makes it
possible to check 'no output' commands.
2016-04-06 15:56:00 -07:00
62990fb5fa Merge pull request #4970 from tamird/fix-raft-past-election
raft: correct regression in `pastElectionTimeout`
2016-04-06 08:03:38 -07:00
68db18667a raft: correct doc comment 2016-04-06 08:43:42 -04:00
5250784b09 raft: use rand.Intn instead of rand.Int and mod
This provides a better random distribution and is easier to read.
2016-04-06 08:43:42 -04:00
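
For illustration:

```
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const n = 10
	biased := rand.Int() % n // modulo trickery; can skew the distribution
	uniform := rand.Intn(n)  // uniform over [0, n) and self-describing
	fmt.Println(biased, uniform)
}
```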
6b0eb9c3c0 godeps: update grpc dependency 2016-04-06 01:30:06 -07:00
34375ef851 Merge pull request #4950 from heyitsanthony/revendor
vendor: only vendor on emitted binaries
2016-04-05 21:36:51 -07:00
b1d41016b2 vendor: only vendor on emitted binaries
Moves the vendor/ directory to cmd/vendor. Vendored binaries are built
from cmd/, which is backed by symlinks pointing back to repo root.
2016-04-05 21:01:16 -07:00
b9e933b850 Merge pull request #4971 from gyuho/e2e_more
e2e: clean up to test tables, endpoint-health test
2016-04-05 14:34:35 -07:00
e3599e4145 e2e: clean up to test tables, endpoint-health test 2016-04-05 13:33:37 -07:00
01c303113d Merge pull request #4964 from gyuho/get_sort_e2e
e2e: get with sort order, target
2016-04-04 23:22:04 -07:00
3e39f36b34 e2e: get with sort order, target 2016-04-04 23:10:03 -07:00
c3bca3739f Merge pull request #4926 from mitake/auth-role-add
*: support adding role in auth v3
2016-04-04 18:44:16 -07:00
21096bf27f Merge pull request #4963 from xiang90/ht
*: mv etcdhttp into api pkg
2016-04-04 18:40:29 -07:00
8662aaada4 Merge pull request #4958 from mitake/progrep-race
etcdserver, clientv3: let progressReportIntervalMilliseconds be private
2016-04-04 18:04:57 -07:00
2b17a3919c *: support adding role in auth v3 2016-04-05 09:28:17 +09:00
88306c9fa7 etcdserver, clientv3: let progressReportIntervalMilliseconds be private
progressReportIntervalMilliseconds (formerly
ProgressReportIntervalMilliseconds) is accessed by multiple goroutines,
and this access is reported as a race.

To avoid this report, this commit wraps the variable with functions
that access it with atomic operations, so the race won't be reported.
2016-04-05 09:12:17 +09:00
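
A sketch of the wrapper approach, assuming an int32 millisecond value; the identifiers approximate etcd's but may not match exactly:

```
package v3rpc

import (
	"sync/atomic"
	"time"
)

// All access goes through atomic operations, so concurrent readers and
// writers (e.g. a test shortening the interval) no longer race.
var progressReportIntervalMs int32 = 10 * 60 * 1000

func progressReportInterval() time.Duration {
	return time.Duration(atomic.LoadInt32(&progressReportIntervalMs)) * time.Millisecond
}

func setProgressReportIntervalMs(ms int32) {
	atomic.StoreInt32(&progressReportIntervalMs, ms)
}
```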
2c50eb240e *: mv etcdhttp into api pkg 2016-04-04 16:31:35 -07:00
bfbe0fac8c Merge pull request #4951 from gyuho/watch_prefix
e2e: watch by prefix
2016-04-04 15:11:32 -07:00
9de5b8db80 e2e: watch by prefix 2016-04-04 14:52:54 -07:00
b3247356c1 Merge pull request #4956 from heyitsanthony/txn-serialize
etcdserver: serializable transactions
2016-04-04 09:51:09 -07:00
98504fe863 Merge pull request #4959 from gyuho/ctl_doc
etcdctl: READMEv3 doc about prefix
2016-04-04 08:28:41 -07:00
1543e7bd95 etcdctl: READMEv3 doc about prefix 2016-04-04 07:00:49 -07:00
fab3c8e705 etcdserver: serializable transactions
Support the case where a txn doesn't have to go through quorum.
2016-04-04 04:21:42 -07:00
46e877b8bb Merge pull request #4955 from mitake/e2e-test
e2e: import fmt in etcdctlv3_test.go
2016-04-04 01:37:21 -07:00
4ff81678ac e2e: import fmt in etcdctlv3_test.go 2016-04-04 17:00:33 +09:00
b6ac21374e Merge pull request #4952 from ajityagaty/snap_db_file_fix
snap: Do not complain about db file.
2016-04-03 17:54:03 -07:00
c12f263577 snap: Do not complain about db file.
Currently the snapshotter throws a warning if a file without the
.snap suffix is found. Fix it to allow known files to exist in
the snap folder.
2016-04-03 17:28:04 -07:00
e8a4ed01e2 Merge pull request #4949 from gyuho/delete
*: add del by prefix with e2e tests
2016-04-03 12:09:16 -07:00
3abd137dc5 Merge pull request #4945 from heyitsanthony/fix-exit-status
e2e, pkg/expect: distinguish between Stop and Close
2016-04-03 12:02:59 -07:00
dc420d660e e2e, pkg/expect: distinguish between Stop and Close
Fixes #4928
2016-04-03 11:45:02 -07:00
9afae9e2c1 *: add del by prefix with e2e tests 2016-04-03 11:41:49 -07:00
bb69dd324e Merge pull request #4939 from gyuho/e2e_txn_version
e2e: etcdctlv3 version, txn basic tests
2016-04-03 11:09:57 -07:00
73b0d398e4 Merge pull request #4946 from xiang90/b
vendor: update boltdb to 1.2.0
2016-04-03 10:59:51 -07:00
f4eaa3f8fb pkg/expect: replace SendLine with Send method 2016-04-03 10:57:35 -07:00
c280871714 e2e: etcdctlv3 version, txn basic tests 2016-04-03 10:57:31 -07:00
37c1edc952 vendor: update boltdb to 1.2.0 2016-04-03 10:47:07 -07:00
19136afc2b Merge pull request #4798 from mqliang/memberStatus
etcdctlv3: initial implementation of 'etcdctl member status' command
2016-04-03 08:48:23 -07:00
d80af00785 etcdctlv3: implement the 'etcdctl status' command 2016-04-03 13:55:58 +08:00
f3ca17ea03 etcdctlv3: implement the client side functionality 2016-04-03 13:46:34 +08:00
1d5d2494ed etcdctlv3: implement status rpc in server side 2016-04-03 13:46:01 +08:00
bbca61252f etcdctlv3: update auto-generated files 2016-04-03 13:45:17 +08:00
3c62bfb7a3 etcdctlv3: add status rpc in protobuf file 2016-04-03 13:44:45 +08:00
6770b9c67a Merge pull request #4944 from gyuho/delete_num
etcdctl: print number of deleted keys
2016-04-02 21:13:46 -07:00
e8877ab180 etcdctl: print number of deleted keys 2016-04-02 20:54:37 -07:00
584d90cd5d Merge pull request #4912 from gyuho/defrag
functional-tester: defrag every 500 rounds
2016-04-02 18:58:41 -07:00
b866337f25 functional-tester: defrag every 500 rounds
Fix https://github.com/coreos/etcd/issues/4665.
2016-04-02 18:51:26 -07:00
d2ce6836af Merge pull request #4942 from xiang90/def
backend: reset count in defrag
2016-04-02 18:43:03 -07:00
c09f23c46d *: clean up bool comparison 2016-04-02 18:27:54 -07:00
2b54b73b90 backend: reset count in defrag 2016-04-02 17:25:05 -07:00
b0cc0e443c *: clean up if, bool comparison 2016-04-02 12:55:11 -07:00
dc0061e4db e2e: add Get tests 2016-04-01 22:45:27 -07:00
ff01a4de65 Merge pull request #4936 from heyitsanthony/compact-barrier-restore
etcdserver, storage: don't ack physical compaction on error or snap restore
2016-04-01 20:18:12 -07:00
6f707b857a etcdserver, storage: don't ack physical compaction on error or snap restore
Snapshot recovery will reset the FIFO; reschedule the physical acknowledgment
instead of acknowledging on scheduler teardown.
2016-04-01 16:32:05 -07:00
eea56d037e etcdserver: fix govet error 2016-04-01 16:01:47 -07:00
3083b6d11e Merge pull request #4933 from xiang90/m
MAINTAINERS: update maintainers list
2016-04-01 15:34:57 -07:00
623c7b4df4 Merge pull request #4930 from heyitsanthony/fix-wal-corrupt
wal: fix tail corruption
2016-04-01 15:23:52 -07:00
c0e614b0bd MAINTAINERS: update maintainers list 2016-04-01 15:12:08 -07:00
bfe3a3d08e wal: fix tail corruption
On ReadAll, WAL seeks to the end of the last record in the tail. If the tail did not
end with preallocated space, the decoder would report 0 as the last offset and begin
writing at offset 0 of the tail.

Fixes #4903
2016-04-01 15:05:52 -07:00
e1b561cb7c Merge pull request #4929 from xiang90/rand
raft: lower split vote rate
2016-04-01 12:35:59 -07:00
5d431b4782 raft: lower split vote rate 2016-04-01 12:11:03 -07:00
bf6d905a5a Merge pull request #4923 from xiang90/conf
clientv3: support read conf from file
2016-04-01 10:09:51 -07:00
f05f7b475e vendor: add yaml dependencies 2016-04-01 09:36:11 -07:00
802de5f9f8 clientv3: support read conf from file 2016-04-01 09:36:11 -07:00
307cb5167c Merge pull request #4925 from heyitsanthony/wal-dump-lock
etcd-dump-logs: don't try to acquire wal file locks
2016-03-31 22:24:54 -07:00
7fffd6ffd2 etcd-dump-logs: don't try to acquire wal file locks
can now dump logs from a running etcd instance
2016-03-31 21:51:20 -07:00
c43910f835 Merge pull request #4910 from gyuho/compact_test
etcd-tester: no error for compact double-send
2016-03-31 21:43:26 -07:00
bdaba136a9 Merge pull request #4915 from heyitsanthony/hash-barrier
etcdserver: force backend commit before acking physical compaction
2016-03-31 21:36:57 -07:00
f9b90e13ac etcd-tester: no error for compact double-send
When a compactKV request is halted before final acknowledgement,
the tester used to just continue on the next endpoint. But compactKV
could be requested twice, with the first request already replicated
and applied by the time the second one is applied (returning a
compact revision error). This change skips that case by parsing
the error message.
2016-03-31 21:29:02 -07:00
81de5648d9 etcdserver: force backend commit before acking physical compaction 2016-03-31 21:25:40 -07:00
2f785015a5 Merge pull request #4922 from gyuho/ctl_test
e2e: basic v3 watch test
2016-03-31 18:18:29 -07:00
b98f67095e e2e: add basic v3 watch test 2016-03-31 18:04:14 -07:00
d898c68f2c pkg/expect: add SendLine for interactive mode 2016-03-31 15:34:30 -07:00
1d698f093f Merge pull request #4921 from xiang90/tls
*: move basic tls util funcs to tlsutil pkg
2016-03-31 09:59:25 -07:00
eb3919e8cf *: move basic tls util funcs to tlsutil pkg 2016-03-31 09:45:45 -07:00
de801b500b Merge pull request #4920 from mitake/auth-user-password
*: support changing password in v3 auth
2016-03-30 23:45:50 -07:00
73166b41e9 *: support changing password in v3 auth
This commit adds functionality for updating the passwords of existing
users.
2016-03-31 15:28:15 +09:00
f328c75ba7 Merge pull request #4919 from gyuho/expects
*: ctl v3 tests with multi expects
2016-03-30 22:23:21 -07:00
a6c6bbd81c e2e: ctl tests with multi expects 2016-03-30 22:09:23 -07:00
324afd7fde Merge pull request #4918 from mitake/auth-user-messages
etcdctl: print messages for successful auth operations
2016-03-30 22:03:14 -07:00
2ad9b5692f etcdctl: print messages for successful auth operations
This commit lets etcdctl v3 follow the manner of etcdctl v2.
2016-03-31 13:56:01 +09:00
59bb65182a Merge pull request #4917 from mitake/auth-user-delete
*: support deleting user in v3 auth
2016-03-30 21:36:17 -07:00
d8888ded12 *: support deleting user in v3 auth
This commit adds user deletion functionality. It can be invoked
with the new user delete command.

Example usage:
$ ETCDCTL_API=3 etcdctl user delete usr1
2016-03-31 13:18:51 +09:00
93c3f920ca Merge pull request #4909 from heyitsanthony/pkg-expect
e2e: replace gexpect with simpler expect
2016-03-30 15:36:41 -07:00
eb3351533a godep: remove gexpect 2016-03-30 15:14:24 -07:00
5022dce31a e2e: use pkg/expect 2016-03-30 15:14:24 -07:00
5707f6b997 pkg/expect: add expect package 2016-03-30 15:14:24 -07:00
b539d3a411 test: check formatting for all relevant packages in pkg/ 2016-03-30 15:14:24 -07:00
6cf198d1b1 Merge pull request #4911 from heyitsanthony/physical-already
etcdserver, storage: wait for physical compaction if already compacted
2016-03-30 14:27:21 -07:00
7b37bd332c etcdserver, storage: wait for physical compaction if already compacted 2016-03-30 13:59:52 -07:00
7ce5c2b9ff Merge pull request #4902 from heyitsanthony/alarm-ctl
etcdctl: alarm command
2016-03-30 13:55:29 -07:00
14f146b9f7 Merge pull request #4908 from xiang90/c
*: simplify consistent index handling
2016-03-30 13:53:21 -07:00
eddc741b5e *: simplify consistent index handling 2016-03-30 13:38:28 -07:00
2aca3252e8 etcdctl: alarm command 2016-03-30 13:33:52 -07:00
c91b2d098d clientv3: AlarmList and AlarmDisarm 2016-03-30 13:33:52 -07:00
dd5b73cfee alarms: support Get of all alarms 2016-03-30 13:33:52 -07:00
cd02cef5e9 etcdserver: only warn on new and disarmed alarms
listing alarms was generating warning output
2016-03-30 13:33:52 -07:00
0f64e01f6b Merge pull request #4864 from cdancy/patch-1
Update libraries-and-tools.md
2016-03-30 13:02:09 -07:00
4e2a4b17b5 Documentation: add etcd-rest to libraries-and-tools.md
Add link to the etcd-rest client under the 'Java libraries' sub-section.

Fixes #4906
2016-03-30 15:56:20 -04:00
a5172974da Merge pull request #4863 from heyitsanthony/ft-check-compact
etcd-tester: check compaction revision
2016-03-30 10:08:05 -07:00
1eb375d296 Merge pull request #4880 from gyuho/drain
*: drain http.Response.Body before closing
2016-03-30 10:02:52 -07:00
1bee31a3bb Merge pull request #4905 from gyuho/vendor_doc
*: document client package vendoring guide
2016-03-30 10:02:32 -07:00
4c65f3fe7a etcd-tester: check compaction revision
Faster than waiting 30 seconds between rounds.
2016-03-30 09:45:30 -07:00
4b35cb9462 etcdserver, storage: optionally wait for Compaction completion in RPC 2016-03-30 09:45:30 -07:00
a42d1dc1fe *: drain http.Response.Body before closing 2016-03-30 09:35:47 -07:00
b8d3b15206 *: document client package vendoring guide 2016-03-30 09:34:41 -07:00
12d8d33a1c Merge pull request #4879 from mitake/auth-user-error
etcdserver: return internal error in a case of not auth specific errors
2016-03-30 08:04:41 -07:00
8ee8d755bb etcdserver: return internal error in a case of not auth specific errors 2016-03-30 23:44:22 +09:00
443c677357 etcdserver: extract togRPCError() to a separate file
It is used from multiple files in the v3rpc package.
2016-03-30 22:53:20 +09:00
96ee00a322 etcdserverpb: make alarm memberId uint64
To be consistent with Cluster API
2016-03-29 20:15:39 -07:00
2deed74494 Merge pull request #4901 from heyitsanthony/config-dbsize
etcdserver: configurable backend size quota
2016-03-29 18:55:12 -07:00
9b2c963179 etcdserver: configurable backend size quota
Configurable with the flag --experimental-quota-backend-bytes and
through ServerConfig.QuotaBackendBytes.

Fixes #4894
2016-03-29 18:39:25 -07:00
b0956d5dbf Merge pull request #4891 from mitake/auth-prefix
*: add Auth prefix to auth related requests and responses
2016-03-29 17:24:12 -07:00
d00811428d Merge pull request #4898 from gyuho/context_err
client: return context error
2016-03-29 17:22:40 -07:00
8d0d10cce5 client: return original ctx error
Fix https://github.com/coreos/etcd/issues/3209.
2016-03-29 16:57:48 -07:00
00f222ecad Merge pull request #4892 from gyuho/help
etcdmain: add missing flag doc
2016-03-29 10:30:33 -07:00
870b5c5ea7 Merge pull request #4219 from endocode/kayrus/username_environment
Handle ETCDCTL_USERNAME environment
2016-03-29 10:24:43 -07:00
720502b25f etcdctl: Handle ETCDCTL_USERNAME environment 2016-03-29 19:06:31 +02:00
92f4aced25 etcdmain: add peer-auto-tls doc 2016-03-29 09:40:57 -07:00
bb8619f4f7 Merge pull request #4895 from xiang90/client_doc
client: doc that client is thread-safe
2016-03-29 09:36:01 -07:00
9d49d35090 client: doc that client is thread-safe 2016-03-29 09:28:53 -07:00
d533c14881 Merge pull request #4876 from heyitsanthony/integration-races
*: fix races from clientv3/integration tests
2016-03-29 09:10:53 -07:00
75babb82b6 Merge pull request #4888 from xiang90/fix_raft
rafthttp: do not block on proposal
2016-03-29 07:37:18 -07:00
161bc5e19c clientv3: fix race when setting grpc Logger
grpc only permits SetLogger on init()
2016-03-28 23:30:03 -07:00
987568c65c *: add Auth prefix to auth related requests and responses 2016-03-29 14:32:19 +09:00
1637b37132 Merge pull request #4890 from heyitsanthony/fix-4889
clientv3/integration: get quorum before watching in TestKVCompact
2016-03-28 22:30:58 -07:00
096abb3f37 clientv3/integration: get quorum before watching in TestKVCompact
Fixes #4889
2016-03-28 22:18:10 -07:00
660eef8a95 Merge pull request #4872 from ajityagaty/cli_opts_aliases
etcdctl: Add aliases for command flags.
2016-03-28 22:04:00 -07:00
0c137b344b rafthttp: do not block on proposal 2016-03-28 21:40:12 -07:00
2e3856740d etcdctl: Add aliases for command flags.
Add aliases to the flags that are supplied to the sub commands.
2016-03-28 20:57:34 -07:00
c53380cd2a Merge pull request #4886 from heyitsanthony/move-hash
v3rpc: move Hash RPC to Maintenance service
2016-03-28 19:35:03 -07:00
3fbacf4be2 v3rpc: move Hash RPC to Maintenance service 2016-03-28 17:15:58 -07:00
495bef8b4c Merge pull request #4885 from xiang90/log_doc
doc/dev: add logging doc
2016-03-28 17:00:41 -07:00
4bdfc0a46d clientv3: fix race on writing watch channel over return channel
Found in TestElectionFailover
2016-03-28 16:08:18 -07:00
5ee85bea7c v3rpc: fix race on watch progress map
Found in TestElectionWait
2016-03-28 16:08:18 -07:00
813afc3d11 rafthttp: fix race between AddRemote and Send 2016-03-28 16:08:18 -07:00
91dc6b29a6 clientv3/integration: fix race when setting progress report interval 2016-03-28 16:08:18 -07:00
2c83362e63 clientv3: fix race in KV reconnection logic 2016-03-28 16:08:18 -07:00
e129223dbe clientv3: fix race in watcher resume 2016-03-28 16:08:18 -07:00
47db0a2f2e test: add race detection to clientv3 integration tests 2016-03-28 16:08:18 -07:00
ffc7488af2 doc/dev: add logging doc 2016-03-28 15:34:51 -07:00
6e3a0948e4 Merge pull request #4868 from heyitsanthony/api-quota
etcdserver: storage quotas
2016-03-28 15:15:57 -07:00
a403a94d7b etcdserver: cap new keys on space alarm 2016-03-28 14:56:26 -07:00
9e7f47c490 etcdserver: Alarm RPC
Alarms are events that nodes can use to relay health information to
the rest of the cluster. A node may Activate an alarm and that alarm
will stay set until Deactivated.
2016-03-28 14:56:26 -07:00
ae077a2183 backend: add UnsafeForEach to BatchTx
Useful for efficiently iterating over an entire bucket.
2016-03-28 14:56:26 -07:00
9c8253c543 etcdserver, v3rpc: space quotas 2016-03-28 14:56:26 -07:00
fc346041e5 Merge pull request #4883 from heyitsanthony/fix-4874
integration: don't call rand.Intn in TestSTMConflict on 0
2016-03-28 13:36:19 -07:00
94e77cfa5d etcdserver: move v3 raft apply functions to interface 2016-03-28 13:16:21 -07:00
384c3ec907 integration: don't call rand.Intn in TestSTMConflict on 0
Fixes #4874
2016-03-28 13:06:07 -07:00
2b83d9c2e5 Merge pull request #4882 from xiang90/ctl_combine
*: combine etcdctl and etcdctlv3
2016-03-28 11:42:25 -07:00
87d9f06a45 *: combine etcdctl and etcdctlv3 2016-03-28 11:28:05 -07:00
83ada7232a Merge pull request #4871 from gyuho/windows_file_lock_20160326
pkg/fileutil: lock file on Windows
2016-03-27 12:38:38 -07:00
fa98d8d337 Merge pull request #4845 from mitake/auth-user
*: support adding user in v3 auth
2016-03-27 07:51:10 -07:00
8874545a1e *: support adding user in v3 auth
This commit adds a new subcommand "user add" to etcdctlv3. With this
command, users can create a user for authentication.

Example of usage:
$ etcdctlv3 user add user1
Password of user1:
Type password of user1 again for confirmation:
2016-03-27 18:11:42 +09:00
3f1a1c3192 pkg/fileutil: lock file on Windows 2016-03-27 00:35:44 -07:00
68b38e7ade Merge pull request #4875 from gyuho/clientv3_disable_grpclog
clientv3: disable client side grpc log
2016-03-26 22:57:37 -07:00
29fccb3221 clientv3: configurable grpc logger 2016-03-26 22:38:53 -07:00
b8fc61bcec Merge pull request #4869 from ajityagaty/insecure_skip_tls_verify
etcdctlv3: Add insecure-skip-tls-verify flag.
2016-03-26 12:12:55 -07:00
9c3242c6df Merge pull request #4862 from mitake/procfiles
Procfile, V3DemoProcfile: add default endpoint of v3 to Procfile remo…
2016-03-26 08:21:01 -07:00
7418c1af24 V3DemoProcfile: remove the obsolete flag
The --experimental-v3demo flag has already been removed, so
V3DemoProcfile cannot be used. This commit removes it.
2016-03-26 08:15:58 -07:00
4e39db4158 etcdctlv3: Add insecure-skip-tls-verify flag.
The user can specify the insecure-skip-tls-verify flag to skip the
server certificate verification step.
2016-03-25 19:28:41 -07:00
877030ea9d pkg/fileutil: fix linux file locks over NFS
Fixes #4853
2016-03-25 16:28:29 -07:00
36db6cd982 Merge pull request #4867 from xiang90/ctl_env
etcdctlv3: accept env for global configuration flags
2016-03-25 15:32:06 -07:00
a120ca16c0 etcdctlv3: accept env for global configuration flags 2016-03-25 14:23:32 -07:00
92a73e727b Merge pull request #4857 from xiang90/warn_tls
etcdmain: warn on contradictory TLS settings
2016-03-25 09:38:11 -07:00
5449edc025 Merge pull request #4817 from mqliang/time-out
etcdctlv3: add timeout support
2016-03-25 07:30:48 -07:00
f165f8b44e etcdctlv3: add timeout support
add timeout setting support for etcdctlv3
2016-03-25 16:24:49 +08:00
20a267dc6a Merge pull request #4860 from heyitsanthony/tester-sched
tools/functional-tester: --schedule-cases flag
2016-03-24 22:05:05 -07:00
4a17097d00 tools/functional-tester: --schedule-cases flag
Command line argument for specifying a schedule of test cases per round.
The default is to run each test case once per round.
2016-03-24 19:43:23 -07:00
05dc2dac70 Merge pull request #4859 from xiang90/ctl_secure
etcdctlv3: support secure connection without key/cert
2016-03-24 16:47:23 -07:00
0865688c27 etcdctlv3: support secure connection without key/cert 2016-03-24 16:29:33 -07:00
6285455f85 etcdmain: warn on contradictory TLS settings 2016-03-24 10:21:47 -07:00
7d2545c72e Merge pull request #4856 from xiang90/fail_key_cert
etcdmain: etcd should fail to start when https is enabled but tls con…
2016-03-24 10:10:43 -07:00
5ee3729738 etcdmain: etcd should fail to start when https is enabled but tls config is not given 2016-03-24 09:57:25 -07:00
d16bfa5e54 Merge pull request #4854 from mitake/genproto
scripts: update genproto.sh for vendor
2016-03-24 08:53:55 -07:00
d0d3b32210 Merge pull request #4850 from xiang90/rm_demo
*: enable v3 by default
2016-03-23 23:48:29 -07:00
0436223793 scripts: update genproto.sh for vendor
Currently genproto.sh assumes Godeps, so its output files have obsolete
parts.
2016-03-24 14:30:46 +09:00
70a9391378 *: enable v3 by default 2016-03-23 17:01:36 -07:00
2fec88ebfc Merge pull request #4851 from gyuho/fix_functional_tester
functional-tester: add GRPCURLs for cluster config
2016-03-23 16:47:33 -07:00
9fb60deb7c functional-tester: add GRPCURLs for cluster config
The gRPC and v2 client addresses share the same host and port,
but gRPC does not work with a scheme specified. This fixes it by
adding another member for gRPC without the scheme, as we had
before.
2016-03-23 16:28:05 -07:00
333ac5789a Merge pull request #4831 from xiang90/tlx
*: http and https on the same port
2016-03-23 15:59:58 -07:00
e4ac8edb2f Merge pull request #4849 from gyuho/functional_test
functional-tester: set gRPC endpoint for stresser
2016-03-23 15:32:14 -07:00
4d2227e5ab e2e: combine cfg.isClientTLS and cfg.isClientBoth 2016-03-23 15:30:58 -07:00
012143e703 functional-tester: set gRPC endpoint for stresser 2016-03-23 15:23:19 -07:00
9d55420a00 e2e: add an e2e test for TLS/non-TLS on the same port 2016-03-23 13:43:47 -07:00
ca5dff6682 Merge pull request #4848 from heyitsanthony/rename-compare-created
clientv3: rename comparison from CreatedRevision to CreateRevision
2016-03-23 10:32:49 -07:00
900a61b023 *: http and https on the same port 2016-03-23 10:28:38 -07:00
489779d905 clientv3: rename comparison from CreatedRevision to CreateRevision
To match protobuf naming
2016-03-23 09:50:46 -07:00
88e738fcb6 Merge pull request #4844 from ajityagaty/polish_naming_conventions
clientv3: Renaming SortByCreatedRev to maintain consistency.
2016-03-23 09:27:34 -07:00
0181725e55 Merge pull request #4846 from jonboulle/master
docs: "master election" -> "leader election"
2016-03-23 09:27:08 -07:00
6081a29c13 Merge pull request #4843 from heyitsanthony/go-vendor
*: migrate Godeps to vendor/
2016-03-23 06:01:26 -07:00
5f72a28157 docs: "master election" -> "leader election" 2016-03-23 12:23:01 +01:00
86a477c2f6 doc: update client README to use vendor/ 2016-03-22 18:02:10 -07:00
2f22ac662c travis: use GO15VENDOREXPERIMENT 2016-03-22 18:02:10 -07:00
2bb417bfff clientv3: Renaming SortByCreatedRev to maintain consistency.
Renamed SortByCreatedRev to SortByCreateRevision to be consistent
with the naming used for SortByModRevision.
2016-03-22 17:56:24 -07:00
45cf31650c test: ignore vendor/ directory on license check 2016-03-22 17:33:46 -07:00
fb3510b276 build: enable vendor experiment for go1.5 2016-03-22 17:33:46 -07:00
bd832e5b0a *: migrate Godeps to vendor/ 2016-03-22 17:10:28 -07:00
e9b9b228e7 Merge pull request #4842 from gyuho/serial
etcdctlv3: get command with consistency flag
2016-03-22 17:07:29 -07:00
a10662210a e2e: etcdctlv3 with serializable read 2016-03-22 16:52:33 -07:00
5686340d26 etcdctlv3: get command with consistency flag
As we do in the benchmark tool.
2016-03-22 16:52:28 -07:00
096a89117a Merge pull request #4840 from ajityagaty/polish_naming_conventions
clientv3: Fix inconsistent naming convention in v3 client.
2016-03-22 15:27:12 -07:00
70e709c5f4 Merge pull request #4812 from xiang90/ping
etcdctlv3: implement endpoint-health command
2016-03-22 15:22:53 -07:00
43221f0b7a etcdctlv3: implement endpoint-health command
endpoint-health checks the health of an endpoint.

It can produce three outputs:

1. cannot connect to the member through the endpoint

2. connected to the member, but the member failed to commit any proposals

3. connected to the member, and the member committed a proposal
2016-03-22 15:09:50 -07:00
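Example usage, following this commit's own command naming (illustrative only; flags and output omitted):

$ etcdctlv3 endpoint-health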
606889a002 clientv3: Fix inconsistent naming convention in v3 client.
In order to have consistent naming for variables and functions
pertaining to ModifiedRevision, all occurrences have been renamed
to ModRevision.
2016-03-22 14:58:11 -07:00
499b893704 Merge pull request #4838 from gyuho/dial
etcdctlv3: add dial timeout flag
2016-03-22 13:26:19 -07:00
8396da3e83 etcdctlv3: add dial timeout flag
Fix https://github.com/coreos/etcd/issues/4836.
2016-03-22 13:15:26 -07:00
2b44df5440 Merge pull request #4833 from gyuho/govet
etcdmain: fix shadowed variables
2016-03-22 10:01:31 -07:00
565eb61cd3 Merge pull request #4834 from gyuho/godep_pb
Godeps: semantic versioning cheggaaa/pb
2016-03-22 09:09:58 -07:00
a054ae320b Merge pull request #4830 from mischief/proxy-env
pkg/transport: use ProxyFromEnvironment when constructing a transport
2016-03-22 01:14:03 -07:00
afb1bc242b Merge pull request #4822 from mitake/auth-backend
auth, etcdserver: add a method for updating backend during apply snap…
2016-03-21 23:34:46 -07:00
4e39f690f2 auth, etcdserver: add a method for recovering from backend during apply snapshot
This commit adds a new method, Recovery(), to auth.AuthStore for
recovering auth state from the backend while applying a snapshot. It
follows the manner of the lessor.
2016-03-22 15:17:40 +09:00
bb9a7f5a7c Godeps: semantic versioning cheggaaa/pb
Fix https://github.com/coreos/etcd/issues/4832.
2016-03-21 22:06:16 -07:00
2364d71ea2 etcdmain: fix shadowed variables 2016-03-21 21:55:06 -07:00
d80a546ed4 pkg/transport: use ProxyFromEnvironment when constructing a transport
this allows use of HTTP_PROXY/HTTPS_PROXY for etcdctl.
2016-03-21 21:02:42 -07:00
e73ac5bdd7 Merge pull request #4826 from philips/buildv3-by-default
build: build etcdctlv3 by default
2016-03-21 17:04:11 -07:00
0ac4eba60e Merge pull request #4829 from gyuho/server_closure
etcdmain: fix blocking m.Server closure
2016-03-21 16:50:13 -07:00
cdb7cfd74b etcdmain: fix blocking m.Server closure 2016-03-21 16:39:20 -07:00
7e3fc182d5 Merge pull request #4828 from xiang90/cmux
*: gRPC + HTTP on the same port
2016-03-21 15:37:44 -07:00
7c3432a79f Godep: add cmux dependency 2016-03-21 14:33:37 -07:00
d3809abe42 *: gRPC + HTTP on the same port
We use cmux to do this since we also want to serve HTTP and HTTPS
on the same port in the near future.
2016-03-21 14:29:25 -07:00
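As a rough sketch of the cmux approach (not the actual etcd wiring; the listener address and servers below are placeholders):

```go
package main

import (
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
	"google.golang.org/grpc"
)

func main() {
	l, err := net.Listen("tcp", ":2379")
	if err != nil {
		panic(err)
	}

	m := cmux.New(l)
	// HTTP/2 connections with the gRPC content-type go to the gRPC server.
	grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
	// Everything else falls through to the plain HTTP server.
	httpL := m.Match(cmux.Any())

	grpcS := grpc.NewServer()
	httpS := &http.Server{Handler: http.DefaultServeMux}

	go grpcS.Serve(grpcL)
	go httpS.Serve(httpL)
	m.Serve() // demultiplex connections until the listener closes
}
```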
adebd91114 Merge pull request #4785 from heyitsanthony/gce-fallocate
wal: extend WAL file to segment size on fallocate
2016-03-21 13:08:53 -07:00
3fed78ae7b Merge pull request #4484 from heyitsanthony/auto-tls
automatic peer TLS
2016-03-21 12:59:29 -07:00
0df732c052 wal: pre-create segment files
Pipeline file creation and allocation so it overlaps writes to the log.

Fixes #4773
2016-03-21 11:56:53 -07:00
24b806d2ee wal: preallocate WAL files with initial size equal to segment size
Avoids having to update file size metadata during fdatasync on the common path.

Fixes #4755
2016-03-21 11:56:53 -07:00
8f653572ac Merge pull request #4827 from xiang90/fix_ctl
etcdctlv3: use godep dir for tablewriter dependency
2016-03-21 11:50:40 -07:00
5a0bb40a41 etcdctlv3: use godep dir for tablewriter dependency 2016-03-21 11:47:55 -07:00
d1ee12566b e2e: test auto tls 2016-03-21 11:44:14 -07:00
7d2aee8eca build: build etcdctlv3 by default
Any reason not to? It makes demoing etcd easier with the V3 procfile.
2016-03-21 11:42:01 -07:00
c8c0c728a0 Merge pull request #4825 from gyuho/key_link
Documentation: add public key link to release doc
2016-03-21 11:39:45 -07:00
e9b2bd751d etcdmain: add --peer-auto-tls option
Lets the peer generate its own (unsigned) certs.
2016-03-21 11:38:23 -07:00
a69c709839 pkg/transport: generate certs 2016-03-21 11:38:23 -07:00
86164374ef Merge pull request #4824 from gyuho/dash
*: replace '-' with '--' in doc
2016-03-21 11:33:22 -07:00
f58d1348a7 Documentation: add public key link to release doc 2016-03-21 11:28:49 -07:00
67c2384bdf *: replace '-' with '--' in doc
Fix https://github.com/coreos/etcd/issues/4595.
2016-03-21 11:12:43 -07:00
aafe717f2f fileutil: support file extending preallocate 2016-03-21 09:42:30 -07:00
7879429a94 Merge pull request #4802 from heyitsanthony/bench-stm
benchmark: STM benchmark
2016-03-20 16:10:54 -07:00
1383da1030 benchmark: STM benchmark 2016-03-20 12:21:29 -07:00
053bc83fe4 Merge pull request #4810 from gyuho/client_serialized_read
clientv3: set Serializable from Op
2016-03-19 14:35:55 -07:00
c740b60db8 Merge pull request #4814 from gyuho/godoc
*: minor updates for godoc
2016-03-19 14:29:48 -07:00
dae7e009b0 *: godoc clean up 2016-03-19 14:19:23 -07:00
0555a6112d Merge pull request #4813 from gyuho/clientv3
clientv3/concurrency: fix godoc
2016-03-18 19:07:54 -07:00
dc9af8e3f3 Merge pull request #4807 from gyuho/go1.4-build
Drop go1.4 support for development
2016-03-18 19:07:44 -07:00
21b33de810 rafthttp: drop go1.4 tests 2016-03-18 18:46:11 -07:00
5bba773199 pkg/testutil: drop go1.4 goroutine leak exception 2016-03-18 18:45:47 -07:00
0a82c06a2c pkg/types: drop go1.4 tests 2016-03-18 18:45:29 -07:00
33e22fa8d7 pkg/httputil: drop go1.4 tests 2016-03-18 18:45:12 -07:00
25e47db416 client: drop go1.4 tests 2016-03-18 18:44:56 -07:00
896cba5cb9 README: go1.5 for Go development 2016-03-18 18:44:40 -07:00
6aa17f0c76 Merge pull request #4775 from heyitsanthony/wal-locks
fileutil, wal: refactor file locking
2016-03-18 18:26:31 -07:00
16270dba4f Merge pull request #4805 from gyuho/drop-go1.4
travis: drop go1.4
2016-03-18 18:24:55 -07:00
4e4f0ab619 clientv3/concurrency: fix godoc 2016-03-18 16:34:58 -07:00
ac9376ea16 *: bump to v2.3.0+git 2016-03-18 16:32:04 -07:00
f38a611b55 clientv3: set Serializable from Op
Fix https://github.com/coreos/etcd/issues/4809.
2016-03-18 15:56:48 -07:00
5badbab8b7 travis: drop go1.4
Fix https://github.com/coreos/etcd/issues/4790.
2016-03-18 13:33:12 -07:00
7397e14c0a fileutil, wal: refactor file locking
The file lock interface was more verbose than it needed to be, while
also making it difficult to support systems (e.g., Windows) that only
permit locked writes on the single fd holding the lock.
2016-03-16 15:02:15 -07:00
a44645e13d etcdserver: align 64-bit atomics on 8-byte boundary 2016-03-10 07:24:33 +02:00
1494 changed files with 40973 additions and 65761 deletions

.github/ISSUE_TEMPLATE.md

@@ -0,0 +1,8 @@
# Bug reporting
A good bug report has some very specific qualities, so please read over our short document on
[reporting bugs][report_bugs] before you submit your bug report.
To ask a question, go ahead and ignore this.
[report_bugs]: ../Documentation/reporting_bugs.md

.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,5 @@
# Contributing guidelines
Please read our [contribution workflow][contributing] before submitting a pull request.
[contributing]: ../CONTRIBUTING.md#contribution-flow

.travis.yml

@@ -1,12 +1,20 @@
dist: trusty
language: go
go_import_path: github.com/coreos/etcd
sudo: false
go:
- 1.4
- 1.5
- 1.6
- tip
env:
global:
- GO15VENDOREXPERIMENT=1
matrix:
- TARGET=amd64
- TARGET=arm64
matrix:
allow_failures:
- go: tip
@@ -20,6 +28,17 @@ addons:
before_install:
- go get -v github.com/chzchzchz/goword
- go get -v honnef.co/go/simple/cmd/gosimple
- go get -v honnef.co/go/unused/cmd/unused
# disable godep restore override
install:
- pushd cmd/ && go get -t -v ./... && popd
script:
- ./test
- >
if [ "${TARGET}" == "amd64" ]; then
GOARCH="${TARGET}" ./test;
elif [ "${TARGET}" == "arm64" ]; then
GOARCH="${TARGET}" ./build;
fi

@@ -1,6 +1,57 @@
# etcd3 API
TODO: API doc
TODO: finish API doc
## Response Header
All responses from the etcd API have a [response header][response_header] attached. The response header includes metadata about the response.
```proto
message ResponseHeader {
uint64 cluster_id = 1;
uint64 member_id = 2;
int64 revision = 3;
uint64 raft_term = 4;
}
```
* Cluster_ID - the ID of the cluster that generates the response
* Member_ID - the ID of the member that generates the response
* Revision - the revision of the key-value store when the response is generated
* Raft_Term - the Raft term of the member when the response is generated
An application may read the Cluster_ID (Member_ID) field to ensure it is communicating with the intended cluster (member).
Applications can use `Revision` to know the latest revision of the key-value store. This is especially useful when an application specifies a historical revision to make a time travel query and wishes to know the latest revision at the time of the request.
Applications can use `Raft_Term` to detect when the cluster completes a new leader election.
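
As a minimal sketch with the Go client (assuming the `clientv3` package in this repository; error handling trimmed to panics for brevity):

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	resp, err := cli.Get(context.TODO(), "foo")
	if err != nil {
		panic(err)
	}
	// Every response type carries the same header metadata.
	h := resp.Header
	fmt.Println(h.ClusterId, h.MemberId, h.Revision, h.RaftTerm)
}
```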
## Key-Value API
The key-value API is used to manipulate key-value pairs stored inside etcd. It is defined as a [gRPC service][kv-service], and a key-value pair is defined as structured data in [protobuf format][kv-proto].
### Key-Value Pair
A key-value pair is the smallest unit that the key-value API can manipulate. Each key-value pair has a number of fields:
```protobuf
message KeyValue {
bytes key = 1;
int64 create_revision = 2;
int64 mod_revision = 3;
int64 version = 4;
bytes value = 5;
int64 lease = 6;
}
```
* Key - key in bytes. An empty key is not allowed.
* Value - value in bytes.
* Version - version is the version of the key. A deletion resets the version to zero and any modification of the key increases its version.
* Create_Revision - revision of the last creation on the key.
* Mod_Revision - revision of the last modification on the key.
* Lease - the ID of the lease attached to the key. If lease is 0, then no lease is attached to the key.
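
For instance, a reader can distinguish fresh creations from updates using these fields; a sketch assuming the `mvccpb` package from this repository:

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

// describe reports whether each pair is a fresh creation or an update,
// using the Version and revision fields documented above.
func describe(kvs []*mvccpb.KeyValue) {
	for _, kv := range kvs {
		if kv.Version == 1 {
			// Version resets on deletion, so 1 means created (or recreated).
			fmt.Printf("%s created at revision %d\n", kv.Key, kv.CreateRevision)
		} else {
			fmt.Printf("%s last modified at revision %d (version %d)\n",
				kv.Key, kv.ModRevision, kv.Version)
		}
	}
}
```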
## Data Model
@@ -83,10 +134,13 @@ etcd does not ensure linearizability for watch operations. Users are expected to
etcd ensures linearizability for all other operations by default. Linearizability comes with a cost, however, because linearized requests must go through the Raft consensus process. To obtain lower latencies and higher throughput for read requests, clients can configure a request's consistency mode to `serializable`, which may access stale data with respect to quorum, but removes the performance penalty of linearized accesses' reliance on live consensus.
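
A sketch of opting into serializable reads with the Go client (assuming clientv3's `WithSerializable` option):

```go
package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// serializableGet is served from the contacted member's local store,
// trading possibly stale data for lower latency and higher throughput.
func serializableGet(cli *clientv3.Client) (*clientv3.GetResponse, error) {
	return cli.Get(context.TODO(), "foo", clientv3.WithSerializable())
}
```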
[persistent-ds]: [https://en.wikipedia.org/wiki/Persistent_data_structure]
[btree]: [https://en.wikipedia.org/wiki/B-tree]
[b+tree]: [https://en.wikipedia.org/wiki/B%2B_tree]
[seq_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency]
[strict_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency]
[serializable_isolation]: [https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable]
[Linearizability]: [#Linearizability]
[persistent-ds]: https://en.wikipedia.org/wiki/Persistent_data_structure
[btree]: https://en.wikipedia.org/wiki/B-tree
[b+tree]: https://en.wikipedia.org/wiki/B%2B_tree
[seq_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency
[strict_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency
[serializable_isolation]: https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable
[Linearizability]: #Linearizability
[kv-proto]: https://github.com/coreos/etcd/blob/master/mvcc/mvccpb/kv.proto
[kv-service]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
[response_header]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto

@@ -72,6 +72,6 @@ With the benchmark result, we can calculate roughly that `c1 = 17kb`, `c2 = 18kb
| 5k | 50 | 10 | 2.5M | 5710MB |
| 1k | 50 | 100 | 5M | 2380MB |
| 2k | 50 | 100 | 10M | 4672MB |
| 5k | 50 | 100 | 50M | *OOM* |
| 5k | 50 | 100 | 25M | *OOM* |
[rss]: https://en.wikipedia.org/wiki/Resident_set_size

@@ -0,0 +1,771 @@
### etcd API Reference
##### service `Auth` (etcdserver/etcdserverpb/rpc.proto)
| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| AuthEnable | AuthEnableRequest | AuthEnableResponse | AuthEnable enables authentication. |
| AuthDisable | AuthDisableRequest | AuthDisableResponse | AuthDisable disables authentication. |
| Authenticate | AuthenticateRequest | AuthenticateResponse | Authenticate processes an authenticate request. |
| UserAdd | AuthUserAddRequest | AuthUserAddResponse | UserAdd adds a new user. |
| UserGet | AuthUserGetRequest | AuthUserGetResponse | UserGet gets detailed user information or lists all users. |
| UserDelete | AuthUserDeleteRequest | AuthUserDeleteResponse | UserDelete deletes a specified user. |
| UserChangePassword | AuthUserChangePasswordRequest | AuthUserChangePasswordResponse | UserChangePassword changes the password of a specified user. |
| UserGrant | AuthUserGrantRequest | AuthUserGrantResponse | UserGrant grants a role to a specified user. |
| UserRevoke | AuthUserRevokeRequest | AuthUserRevokeResponse | UserRevoke revokes a role of a specified user. |
| RoleAdd | AuthRoleAddRequest | AuthRoleAddResponse | RoleAdd adds a new role. |
| RoleGet | AuthRoleGetRequest | AuthRoleGetResponse | RoleGet gets detailed role information or lists all roles. |
| RoleDelete | AuthRoleDeleteRequest | AuthRoleDeleteResponse | RoleDelete deletes a specified role. |
| RoleGrant | AuthRoleGrantRequest | AuthRoleGrantResponse | RoleGrant grants a permission of a specified key or range to a specified role. |
| RoleRevoke | AuthRoleRevokeRequest | AuthRoleRevokeResponse | RoleRevoke revokes a key or range permission of a specified role. |
##### service `Cluster` (etcdserver/etcdserverpb/rpc.proto)
| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| MemberAdd | MemberAddRequest | MemberAddResponse | MemberAdd adds a member into the cluster. |
| MemberRemove | MemberRemoveRequest | MemberRemoveResponse | MemberRemove removes an existing member from the cluster. |
| MemberUpdate | MemberUpdateRequest | MemberUpdateResponse | MemberUpdate updates the member configuration. |
| MemberList | MemberListRequest | MemberListResponse | MemberList lists all the members in the cluster. |
##### service `KV` (etcdserver/etcdserverpb/rpc.proto)
| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| Range | RangeRequest | RangeResponse | Range gets the keys in the range from the key-value store. |
| Put | PutRequest | PutResponse | Put puts the given key into the key-value store. A put request increments the revision of the key-value store and generates one event in the event history. |
| DeleteRange | DeleteRangeRequest | DeleteRangeResponse | Delete deletes the given range from the key-value store. A delete request increments the revision of the key-value store and generates a delete event in the event history for every deleted key. |
| Txn | TxnRequest | TxnResponse | Txn processes multiple requests in a single transaction. A txn request increments the revision of the key-value store and generates events with the same revision for every completed request. It is not allowed to modify the same key several times within one txn. |
| Compact | CompactionRequest | CompactionResponse | Compact compacts the event history in the etcd key-value store. The key-value store should be periodically compacted or the event history will continue to grow indefinitely. |
##### service `Lease` (etcdserver/etcdserverpb/rpc.proto)
| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| LeaseGrant | LeaseGrantRequest | LeaseGrantResponse | LeaseGrant creates a lease which expires if the server does not receive a keepAlive within a given time to live period. All keys attached to the lease will be expired and deleted if the lease expires. Each expired key generates a delete event in the event history. |
| LeaseRevoke | LeaseRevokeRequest | LeaseRevokeResponse | LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. |
| LeaseKeepAlive | LeaseKeepAliveRequest | LeaseKeepAliveResponse | LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client to the server and streaming keep alive responses from the server to the client. |
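
A rough sketch of the grant/attach/keep-alive flow with the Go client (method names follow the current clientv3 API and should be treated as assumptions for this tree):

```go
package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// attachWithLease grants a 10-second lease, attaches a key to it, and
// streams keep-alives; if the keep-alives stop, the key is deleted
// when the lease expires.
func attachWithLease(cli *clientv3.Client) error {
	lease, err := cli.Grant(context.TODO(), 10) // TTL in seconds
	if err != nil {
		return err
	}
	if _, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.ID)); err != nil {
		return err
	}
	_, err = cli.KeepAlive(context.TODO(), lease.ID)
	return err
}
```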
##### service `Maintenance` (etcdserver/etcdserverpb/rpc.proto)
| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| Alarm | AlarmRequest | AlarmResponse | Alarm activates, deactivates, and queries alarms regarding cluster health. |
| Status | StatusRequest | StatusResponse | Status gets the status of the member. |
| Defragment | DefragmentRequest | DefragmentResponse | Defragment defragments a member's backend database to recover storage space. |
| Hash | HashRequest | HashResponse | Hash returns the hash of the local KV state for consistency checking purpose. This is designed for testing; do not use this in production when there are ongoing transactions. |
| Snapshot | SnapshotRequest | SnapshotResponse | Snapshot sends a snapshot of the entire backend from a member over a stream to a client. |
##### service `Watch` (etcdserver/etcdserverpb/rpc.proto)
| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| Watch | WatchRequest | WatchResponse | Watch watches for events happening or that have happened. Both input and output are streams; the input stream is for creating and canceling watchers and the output stream sends events. One watch RPC can watch on multiple key ranges, streaming events for several watches at once. The entire event history can be watched starting from the last compaction revision. |
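
A sketch of a single watch over a key range with the Go client (assuming clientv3's Watch API):

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// watchPrefix streams events for every key under "foo/" on one watch RPC.
func watchPrefix(cli *clientv3.Client) {
	for wresp := range cli.Watch(context.Background(), "foo/", clientv3.WithPrefix()) {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```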
##### message `AlarmMember` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| memberID | memberID is the ID of the member associated with the raised alarm. | uint64 |
| alarm | alarm is the type of alarm which has been raised. | AlarmType |
##### message `AlarmRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| action | action is the kind of alarm request to issue. The action may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a raised alarm. | AlarmAction |
| memberID | memberID is the ID of the member associated with the alarm. If memberID is 0, the alarm request covers all members. | uint64 |
| alarm | alarm is the type of alarm to consider for this request. | AlarmType |
##### message `AlarmResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| alarms | alarms is a list of alarms associated with the alarm request. | (slice of) AlarmMember |
##### message `AuthDisableRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthDisableResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthEnableRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthEnableResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthRoleAddRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the role to add to the authentication system. | string |
##### message `AuthRoleAddResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthRoleDeleteRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthRoleDeleteResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthRoleGetRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthRoleGetResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthRoleGrantRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the role which will be granted the permission. | string |
| perm | perm is the permission to grant to the role. | authpb.Permission |
##### message `AuthRoleGrantResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthRoleRevokeRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthRoleRevokeResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthUserAddRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | string |
| password | | string |
##### message `AuthUserAddResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthUserChangePasswordRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the user whose password is being changed. | string |
| password | password is the new password for the user. | string |
##### message `AuthUserChangePasswordResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthUserDeleteRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the user to delete. | string |
##### message `AuthUserDeleteResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthUserGetRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthUserGetResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthUserGrantRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| user | user is the name of the user which should be granted a given role. | string |
| role | role is the name of the role to grant to the user. | string |
##### message `AuthUserGrantResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthUserRevokeRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `AuthUserRevokeResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `AuthenticateRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | string |
| password | | string |
##### message `AuthenticateResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| token | token is an authorized token that can be used in succeeding RPCs | string |
##### message `CompactionRequest` (etcdserver/etcdserverpb/rpc.proto)
CompactionRequest compacts the key-value store up to a given revision. All superseded keys with a revision less than the compaction revision will be removed.
| Field | Description | Type |
| ----- | ----------- | ---- |
| revision | revision is the key-value store revision for the compaction operation. | int64 |
| physical | physical is set so the RPC will wait until the compaction is physically applied to the local database such that compacted entries are totally removed from the backend database. | bool |
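
A minimal sketch of issuing a compaction from the Go client (the signature follows the current clientv3 API; treat it as an assumption for this tree):

```go
package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// compactTo discards the event history up to rev; subsequent reads at
// older revisions fail with a compaction error.
func compactTo(cli *clientv3.Client, rev int64) error {
	_, err := cli.Compact(context.TODO(), rev)
	return err
}
```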
##### message `CompactionResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `Compare` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| result | result is logical comparison operation for this comparison. | CompareResult |
| target | target is the key-value field to inspect for the comparison. | CompareTarget |
| key | key is the subject key for the comparison operation. | bytes |
| target_union | | oneof |
| version | version is the version of the given key | int64 |
| create_revision | create_revision is the creation revision of the given key | int64 |
| mod_revision | mod_revision is the last modified revision of the given key. | int64 |
| value | value is the value of the given key, in bytes. | bytes |
##### message `DefragmentRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `DefragmentResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `DeleteRangeRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the first key to delete in the range. | bytes |
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
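
For example, deleting a whole prefix uses the [key, range_end) encoding above; a sketch with the Go client's `WithPrefix` option:

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// deletePrefix deletes every key beginning with "foo/" and reports how
// many keys the range delete removed.
func deletePrefix(cli *clientv3.Client) error {
	resp, err := cli.Delete(context.TODO(), "foo/", clientv3.WithPrefix())
	if err != nil {
		return err
	}
	fmt.Println("deleted", resp.Deleted, "keys")
	return nil
}
```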
##### message `DeleteRangeResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| deleted | Deleted is the number of keys deleted by the delete range request. | int64 |
##### message `HashRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `HashResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| hash | hash is the hash value computed from the responding member's key-value store. | uint32 |
##### message `LeaseGrantRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| TTL | TTL is the advisory time-to-live in seconds. | int64 |
| ID | ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. | int64 |
##### message `LeaseGrantResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| ID | ID is the lease ID for the granted lease. | int64 |
| TTL | TTL is the server chosen lease time-to-live in seconds. | int64 |
| error | | string |
##### message `LeaseKeepAliveRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the lease ID for the lease to keep alive. | int64 |
##### message `LeaseKeepAliveResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| ID | ID is the lease ID from the keep alive request. | int64 |
| TTL | TTL is the new time-to-live for the lease. | int64 |
##### message `LeaseRevokeRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. | int64 |
##### message `LeaseRevokeResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `Member` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the member ID for this member. | uint64 |
| name | name is the human-readable name of the member. If the member is not started, the name will be an empty string. | string |
| peerURLs | peerURLs is the list of URLs the member exposes to the cluster for communication. | (slice of) string |
| clientURLs | clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. | (slice of) string |
##### message `MemberAddRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| peerURLs | peerURLs is the list of URLs the added member will use to communicate with the cluster. | (slice of) string |
##### message `MemberAddResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| member | member is the member information for the added member. | Member |
##### message `MemberListRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `MemberListResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| members | members is a list of all members associated with the cluster. | (slice of) Member |
##### message `MemberRemoveRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the member ID of the member to remove. | uint64 |
##### message `MemberRemoveResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `MemberUpdateRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the member ID of the member to update. | uint64 |
| peerURLs | peerURLs is the new list of URLs the member will use to communicate with the cluster. | (slice of) string |
##### message `MemberUpdateResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `PutRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key, in bytes, to put into the key-value store. | bytes |
| value | value is the value, in bytes, to associate with the key in the key-value store. | bytes |
| lease | lease is the lease ID to associate with the key in the key-value store. A lease value of 0 indicates no lease. | int64 |
##### message `PutResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
##### message `RangeRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the first key for the range. If range_end is not given, the request only looks up key. | bytes |
| range_end | range_end is the upper bound on the requested range [key, range_end). If range_end is '\0', the range is all keys >= key. | bytes |
| limit | limit is a limit on the number of keys returned for the request. | int64 |
| revision | revision is the point-in-time of the key-value store to use for the range. If revision is less than or equal to zero, the range is over the newest key-value store. If the revision has been compacted, ErrCompaction is returned as a response. | int64 |
| sort_order | sort_order is the order for returned sorted results. | SortOrder |
| sort_target | sort_target is the key-value field to use for sorting. | SortTarget |
| serializable | serializable sets the range request to use serializable member-local reads. Range requests are linearizable by default; linearizable requests have higher latency and lower throughput than serializable requests but reflect the current consensus of the cluster. For better performance, in exchange for possible stale reads, a serializable range request is served locally without needing to reach consensus with other nodes in the cluster. | bool |
##### message `RangeResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| kvs | kvs is the list of key-value pairs matched by the range request. | (slice of) mvccpb.KeyValue |
| more | more indicates if there are more keys to return in the requested range. | bool |
##### message `RequestUnion` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| request | request is a union of request types accepted by a transaction. | oneof |
| request_range | | RangeRequest |
| request_put | | PutRequest |
| request_delete_range | | DeleteRangeRequest |
##### message `ResponseHeader` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| cluster_id | cluster_id is the ID of the cluster which sent the response. | uint64 |
| member_id | member_id is the ID of the member which sent the response. | uint64 |
| revision | revision is the key-value store revision when the request was applied. | int64 |
| raft_term | raft_term is the raft term when the request was applied. | uint64 |
##### message `ResponseUnion` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| response | response is a union of response types returned by a transaction. | oneof |
| response_range | | RangeResponse |
| response_put | | PutResponse |
| response_delete_range | | DeleteRangeResponse |
##### message `SnapshotRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `SnapshotResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | header has the current key-value store information. The first header in the snapshot stream indicates the point in time of the snapshot. | ResponseHeader |
| remaining_bytes | remaining_bytes is the number of blob bytes to be sent after this message | uint64 |
| blob | blob contains the next chunk of the snapshot in the snapshot stream. | bytes |
##### message `StatusRequest` (etcdserver/etcdserverpb/rpc.proto)
Empty field.
##### message `StatusResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| version | version is the cluster protocol version used by the responding member. | string |
| dbSize | dbSize is the size of the backend database, in bytes, of the responding member. | int64 |
| leader | leader is the member ID which the responding member believes is the current leader. | uint64 |
| raftIndex | raftIndex is the current raft index of the responding member. | uint64 |
| raftTerm | raftTerm is the current raft term of the responding member. | uint64 |
##### message `TxnRequest` (etcdserver/etcdserverpb/rpc.proto)
From google paxosdb paper: Our implementation hinges around a powerful primitive which we call MultiOp. All other database operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically and consists of three components: 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check for the absence or presence of a value, or compare with a given value. Two different tests in the guard may apply to the same or different entries in the database. All tests in the guard are applied and MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise it executes f op (see item 3 below). 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or lookup operation, and applies to a single database entry. Two different operations in the list may apply to the same or different entries in the database. These operations are executed if guard evaluates to true. 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
| Field | Description | Type |
| ----- | ----------- | ---- |
| compare | Compare is a list of predicates representing a conjunction of terms. If the comparisons succeed, then the success requests will be processed in order, and the response will contain their respective responses in order. If the comparisons fail, then the failure requests will be processed in order, and the response will contain their respective responses in order. | (slice of) Compare |
| success | success is a list of requests which will be applied when compare evaluates to true. | (slice of) RequestUnion |
| failure | failure is a list of requests which will be applied when compare evaluates to false. | (slice of) RequestUnion |
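
A sketch of the guard/then/else shape with the Go client (assuming clientv3's Txn builder):

```go
package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// putIfAbsent writes key=val only if the key has never been created
// (CreateRevision == 0); otherwise it reads the existing value.
func putIfAbsent(cli *clientv3.Client, key, val string) (*clientv3.TxnResponse, error) {
	return cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Else(clientv3.OpGet(key)).
		Commit()
}
```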
##### message `TxnResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| succeeded | succeeded is set to true if the compare evaluated to true, and false otherwise. | bool |
| responses | responses is a list of responses corresponding to the results from applying success if succeeded is true or failure if succeeded is false. | (slice of) ResponseUnion |
##### message `WatchCancelRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| watch_id | watch_id is the watcher id to cancel so that no more events are transmitted. | int64 |
##### message `WatchCreateRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key to register for watching. | bytes |
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. | bytes |
| start_revision | start_revision is an optional revision to watch from (inclusive). If start_revision is not given, the watch starts from "now". | int64 |
| progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. The etcd server may decide how often it will send notifications based on current load. | bool |
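For illustration, a watcher over the half-open range [key, range_end) starting at an old revision can be created from the command line (a sketch built from the etcdctl flags shown later in this document):
```bash
# watch keys in [foo, foo9) from revision 1 (inclusive) onward
$ etcdctl watch --rev=1 foo foo9
```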
##### message `WatchRequest` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| request_union | request_union is a request to either create a new watcher or cancel an existing watcher. | oneof |
| create_request | | WatchCreateRequest |
| cancel_request | | WatchCancelRequest |
##### message `WatchResponse` (etcdserver/etcdserverpb/rpc.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| watch_id | watch_id is the ID of the watcher that corresponds to the response. | int64 |
| created | created is set to true if the response is for a create watch request. The client should record the watch_id and expect to receive events for the created watcher from the same stream. All events sent to the created watcher will attach with the same watch_id. | bool |
| canceled | canceled is set to true if the response is for a cancel watch request. No further events will be sent to the canceled watcher. | bool |
| compact_revision | compact_revision is set to the minimum index if a watcher tries to watch at a compacted index. This happens when creating a watcher at a compacted revision or the watcher cannot catch up with the progress of the key-value store. The client should treat the watcher as canceled and should not try to create any watcher with the same start_revision again. | int64 |
| events | | (slice of) mvccpb.Event |
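For example, a watcher created at an already-compacted revision is canceled with `compact_revision` set (a sketch; it assumes revision 5 was compacted as in the compaction example later in this document, and the exact error wording may differ across versions):
```bash
$ etcdctl watch --rev=4 foo
watch was canceled (etcdserver: mvcc: required revision has been compacted)
```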
##### message `Event` (mvcc/mvccpb/kv.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| type | type is the kind of event. If type is a PUT, it indicates new data has been stored to the key. If type is a DELETE, it indicates the key was deleted. | EventType |
| kv | kv holds the KeyValue for the event. A PUT event contains current kv pair. A PUT event with kv.Version=1 indicates the creation of a key. A DELETE/EXPIRE event contains the deleted key with its modification revision set to the revision of deletion. | KeyValue |
##### message `KeyValue` (mvcc/mvccpb/kv.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key in bytes. An empty key is not allowed. | bytes |
| create_revision | create_revision is the revision of last creation on this key. | int64 |
| mod_revision | mod_revision is the revision of last modification on this key. | int64 |
| version | version is the version of the key. A deletion resets the version to zero and any modification of the key increases its version. | int64 |
| value | value is the value held by the key, in bytes. | bytes |
| lease | lease is the ID of the lease that attached to key. When the attached lease expires, the key will be deleted. If lease is 0, then no lease is attached to the key. | int64 |
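As an illustration, these fields can be inspected from the command line, assuming the etcdctl build supports the `--write-out=json` option (`key` and `value` appear base64-encoded in JSON output; output abridged):
```bash
$ etcdctl put foo bar
OK
$ etcdctl get --write-out=json foo
{"header":{...},"kvs":[{"key":"Zm9v","create_revision":2,"mod_revision":2,"version":1,"value":"YmFy"}]}
```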
##### message `Lease` (lease/leasepb/lease.proto)
| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | | int64 |
| TTL | | int64 |
##### message `Permission` (auth/authpb/auth.proto)
Permission is a single entity
| Field | Description | Type |
| ----- | ----------- | ---- |
| key | | bytes |
| permType | | Type |
##### message `Role` (auth/authpb/auth.proto)
Role is a single entry in the bucket authRoles
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | bytes |
| keyPermission | | (slice of) Permission |
##### message `User` (auth/authpb/auth.proto)
User is a single entry in the bucket authUsers
| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | bytes |
| password | | bytes |
| roles | | (slice of) string |


@ -0,0 +1,243 @@
# Interacting with etcd
Users mostly interact with etcd by putting or getting the value of a key. This section describes how to do that by using etcdctl, a command line tool for interacting with the etcd server. The concepts described here should apply to the gRPC APIs or client library APIs.
By default, etcdctl talks to the etcd server with the v2 API for backward compatibility. For etcdctl to speak to etcd using the v3 API, the API version must be set to version 3 via the `ETCDCTL_API` environment variable.
``` bash
export ETCDCTL_API=3
```
## Write a key
Applications store data in the etcd cluster by writing to keys. Every stored key is replicated to all etcd cluster members through the Raft protocol to achieve consistency and reliability.
Here is the command to set the value of key `foo` to `bar`:
``` bash
$ etcdctl put foo bar
OK
```
## Read keys
Applications can read values of keys from an etcd cluster. Queries may read a single key, or a range of keys.
Suppose the etcd cluster has stored the following keys:
```
foo = bar
foo1 = bar1
foo3 = bar3
```
Here is the command to read the value of key `foo`:
```bash
$ etcdctl get foo
foo
bar
```
Here is the command to range over the keys from `foo` to `foo9`:
```bash
$ etcdctl get foo foo9
foo
bar
foo1
bar1
foo3
bar3
```
## Read past version of keys
Applications may want to read superseded versions of a key. For example, an application may wish to roll back to an old configuration by accessing an earlier version of a key. Alternatively, an application may want a consistent view over multiple keys through multiple requests by accessing key history.
Since every modification to the etcd cluster key-value store increments the global revision of an etcd cluster, an application can read superseded keys by providing an older etcd revision.
Suppose an etcd cluster already has the following keys:
``` bash
$ etcdctl put foo bar # revision = 2
$ etcdctl put foo1 bar1 # revision = 3
$ etcdctl put foo bar_new # revision = 4
$ etcdctl put foo1 bar1_new # revision = 5
```
Here is an example of accessing past versions of keys:
```bash
$ etcdctl get foo foo9 # access the most recent versions of keys
foo
bar_new
foo1
bar1_new
$ etcdctl get --rev=4 foo foo9 # access the versions of keys at revision 4
foo
bar_new
foo1
bar1
$ etcdctl get --rev=3 foo foo9 # access the versions of keys at revision 3
foo
bar
foo1
bar1
$ etcdctl get --rev=2 foo foo9 # access the versions of keys at revision 2
foo
bar
$ etcdctl get --rev=1 foo foo9 # access the versions of keys at revision 1
```
## Delete keys
Applications can delete a key or a range of keys from an etcd cluster.
Here is the command to delete key `foo`:
```bash
$ etcdctl del foo
1 # one key is deleted
```
Here is the command to delete keys ranging from `foo` to `foo9`:
```bash
$ etcdctl del foo foo9
2 # two keys are deleted
```
## Watch key changes
Applications can watch on a key or a range of keys to monitor for any updates.
Here is the command to watch on key `foo`:
```bash
$ etcdctl watch foo
# in another terminal: etcdctl put foo bar
foo
bar
```
Here is the command to watch on a range key from `foo` to `foo9`:
```bash
$ etcdctl watch foo foo9
# in another terminal: etcdctl put foo bar
foo
bar
# in another terminal: etcdctl put foo1 bar1
foo1
bar1
```
## Watch historical changes of keys
Applications may want to watch for historical changes of keys in etcd. For example, an application may wish to receive all the modifications of a key; if the application stays connected to etcd, then `watch` is good enough. However, if the application or etcd fails, a change may happen during the failure, and the application will not receive the update in real time. To guarantee the update is delivered, the application must be able to watch for historical changes to keys. To do this, an application can specify a historical revision on a watch, just like reading past versions of keys.
Suppose we finished the following sequence of operations:
``` bash
etcdctl put foo bar # revision = 2
etcdctl put foo1 bar1 # revision = 3
etcdctl put foo bar_new # revision = 4
etcdctl put foo1 bar1_new # revision = 5
```
Here is an example to watch the historical changes:
```bash
# watch for changes on key `foo` since revision 2
$ etcdctl watch --rev=2 foo
PUT
foo
bar
PUT
foo
bar_new
# watch for changes on key `foo` since revision 3
$ etcdctl watch --rev=3 foo
PUT
foo
bar_new
```
## Compacted revisions
As we mentioned, etcd keeps revisions so that applications can read past versions of keys. However, to avoid accumulating an unbounded amount of history, it is important to compact past revisions. After compacting, etcd removes historical revisions, releasing resources for future use. All superseded data with revisions before the compacted revision will be unavailable.
Here is the command to compact the revisions:
```bash
$ etcdctl compact 5
compacted revision 5
# any revisions before the compacted one are not accessible
$ etcdctl get --rev=4 foo
Error: rpc error: code = 11 desc = etcdserver: mvcc: required revision has been compacted
```
## Grant leases
Applications can grant leases for keys from an etcd cluster. When a key is attached to a lease, its lifetime is bound to the lease's lifetime, which in turn is governed by a time-to-live (TTL). Each lease has a minimum TTL value specified by the application at grant time. The lease's actual TTL value is at least the minimum TTL and is chosen by the etcd cluster. Once a lease's TTL elapses, the lease expires and all attached keys are deleted.
Here is the command to grant a lease:
```
# grant a lease with 10 second TTL
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
# attach key foo to lease 32695410dcc0ca06
$ etcdctl put --lease=32695410dcc0ca06 foo bar
OK
```
## Revoke leases
Applications revoke leases by lease ID. Revoking a lease deletes all of its attached keys.
Suppose we finished the following sequence of operations:
```
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
$ etcdctl put --lease=32695410dcc0ca06 foo bar
OK
```
Here is the command to revoke the same lease:
```
$ etcdctl lease revoke 32695410dcc0ca06
lease 32695410dcc0ca06 revoked
$ etcdctl get foo
# empty response since foo is deleted due to lease revocation
```
## Keep leases alive
Applications can keep a lease alive by refreshing its TTL so it does not expire.
Suppose we finished the following sequence of operations:
```
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
```
Here is the command to keep the same lease alive:
```
$ etcdctl lease keep-alive 32695410dcc0ca06
lease 32695410dcc0ca06 keepalived with TTL(10)
lease 32695410dcc0ca06 keepalived with TTL(10)
lease 32695410dcc0ca06 keepalived with TTL(10)
...
```


@ -0,0 +1,90 @@
# Setup a Local Cluster
For testing and development deployments, the quickest and easiest way is to set up a local cluster. For a production deployment, refer to the [clustering][clustering] section.
## Local Standalone Cluster
Deploying an etcd cluster as a standalone cluster is straightforward. Start it with just one command:
```
$ ./etcd
...
```
The started etcd member listens on `localhost:2379` for client requests.
To interact with the running cluster using etcdctl:
```
# use API version 3
$ export ETCDCTL_API=3
$ ./etcdctl put foo bar
OK
$ ./etcdctl get foo
bar
```
## Local Multi-member Cluster
A Procfile is provided to easily set up a local multi-member cluster. Start a multi-member cluster with a few commands:
```
# install the goreman program to control Procfile-based applications.
$ go get github.com/mattn/goreman
$ goreman -f Procfile start
...
```
The started members listen on `localhost:12379`, `localhost:22379`, and `localhost:32379` for client requests respectively.
To interact with the running cluster using etcdctl:
```
# use API version 3
$ export ETCDCTL_API=3
$ etcdctl --endpoints=localhost:12379 member list
+------------------+---------+--------+------------------------+------------------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS |
+------------------+---------+--------+------------------------+------------------------+
| 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:12380 | http://127.0.0.1:12379 |
| 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 |
| fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 |
+------------------+---------+--------+------------------------+------------------------+
$ etcdctl --endpoints=localhost:12379 put foo bar
OK
```
To exercise etcd's fault tolerance, kill a member:
```
# kill etcd2
$ goreman run stop etcd2
$ etcdctl --endpoints=localhost:12379 put key hello
OK
$ etcdctl --endpoints=localhost:12379 get key
hello
# try to get key from the killed member
$ etcdctl --endpoints=localhost:22379 get key
2016/04/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379"
Error: grpc: timed out trying to connect
# restart the killed member
$ goreman run restart etcd2
# get the key from restarted member
$ etcdctl --endpoints=localhost:22379 get key
hello
```
To learn more about interacting with etcd, read the [interacting with etcd][interacting] section.
[interacting]: ./interacting_v3.md
[clustering]: ./clustering.md


@ -0,0 +1,114 @@
# Discovery Service Protocol
The discovery service protocol helps new etcd members discover all other members during the cluster bootstrap phase using a shared discovery URL.
The discovery service protocol is _only_ used in the cluster bootstrap phase, and cannot be used for runtime reconfiguration or cluster monitoring.
The protocol uses a new discovery token to bootstrap one _unique_ etcd cluster. Remember that one discovery token can represent only one etcd cluster. Once the discovery protocol has started on a token, even if it fails halfway, that token must not be used to bootstrap another etcd cluster.
The rest of this article will walk through the discovery process with examples that correspond to a self-hosted discovery cluster. The public discovery service, discovery.etcd.io, functions the same way, but with a layer of polish to abstract away ugly URLs, generate UUIDs automatically, and provide some protections against excessive requests. At its core, the public discovery service still uses an etcd cluster as the data store as described in this document.
## The Protocol Workflow
The idea behind the discovery protocol is to use another etcd cluster to coordinate the bootstrap of a new cluster. First, all new members interact with the discovery service and help generate the expected member list. Then each new member bootstraps its server using this list, which performs the same functionality as the -initial-cluster flag.
In the following example workflow, we will list each step of the protocol as curl commands for ease of understanding.
By convention the etcd discovery protocol uses the key prefix `_etcd/registry`. If `http://example.com` hosts an etcd cluster for the discovery service, the full URL to the discovery keyspace will be `http://example.com/v2/keys/_etcd/registry`. We will use this as the URL prefix in the example.
### Creating a New Discovery Token
Generate a unique token that will identify the new cluster. This will be used as a unique prefix in the discovery keyspace in the following steps. An easy way to do this is to use `uuidgen`:
```
UUID=$(uuidgen)
```
### Specifying the Expected Cluster Size
You need to specify the expected cluster size for this discovery token. The size is used by the discovery service to know when it has found all members that will initially form the cluster.
```
curl -X PUT http://example.com/v2/keys/_etcd/registry/${UUID}/_config/size -d value=${cluster_size}
```
Usually the cluster size is 3, 5 or 7. Check [optimal cluster size][cluster-size] for more details.
### Bringing up etcd Processes
Now that you have a discovery URL, you can use it as the value of the `-discovery` flag and bring up etcd processes. Every etcd process will follow these next few steps internally if given a `-discovery` flag.
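For example, a member could be started as follows (a sketch; the member name and peer URL are illustrative placeholders):
```bash
$ etcd --name infra0 \
  --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --discovery http://example.com/v2/keys/_etcd/registry/${UUID}
```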
### Registering itself
The first thing each etcd process does is register itself into the discovery URL as a member. This is done by creating the member ID as a key in the discovery URL.
```
curl -X PUT http://example.com/v2/keys/_etcd/registry/${UUID}/${member_id}?prevExist=false -d value="${member_name}=${member_peer_url_1}&${member_name}=${member_peer_url_2}"
```
### Checking the Status
It checks the expected cluster size and registration status in the discovery URL, and decides what the next action is.
```
curl -X GET http://example.com/v2/keys/_etcd/registry/${UUID}/_config/size
curl -X GET http://example.com/v2/keys/_etcd/registry/${UUID}
```
If the number of registered members is less than the expected size, it will wait for the remaining members to appear.
If the number of registered members is bigger than the expected size N, it treats the first N registered members as the member list for the cluster. If the member itself is in the member list, the discovery procedure succeeds and it fetches all peers through the member list. If it is not in the member list, the discovery procedure fails because the cluster is already full.
In the etcd implementation, a member may check the cluster status even before registering itself, so it can fail fast if the cluster is already full.
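As a sketch of this decision logic (assuming `jq` is installed for JSON parsing and `UUID` was generated in the earlier step):
```bash
# read the expected cluster size stored under _config/size
size=$(curl -s http://example.com/v2/keys/_etcd/registry/${UUID}/_config/size | jq -r '.node.value')
# count registered members, excluding the _config directory
registered=$(curl -s http://example.com/v2/keys/_etcd/registry/${UUID} \
  | jq '[.node.nodes[]? | select(.key | endswith("/_config") | not)] | length')
if [ "$registered" -ge "$size" ]; then
  echo "the first $size registered members form the cluster"
else
  echo "waiting for more members to register"
fi
```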
### Waiting for All Members
The wait process is described in detail in the [etcd API documentation][api].
```
curl -X GET http://example.com/v2/keys/_etcd/registry/${UUID}?wait=true&waitIndex=${current_etcd_index}
```
It keeps waiting until all members are found.
## Public Discovery Service
CoreOS Inc. hosts a public discovery service at https://discovery.etcd.io/ , which provides some nice features for ease of use.
### Mask Key Prefix
The public discovery service redirects `https://discovery.etcd.io/${UUID}` to the backing etcd cluster's key at `/v2/keys/_etcd/registry`. Masking the registry key prefix keeps discovery URLs short and readable.
### Get new token
```
GET /new
Sent query:
size=${cluster_size}
Possible status codes:
200 OK
400 Bad Request
200 Body:
generated discovery url
```
The generation process in the service follows the steps from [Creating a New Discovery Token][new-discovery-token] to [Specifying the Expected Cluster Size][expected-cluster-size].
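For example:
```bash
$ curl -X GET https://discovery.etcd.io/new?size=3
https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```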
### Check Discovery Status
```
GET /${UUID}
```
You can check the status for this discovery token, including the machines that have been registered, by requesting the value of the UUID.
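For example, using the token generated above:
```bash
# returns the registered machines for this token as JSON
$ curl https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```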
### Open-source repository
The repository is located at https://github.com/coreos/discovery.etcd.io. You could use it to build your own public discovery service.
[api]: ../v2/api.md#waiting-for-a-change
[cluster-size]: ../v2/admin_guide.md#optimal-cluster-size
[expected-cluster-size]: #specifying-the-expected-cluster-size
[new-discovery-token]: #creating-a-new-discovery-token


@ -0,0 +1,29 @@
# Logging Conventions
etcd uses the [capnslog][capnslog] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions:
* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
* Examples:
* A failure to allocate disk space for WAL
* Warning: (Hopefully) Temporary conditions that may cause errors but may also work fine. For example, a replica disappearing (which may reconnect) is a warning.
* Examples:
* Failure to send raft message to a remote peer
* Failure to receive heartbeat message within the configured election timeout
* Notice: Normal, but important (uncommon) log information.
* Examples:
* Add a new node into the cluster
* Add a new user into auth subsystem
* Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
* Examples:
* Startup configuration
* Start to do snapshot
* Debug: Everything is still fine, but even common operations may be logged, with less helpful but higher-volume notices.
* Examples:
* Send a normal message to a remote peer
* Write a log entry to disk
[capnslog]: https://github.com/coreos/pkg/tree/master/capnslog


@ -70,6 +70,9 @@ for i in etcd-*{.zip,.tar.gz}; do gpg2 --default-key $SUBKEYID --output ${i}.asc
for i in etcd-*{.zip,.tar.gz}; do gpg2 --verify ${i}.asc ${i}; done
```
The public key for GPG signing can be found at [CoreOS Application Signing Key](https://coreos.com/security/app-signing-key)
## Publish Release Page in GitHub
- Set release title as the version name.

Documentation/dl_build.md

@ -0,0 +1,56 @@
# Download and Build
## System Requirements
TODO
## Download the Pre-built Binary
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, appc, and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
## Build the Latest Version
For those wanting to try the very latest version, you can build the latest version of etcd from the `master` branch.
[Go][go] version 1.5+ is required to build the latest version of etcd.
Here are the commands to build an etcd binary from the `master` branch:
```
# go is required
$ go version
go version go1.6 darwin/amd64
# GOPATH should be set correctly
$ echo $GOPATH
/Users/example/go
$ mkdir -p $GOPATH/src/github.com/coreos
$ cd $GOPATH/src/github.com/coreos
$ git clone https://github.com/coreos/etcd.git
$ cd etcd
$ ./build
$ ./bin/etcd
...
```
## Test your Installation
Check that the etcd binary was built correctly by starting etcd and setting a key.
Start etcd:
```
$ ./bin/etcd
```
Set a key:
```
$ ETCDCTL_API=3 ./bin/etcdctl put foo bar
OK
```
If OK is printed, then etcd is working!
[github-release]: https://github.com/coreos/etcd/releases/
[go]: https://golang.org/doc/install

Documentation/docs.md

@ -0,0 +1,52 @@
# Documentation
etcd is a distributed key-value store designed to reliably and quickly preserve and provide access to critical data. It enables reliable distributed coordination through distributed locking, leader elections, and write barriers. An etcd cluster is intended for high availability and permanent data storage and retrieval.
## Getting started
New etcd users and developers should get started by [downloading and building][download_build] etcd.
## Developing with etcd
The easiest way to get started using etcd as a distributed key-value store for your applications is to [set up a local cluster][local_cluster].
- [Setting up local clusters][local_cluster]
- [Interacting with etcd][interacting]
- [API references][api_ref]
## Operating etcd clusters
Administrators who need to create reliable and scalable key-value stores for the developers they support should begin with a [cluster on multiple machines][clustering].
- [Setting up clusters][clustering]
- [Configuration][conf]
- [Security][security]
- Monitoring
- [Maintenance][maintenance]
- [Disaster recovery][recovery]
- [Performance][performance]
## Learning
To learn more about the concepts and internals behind etcd, read the following pages:
- Why etcd
- Concepts
- Internals
- [Glossary][glossary]
## Upgrading and compatibility
## Troubleshooting
[api_ref]: dev-guide/api_reference_v3.md
[clustering]: op-guide/clustering.md
[conf]: op-guide/configuration.md
[download_build]: dl_build.md
[glossary]: learning/glossary.md
[interacting]: dev-guide/interacting_v3.md
[local_cluster]: dev-guide/local_cluster.md
[performance]: op-guide/performance.md
[recovery]: op-guide/recovery.md
[maintenance]: op-guide/maintenance.md
[security]: op-guide/security.md


@ -27,6 +27,11 @@
- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
- [jurmous/etcd4j](https://github.com/jurmous/etcd4j) - Supports v2, Async/Sync, waits and SSL
- [AdoHe/etcd4j](http://github.com/AdoHe/etcd4j) - Supports v2 (enhance for real production cluster)
- [cdancy/etcd-rest](https://github.com/cdancy/etcd-rest) - Uses jclouds to provide a complete implementation of v2 API.
**Scala libraries**
- [maciej/etcd-client](https://github.com/maciej/etcd-client) - Supports v2. Akka HTTP-based fully async client
**Python libraries**


@ -1,86 +1,94 @@
# Metrics
**NOTE: The metrics feature is considered experimental. We may add/change/remove metrics without warning in future releases.**
etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset.
etcd uses [Prometheus][prometheus] for metrics reporting in the server. The metrics can be used for real-time monitoring and debugging.
etcd only stores these data in memory. If a member restarts, metrics will reset.
The simplest way to see the available metrics is to cURL the metrics endpoint `/metrics` of etcd. The format is described [here](http://prometheus.io/docs/instrumenting/exposition_formats/).
The simplest way to see the available metrics is to cURL the metrics endpoint `/metrics`. The format is described [here](http://prometheus.io/docs/instrumenting/exposition_formats/).
Follow the [Prometheus getting started doc][prometheus-getting-started] to spin up a Prometheus server to collect etcd metrics.
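For example, on a locally running member listening on the default client port (the output is Prometheus text format, omitted here):
```bash
$ curl http://localhost:2379/metrics
```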
The naming of metrics follows the suggested [best practice of Prometheus][prometheus-naming]. A metric name has an `etcd` prefix as its namespace and a subsystem prefix (for example `wal` and `etcdserver`).
The naming of metrics follows the suggested [Prometheus best practices][prometheus-naming]. A metric name has an `etcd` or `etcd_debugging` prefix as its namespace and a subsystem prefix (for example `wal` and `etcdserver`).
etcd now exposes the following metrics:
## etcd namespace metrics
## etcdserver
The metrics under the `etcd` prefix are for monitoring and alerting. They are stable high level metrics. If there is any change of these metrics, it will be included in release notes.
| Name | Description | Type |
|-----------------------------------------|--------------------------------------------------|-----------|
| file_descriptors_used_total | The total number of file descriptors used | Gauge |
| proposal_durations_seconds | The latency distributions of committing proposal | Histogram |
| pending_proposal_total | The total number of pending proposals | Gauge |
| proposal_failed_total | The total number of failed proposals | Counter |
Metrics that are etcd2 related are documented in the [v2 metrics guide][v2-http-metrics].
High file descriptor usage (`file_descriptors_used_total`, near the file descriptor limit of the process) indicates a potential file descriptor exhaustion issue. That might cause etcd to fail to create new WAL files and panic.
### server
[Proposal][glossary-proposal] durations (`proposal_durations_seconds`) provides a histogram about the proposal commit latency. Latency can be introduced into this process by network and disk IO.
These metrics describe the status of the etcd server. In order to detect outages or problems for troubleshooting, the server metrics of every production etcd cluster should be closely monitored.
Pending proposals (`pending_proposal_total`) gives you an idea about how many proposals are in the queue waiting for commit. An increasing pending number indicates a high client load or an unstable cluster.
All these metrics are prefixed with `etcd_server_`
Failed proposals (`proposal_failed_total`) are normally related to two issues: temporary failures related to a leader election or longer duration downtime caused by a loss of quorum in the cluster.
| Name | Description | Type |
|---------------------------|-----------------------------------|---------|
| leader_changes_seen_total | The number of leader changes seen | Counter |
## wal
`leader_changes_seen_total` counts the number of leader changes the member has seen since its start. Rapid leadership changes impact the performance of etcd significantly. It also signals that the leader is unstable, perhaps due to network connectivity issues or excessive load hitting the etcd cluster.
| Name | Description | Type |
|------------------------------------|--------------------------------------------------|-----------|
| fsync_durations_seconds | The latency distributions of fsync called by wal | Histogram |
| last_index_saved | The index of the last entry saved by wal | Gauge |
### gRPC requests
Abnormally high fsync duration (`fsync_durations_seconds`) indicates disk issues and might cause the cluster to be unstable.
These metrics describe the requests served by a specific etcd member: total received requests, total failed requests, and processing latency. They are useful for tracking user-generated traffic hitting the etcd cluster.
## http requests
These metrics describe the serving of requests (non-watch events) served by etcd members in non-proxy mode: total
incoming requests, request failures and processing latency (inc. raft rounds for storage). They are useful for tracking
user-generated traffic hitting the etcd cluster.
All these metrics are prefixed with `etcd_http_`
All these metrics are prefixed with `etcd_grpc_`
| Name | Description | Type |
|--------------------------------|-----------------------------------------------------------------------------------------|--------------------|
| received_total | Total number of events after parsing and auth. | Counter(method) |
| failed_total | Total number of failed events. | Counter(method,error) |
| successful_duration_second | Bucketed handling times of the requests, including raft rounds for writes. | Histogram(method) |
|--------------------------------|-------------------------------------------------------------------------------------|------------------------|
| requests_total | Total number of received requests | Counter(method) |
| requests_failed_total | Total number of failed requests. | Counter(method,error) |
| unary_requests_duration_seconds | Bucketed handling duration of the requests. | Histogram(method) |
Example Prometheus queries that may be useful from these metrics (across all etcd members):
* `sum(rate(etcd_http_failed_total{job="etcd"}[1m])) by (method) / sum(rate(etcd_http_events_received_total{job="etcd"}[1m])) by (method)`
* `sum(rate(etcd_grpc_requests_failed_total{job="etcd"}[1m])) by (grpc_method) / sum(rate(etcd_grpc_requests_total{job="etcd"}[1m])) by (grpc_method)`
Shows the fraction of events that failed by HTTP method across all members, across a time window of `1m`.
Shows the fraction of events that failed by gRPC method across all members, across a time window of `1m`.
* `sum(rate(etcd_http_received_total{job="etcd",method="GET"}[1m])) by (method)`
`sum(rate(etcd_http_received_total{job="etcd",method!="GET"}[1m])) by (method)`
* `sum(rate(etcd_grpc_requests_total{job="etcd",grpc_method="PUT"}[1m])) by (grpc_method)`
Shows the rate of successful readonly/write queries across all servers, across a time window of `1m`.
Shows the rate of PUT requests across all members, across a time window of `1m`.
* `histogram_quantile(0.9, sum(increase(etcd_http_successful_processing_seconds{job="etcd",method="GET"}[5m]) ) by (le))`
`histogram_quantile(0.9, sum(increase(etcd_http_successful_processing_seconds{job="etcd",method!="GET"}[5m]) ) by (le))`
* `histogram_quantile(0.9, sum(rate(etcd_grpc_unary_requests_duration_seconds_bucket{job="etcd",grpc_method="PUT"}[5m])) by (le))`
Show the 0.90-tile latency (in seconds) of read/write (respectively) event handling across all members, with a window of `5m`.
Show the 0.90-tile latency (in seconds) of PUT request handling across all members, with a window of `5m`.
## snapshot
## etcd_debugging namespace metrics
The metrics under the `etcd_debugging` prefix are for debugging. They are very implementation dependent and volatile. They might be changed or removed without any warning in new etcd releases. Some of the metrics might be moved to the `etcd` prefix when they become more stable.
### etcdserver
| Name | Description | Type |
|-----------------------------------------|--------------------------------------------------|-----------|
| proposal_duration_seconds | The latency distributions of committing proposal | Histogram |
| proposals_pending | The current number of pending proposals | Gauge |
| proposals_failed_total | The total number of failed proposals | Counter |
[Proposal][glossary-proposal] duration (`proposal_duration_seconds`) provides a proposal commit latency histogram. The reported latency reflects network and disk IO delays in etcd.
Proposals pending (`proposals_pending`) indicates how many proposals are queued for commit. Rising pending proposals suggests there is a high client load or the cluster is unstable.
Failed proposals (`proposals_failed_total`) are normally related to two issues: temporary failures related to a leader election or longer duration downtime caused by a loss of quorum in the cluster.
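To eyeball these values on a single member, the metrics endpoint can be filtered (a sketch; the exact exposed metric name prefixes may vary between releases):
```bash
$ curl -s http://localhost:2379/metrics | grep proposal
```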
### wal
| Name | Description | Type |
|------------------------------------|--------------------------------------------------|-----------|
| fsync_duration_seconds | The latency distributions of fsync called by wal | Histogram |
| last_index_saved | The index of the last entry saved by wal | Gauge |
Abnormally high fsync duration (`fsync_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.
### snapshot
| Name | Description | Type |
|--------------------------------------------|------------------------------------------------------------|-----------|
| snapshot_save_total_durations_seconds | The total latency distributions of save called by snapshot | Histogram |
| snapshot_save_total_duration_seconds | The total latency distributions of save called by snapshot | Histogram |
Abnormally high snapshot duration (`snapshot_save_total_durations_seconds`) indicates disk issues and might cause the cluster to be unstable.
Abnormally high snapshot duration (`snapshot_save_total_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.
## rafthttp
### rafthttp
| Name | Description | Type | Labels |
|-----------------------------------|--------------------------------------------|--------------|--------------------------------|
@ -94,41 +102,23 @@ An increase in message failures (`message_sent_failed_total`) indicates more sev
Label `sendingType` is the connection type to send messages. `message`, `msgapp` and `msgappv2` use HTTP streaming, while `pipeline` does HTTP request for each message.
Label `msgType` is the type of raft message. `MsgApp` is log replication message; `MsgSnap` is snapshot install message; `MsgProp` is proposal forward message; the others are used to maintain raft internal status. If you have a large snapshot, you would expect a long msgSnap sending latency. For other types of messages, you would expect low latency, which is comparable to your ping latency if you have enough network bandwidth.
Label `msgType` is the type of raft message. `MsgApp` is log replication messages; `MsgSnap` is snapshot install messages; `MsgProp` is proposal forward messages; the others maintain internal raft status. Given large snapshots, a lengthy msgSnap transmission latency should be expected. For other types of messages, given enough network bandwidth, latencies comparable to ping latency should be expected.
Label `remoteID` is the member ID of the message destination.
## Prometheus supplied metrics
## proxy
The Prometheus client library provides a number of metrics under the `go` and `process` namespaces. There are a few that are particularly interesting.
etcd members operating in proxy mode do not perform store operations. They forward all requests to cluster instances.
| Name | Description | Type |
|-----------------------------------|--------------------------------------------|--------------|
| process_open_fds | Number of open file descriptors. | Gauge |
| process_max_fds | Maximum number of open file descriptors. | Gauge |
Tracking the rate of requests coming from a proxy allows one to pin down which machine is performing most reads/writes.
Heavy file descriptor (`process_open_fds`) usage (i.e., near the process's file descriptor limit, `process_max_fds`) indicates a potential file descriptor exhaustion issue. If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.
All these metrics are prefixed with `etcd_proxy_`
| Name | Description | Type |
|---------------------------|-----------------------------------------------------------------------------------------|--------------------|
| requests_total | Total number of requests by this proxy instance. | Counter(method) |
| handled_total | Total number of fully handled requests, with responses from etcd members. | Counter(method) |
| dropped_total | Total number of dropped requests due to forwarding errors to etcd members. | Counter(method,error) |
| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. | Histogram(method) |
Example Prometheus queries that may be useful from these metrics (across all etcd servers):
* `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)`
Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`.
* `histogram_quantile(0.9, sum(increase(etcd_proxy_events_handling_time_seconds_bucket{job="etcd",method="GET"}[5m])) by (le))`
`histogram_quantile(0.9, sum(increase(etcd_proxy_events_handling_time_seconds_bucket{job="etcd",method!="GET"}[5m])) by (le))`
Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`.
* `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)`
Rate of failed requests on the proxy. This should be 0; spikes here indicate connectivity issues to the etcd cluster.
[glossary-proposal]: glossary.md#proposal
[glossary-proposal]: learning/glossary.md#proposal
[prometheus]: http://prometheus.io/
[prometheus-getting-started](http://prometheus.io/docs/introduction/getting_started/)
[prometheus-getting-started]: http://prometheus.io/docs/introduction/getting_started/
[prometheus-naming]: http://prometheus.io/docs/practices/naming/
[v2-http-metrics]: v2/metrics.md#http-requests


@ -0,0 +1,380 @@
# Clustering Guide
## Overview
Starting an etcd cluster statically requires that each member knows the other members in the cluster. In a number of cases, you might not know the IPs of your cluster members ahead of time. In these cases, you can bootstrap an etcd cluster with the help of a discovery service.
Once an etcd cluster is up and running, adding or removing members is done via [runtime reconfiguration][runtime-conf]. To better understand the design behind runtime reconfiguration, we suggest you read [the runtime configuration design document][runtime-reconf-design].
This guide will cover the following mechanisms for bootstrapping an etcd cluster:
* [Static](#static)
* [etcd Discovery](#etcd-discovery)
* [DNS Discovery](#dns-discovery)
Each of the bootstrapping mechanisms will be used to create a three machine etcd cluster with the following details:
|Name|Address|Hostname|
|------|---------|------------------|
|infra0|10.0.1.10|infra0.example.com|
|infra1|10.0.1.11|infra1.example.com|
|infra2|10.0.1.12|infra2.example.com|
## Static
As we know the cluster members, their addresses and the size of the cluster before starting, we can use an offline bootstrap configuration by setting the `initial-cluster` flag. Each machine will get either the following command line or environment variables:
```
ETCD_INITIAL_CLUSTER="infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380"
ETCD_INITIAL_CLUSTER_STATE=new
```
```
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state new
```
Note that the URLs specified in `initial-cluster` are the _advertised peer URLs_, i.e. they should match the value of `initial-advertise-peer-urls` on the respective nodes.
If you are spinning up multiple clusters (or creating and destroying a single cluster) with same configuration for testing purpose, it is highly recommended that you specify a unique `initial-cluster-token` for the different clusters. By doing this, etcd can generate unique cluster IDs and member IDs for the clusters even if they otherwise have the exact same configuration. This can protect you from cross-cluster-interaction, which might corrupt your clusters.
etcd listens on [`listen-client-urls`][conf-listen-client] to accept client traffic. The etcd member advertises the URLs specified in [`advertise-client-urls`][conf-adv-client] to other members, proxies, and clients. Please make sure these URLs are reachable from the intended clients. A common mistake is setting `advertise-client-urls` to localhost or leaving it as the default when remote clients should reach etcd.
On each machine you would start etcd with these flags:
```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
--listen-peer-urls http://10.0.1.10:2380 \
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.10:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state new
```
```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
--listen-peer-urls http://10.0.1.11:2380 \
--listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.11:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state new
```
```
$ etcd --name infra2 --initial-advertise-peer-urls http://10.0.1.12:2380 \
--listen-peer-urls http://10.0.1.12:2380 \
--listen-client-urls http://10.0.1.12:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.12:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state new
```
The command line parameters starting with `--initial-cluster` will be ignored on subsequent runs of etcd. You are free to remove the environment variables or command line flags after the initial bootstrap process. If you need to make changes to the configuration later (for example, adding or removing members to/from the cluster), see the [runtime configuration][runtime-conf] guide.
### Error Cases
In the following example, we have not included our new host in the list of enumerated nodes. If this is a new cluster, the node _must_ be added to the list of initial cluster members.
```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
--listen-peer-urls https://10.0.1.11:2380 \
--listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.11:2379 \
--initial-cluster infra0=http://10.0.1.10:2380 \
--initial-cluster-state new
etcd: infra1 not listed in the initial cluster config
exit 1
```
In this example, we are attempting to map a node (infra0) to a different address (127.0.0.1:2380) than its enumerated address in the cluster list (10.0.1.10:2380). If this node is to listen on multiple addresses, all addresses _must_ be reflected in the "initial-cluster" configuration directive.
```
$ etcd --name infra0 --initial-advertise-peer-urls http://127.0.0.1:2380 \
--listen-peer-urls http://10.0.1.10:2380 \
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.10:2379 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state=new
etcd: error setting up initial cluster: infra0 has different advertised URLs in the cluster and advertised peer URLs list
exit 1
```
If you configure a peer with a different set of configuration flags and attempt to join this cluster, you will get a cluster ID mismatch and etcd will exit.
```
$ etcd --name infra3 --initial-advertise-peer-urls http://10.0.1.13:2380 \
--listen-peer-urls http://10.0.1.13:2380 \
--listen-client-urls http://10.0.1.13:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.13:2379 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra3=http://10.0.1.13:2380 \
--initial-cluster-state=new
etcd: conflicting cluster ID to the target cluster (c6ab534d07e8fcc4 != bc25ea2a74fb18b0). Exiting.
exit 1
```
## Discovery
In a number of cases, you might not know the IPs of your cluster peers ahead of time. This is common when utilizing cloud providers or when your network uses DHCP. In these cases, rather than specifying a static configuration, you can use an existing etcd cluster to bootstrap a new one. We call this process "discovery".
There are two methods that can be used for discovery:
* etcd discovery service
* DNS SRV records
### etcd Discovery
To better understand the design behind the discovery service protocol, we suggest you read [this][discovery-proto].
#### Lifetime of a Discovery URL
A discovery URL identifies a unique etcd cluster. Instead of reusing a discovery URL, you should always create discovery URLs for new clusters.
Moreover, discovery URLs should ONLY be used for the initial bootstrapping of a cluster. To change cluster membership after the cluster is already running, see the [runtime reconfiguration][runtime-conf] guide.
#### Custom etcd Discovery Service
Discovery uses an existing cluster to bootstrap itself. If you are using your own etcd cluster you can create a URL like so:
```
$ curl -X PUT https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83/_config/size -d value=3
```
Setting the size key on the URL creates a discovery URL with an expected cluster size of 3.
The URL you will use in this case will be `https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83` and the etcd members will use the `https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83` directory for registration as they start.
**Each member must have a different name specified by its name flag, or discovery will fail due to duplicated names. `Hostname` or `machine-id` can be a good choice.**
Now we start etcd with those relevant flags for each member:
```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
--listen-peer-urls http://10.0.1.10:2380 \
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.10:2379 \
--discovery https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83
```
```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
--listen-peer-urls http://10.0.1.11:2380 \
--listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.11:2379 \
--discovery https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83
```
```
$ etcd --name infra2 --initial-advertise-peer-urls http://10.0.1.12:2380 \
--listen-peer-urls http://10.0.1.12:2380 \
--listen-client-urls http://10.0.1.12:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.12:2379 \
--discovery https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83
```
This will cause each member to register itself with the custom etcd discovery service and begin the cluster once all machines have been registered.
#### Public etcd Discovery Service
If you do not have access to an existing cluster, you can use the public discovery service hosted at `discovery.etcd.io`. You can create a private discovery URL using the "new" endpoint like so:
```
$ curl https://discovery.etcd.io/new?size=3
https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
This will create the cluster with an initial expected size of 3 members. If you do not specify a size, a default of 3 will be used.
```
ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
```
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
**Each member must have a different name specified by its name flag, or discovery will fail due to duplicated names. `Hostname` or `machine-id` can be a good choice.**
Now we start etcd with those relevant flags for each member:
```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
--listen-peer-urls http://10.0.1.10:2380 \
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.10:2379 \
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
--listen-peer-urls http://10.0.1.11:2380 \
--listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.11:2379 \
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
```
$ etcd --name infra2 --initial-advertise-peer-urls http://10.0.1.12:2380 \
--listen-peer-urls http://10.0.1.12:2380 \
--listen-client-urls http://10.0.1.12:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.12:2379 \
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
This will cause each member to register itself with the discovery service and begin the cluster once all members have been registered.
You can use the environment variable `ETCD_DISCOVERY_PROXY` to cause etcd to use an HTTP proxy to connect to the discovery service.
#### Error and Warning Cases
##### Discovery Server Errors
```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
--listen-peer-urls http://10.0.1.10:2380 \
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.10:2379 \
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
etcd: error: the cluster doesn't have a size configuration value in https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de/_config
exit 1
```
##### Warnings
This is a harmless warning notifying you that the discovery URL will be
ignored on this machine.
```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
--listen-peer-urls http://10.0.1.10:2380 \
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://10.0.1.10:2379 \
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at /var/lib/etcd
```
### DNS Discovery
DNS [SRV records][rfc-srv] can be used as a discovery mechanism.
The `-discovery-srv` flag can be used to set the DNS domain name where the discovery SRV records can be found.
The following DNS SRV records are looked up in the listed order:
* _etcd-server-ssl._tcp.example.com
* _etcd-server._tcp.example.com
If `_etcd-server-ssl._tcp.example.com` is found then etcd will attempt the bootstrapping process over SSL.
To help clients discover the etcd cluster, the following DNS SRV records are looked up in the listed order:
* _etcd-client._tcp.example.com
* _etcd-client-ssl._tcp.example.com
If `_etcd-client-ssl._tcp.example.com` is found, clients will attempt to communicate with the etcd cluster over SSL.
#### Create DNS SRV records
```
$ dig +noall +answer SRV _etcd-server._tcp.example.com
_etcd-server._tcp.example.com. 300 IN SRV 0 0 2380 infra0.example.com.
_etcd-server._tcp.example.com. 300 IN SRV 0 0 2380 infra1.example.com.
_etcd-server._tcp.example.com. 300 IN SRV 0 0 2380 infra2.example.com.
```
```
$ dig +noall +answer SRV _etcd-client._tcp.example.com
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra0.example.com.
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra1.example.com.
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra2.example.com.
```
```
$ dig +noall +answer infra0.example.com infra1.example.com infra2.example.com
infra0.example.com. 300 IN A 10.0.1.10
infra1.example.com. 300 IN A 10.0.1.11
infra2.example.com. 300 IN A 10.0.1.12
```
#### Bootstrap the etcd cluster using DNS
etcd cluster members can listen on domain names or IP addresses; the bootstrap process will resolve DNS A records.
The resolved address in `--initial-advertise-peer-urls` *must match* one of the resolved addresses in the SRV targets. The etcd member reads the resolved address to find out if it belongs to the cluster defined in the SRV records.
```
$ etcd --name infra0 \
--discovery-srv example.com \
--initial-advertise-peer-urls http://infra0.example.com:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster-state new \
--advertise-client-urls http://infra0.example.com:2379 \
--listen-client-urls http://infra0.example.com:2379 \
--listen-peer-urls http://infra0.example.com:2380
```
```
$ etcd --name infra1 \
--discovery-srv example.com \
--initial-advertise-peer-urls http://infra1.example.com:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster-state new \
--advertise-client-urls http://infra1.example.com:2379 \
--listen-client-urls http://infra1.example.com:2379 \
--listen-peer-urls http://infra1.example.com:2380
```
```
$ etcd --name infra2 \
--discovery-srv example.com \
--initial-advertise-peer-urls http://infra2.example.com:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster-state new \
--advertise-client-urls http://infra2.example.com:2379 \
--listen-client-urls http://infra2.example.com:2379 \
--listen-peer-urls http://infra2.example.com:2380
```
You can also bootstrap the cluster using IP addresses instead of domain names:
```
$ etcd --name infra0 \
--discovery-srv example.com \
--initial-advertise-peer-urls http://10.0.1.10:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster-state new \
--advertise-client-urls http://10.0.1.10:2379 \
--listen-client-urls http://10.0.1.10:2379 \
--listen-peer-urls http://10.0.1.10:2380
```
```
$ etcd --name infra1 \
--discovery-srv example.com \
--initial-advertise-peer-urls http://10.0.1.11:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster-state new \
--advertise-client-urls http://10.0.1.11:2379 \
--listen-client-urls http://10.0.1.11:2379 \
--listen-peer-urls http://10.0.1.11:2380
```
```
$ etcd --name infra2 \
--discovery-srv example.com \
--initial-advertise-peer-urls http://10.0.1.12:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster-state new \
--advertise-client-urls http://10.0.1.12:2379 \
--listen-client-urls http://10.0.1.12:2379 \
--listen-peer-urls http://10.0.1.12:2380
```
### Proxy
When the `--proxy` flag is set, etcd runs in [proxy mode][proxy]. This proxy mode only supports the etcd v2 API; there are no plans to support the v3 API. Instead, for v3 API support, there will be a new proxy with enhanced features following the etcd 3.0 release.
To set up an etcd cluster with v2 API proxies, please read the [clustering doc in etcd 2.3 release][clustering_etcd2].
[conf-adv-client]: configuration.md#-advertise-client-urls
[conf-listen-client]: configuration.md#-listen-client-urls
[discovery-proto]: dev-internal/discovery_protocol.md
[rfc-srv]: http://www.ietf.org/rfc/rfc2052.txt
[runtime-conf]: runtime-configuration.md
[runtime-reconf-design]: runtime-reconf-design.md
[proxy]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/proxy.md
[clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md


@ -0,0 +1,285 @@
# Configuration Flags
etcd is configurable through command-line flags and environment variables. Options set on the command line take precedence over those from the environment.
The format of the environment variable for flag `--my-flag` is `ETCD_MY_FLAG`; this applies to all flags.
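For example, the following two invocations are equivalent, using the `--data-dir` flag documented below:
```bash
# set the data directory via a command-line flag
$ etcd --data-dir /var/lib/etcd
# or via the corresponding environment variable
$ ETCD_DATA_DIR=/var/lib/etcd etcd
```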
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. Some legacy code and documentation still references ports 4001 and 7001, but all new etcd use and discussion should adopt the assigned ports.
To start etcd automatically with custom settings at boot on Linux, using a [systemd][systemd-intro] unit is highly recommended.
[systemd-intro]: http://freedesktop.org/wiki/Software/systemd/
## Member Flags
### --name
+ Human-readable name for this member.
+ default: "default"
+ env variable: ETCD_NAME
+ This value is referenced as this node's own entries listed in the `--initial-cluster` flag (e.g., `default=http://localhost:2380` or `default=http://localhost:2380,default=http://localhost:7001`). The name must match the key used in that flag when using [static bootstrapping][build-cluster]. When using discovery, each member must have a unique name. `Hostname` or `machine-id` can be a good choice.
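As a minimal sketch of the static-bootstrap case (addresses illustrative), the `--name` value must appear as the key for this member's entry in `--initial-cluster`:
```sh
$ etcd --name infra0 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380 \
  --initial-advertise-peer-urls http://10.0.1.10:2380
```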
### --data-dir
+ Path to the data directory.
+ default: "${name}.etcd"
+ env variable: ETCD_DATA_DIR
### --wal-dir
+ Path to the dedicated wal directory. If this flag is set, etcd will write the WAL files to the wal directory rather than the data directory. This allows a dedicated disk to be used, and helps avoid I/O contention between logging and other IO operations.
+ default: ""
+ env variable: ETCD_WAL_DIR
### --snapshot-count
+ Number of committed transactions to trigger a snapshot to disk.
+ default: "10000"
+ env variable: ETCD_SNAPSHOT_COUNT
### --heartbeat-interval
+ Time (in milliseconds) of a heartbeat interval.
+ default: "100"
+ env variable: ETCD_HEARTBEAT_INTERVAL
### --election-timeout
+ Time (in milliseconds) for an election to timeout. See [Documentation/tuning.md](tuning.md#time-parameters) for details.
+ default: "1000"
+ env variable: ETCD_ELECTION_TIMEOUT
### --listen-peer-urls
+ List of URLs to listen on for peer traffic. This flag tells etcd to accept incoming requests from its peers on the specified scheme://IP:port combinations. The scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd listens on the given port on all interfaces. If an IP address is given as well as a port, etcd will listen on the given port and interface. Multiple URLs may be used to specify a number of addresses and ports to listen on. etcd will respond to requests from any of the listed addresses and ports.
+ default: "http://localhost:2380,http://localhost:7001"
+ env variable: ETCD_LISTEN_PEER_URLS
+ example: "http://10.0.0.1:2380"
+ invalid example: "http://example.com:2380" (domain name is invalid for binding)
### --listen-client-urls
+ List of URLs to listen on for client traffic. This flag tells etcd to accept incoming requests from clients on the specified scheme://IP:port combinations. The scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd listens on the given port on all interfaces. If an IP address is given as well as a port, etcd will listen on the given port and interface. Multiple URLs may be used to specify a number of addresses and ports to listen on. etcd will respond to requests from any of the listed addresses and ports.
+ default: "http://localhost:2379,http://localhost:4001"
+ env variable: ETCD_LISTEN_CLIENT_URLS
+ example: "http://10.0.0.1:2379"
+ invalid example: "http://example.com:2379" (domain name is invalid for binding)
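For example, to accept client requests on all interfaces while advertising a single routable address (addresses illustrative):
```sh
$ etcd --listen-client-urls http://0.0.0.0:2379 \
  --advertise-client-urls http://10.0.0.1:2379
```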
### --max-snapshots
+ Maximum number of snapshot files to retain (0 is unlimited)
+ default: 5
+ env variable: ETCD_MAX_SNAPSHOTS
+ The default for users on Windows is unlimited, and manual purging down to 5 (or your preference for safety) is recommended.
### --max-wals
+ Maximum number of wal files to retain (0 is unlimited)
+ default: 5
+ env variable: ETCD_MAX_WALS
+ The default for users on Windows is unlimited, and manual purging down to 5 (or your preference for safety) is recommended.
### --cors
+ Comma-separated white list of origins for CORS (cross-origin resource sharing).
+ default: none
+ env variable: ETCD_CORS
## Clustering Flags
`--initial` prefix flags are used when bootstrapping a new member ([static bootstrap][build-cluster], [discovery-service bootstrap][discovery], or [runtime reconfiguration][reconfig]) and are ignored when restarting an existing member.
`--discovery` prefix flags need to be set when using [discovery service][discovery].
### --initial-advertise-peer-urls
+ List of this member's peer URLs to advertise to the rest of the cluster. These addresses are used for communicating etcd data around the cluster. At least one must be routable to all cluster members. These URLs can contain domain names.
+ default: "http://localhost:2380,http://localhost:7001"
+ env variable: ETCD_INITIAL_ADVERTISE_PEER_URLS
+ example: "http://example.com:2380, http://10.0.0.1:2380"
### --initial-cluster
+ Initial cluster configuration for bootstrapping.
+ default: "default=http://localhost:2380,default=http://localhost:7001"
+ env variable: ETCD_INITIAL_CLUSTER
+ The key is the value of the `--name` flag for each node provided. The default uses `default` for the key because this is the default for the `--name` flag.
### --initial-cluster-state
+ Initial cluster state ("new" or "existing"). Set to `new` for all members present during initial static or DNS bootstrapping. If this option is set to `existing`, etcd will attempt to join the existing cluster. If the wrong value is set, etcd will attempt to start but fail safely.
+ default: "new"
+ env variable: ETCD_INITIAL_CLUSTER_STATE
### --initial-cluster-token
+ Initial cluster token for the etcd cluster during bootstrap.
+ default: "etcd-cluster"
+ env variable: ETCD_INITIAL_CLUSTER_TOKEN
### --advertise-client-urls
+ List of this member's client URLs to advertise to the rest of the cluster. These URLs can contain domain names.
+ default: "http://localhost:2379,http://localhost:4001"
+ env variable: ETCD_ADVERTISE_CLIENT_URLS
+ example: "http://example.com:2379, http://10.0.0.1:2379"
+ Be careful if you are advertising URLs such as http://localhost:2379 from a cluster member and are using the proxy feature of etcd. This will cause loops, because the proxy will be forwarding requests to itself until its resources (memory, file descriptors) are eventually depleted.
### --discovery
+ Discovery URL used to bootstrap the cluster.
+ default: none
+ env variable: ETCD_DISCOVERY
### --discovery-srv
+ DNS srv domain used to bootstrap the cluster.
+ default: none
+ env variable: ETCD_DISCOVERY_SRV
### --discovery-fallback
+ Expected behavior ("exit" or "proxy") when the discovery service fails.
+ default: "proxy"
+ env variable: ETCD_DISCOVERY_FALLBACK
### --discovery-proxy
+ HTTP proxy to use for traffic to discovery service.
+ default: none
+ env variable: ETCD_DISCOVERY_PROXY
### --strict-reconfig-check
+ Reject reconfiguration requests that would cause quorum loss.
+ default: false
+ env variable: ETCD_STRICT_RECONFIG_CHECK
## Proxy Flags
`--proxy` prefix flags configure etcd to run in [proxy mode][proxy].
### --proxy
+ Proxy mode setting ("off", "readonly" or "on").
+ default: "off"
+ env variable: ETCD_PROXY
### --proxy-failure-wait
+ Time (in milliseconds) an endpoint will be held in a failed state before being reconsidered for proxied requests.
+ default: 5000
+ env variable: ETCD_PROXY_FAILURE_WAIT
### --proxy-refresh-interval
+ Time (in milliseconds) of the endpoints refresh interval.
+ default: 30000
+ env variable: ETCD_PROXY_REFRESH_INTERVAL
### --proxy-dial-timeout
+ Time (in milliseconds) for a dial to timeout, or 0 to disable the timeout.
+ default: 1000
+ env variable: ETCD_PROXY_DIAL_TIMEOUT
### --proxy-write-timeout
+ Time (in milliseconds) for a write to timeout, or 0 to disable the timeout.
+ default: 5000
+ env variable: ETCD_PROXY_WRITE_TIMEOUT
### --proxy-read-timeout
+ Time (in milliseconds) for a read to timeout, or 0 to disable the timeout.
+ Don't change this value when using watches, because watches rely on long-polling requests.
+ default: 0
+ env variable: ETCD_PROXY_READ_TIMEOUT
## Security Flags
The security flags help to [build a secure etcd cluster][security].
### --ca-file [DEPRECATED]
+ Path to the client server TLS CA file. `--ca-file ca.crt` can be replaced by `--trusted-ca-file ca.crt --client-cert-auth`, and etcd will behave the same.
+ default: none
+ env variable: ETCD_CA_FILE
### --cert-file
+ Path to the client server TLS cert file.
+ default: none
+ env variable: ETCD_CERT_FILE
### --key-file
+ Path to the client server TLS key file.
+ default: none
+ env variable: ETCD_KEY_FILE
### --client-cert-auth
+ Enable client cert authentication.
+ default: false
+ env variable: ETCD_CLIENT_CERT_AUTH
### --trusted-ca-file
+ Path to the client server TLS trusted CA cert file.
+ default: none
+ env variable: ETCD_TRUSTED_CA_FILE
### --auto-tls
+ Client TLS using generated certificates
+ default: false
+ env variable: ETCD_AUTO_TLS
### --peer-ca-file [DEPRECATED]
+ Path to the peer server TLS CA file. `--peer-ca-file ca.crt` can be replaced by `--peer-trusted-ca-file ca.crt --peer-client-cert-auth`, and etcd will behave the same.
+ default: none
+ env variable: ETCD_PEER_CA_FILE
### --peer-cert-file
+ Path to the peer server TLS cert file.
+ default: none
+ env variable: ETCD_PEER_CERT_FILE
### --peer-key-file
+ Path to the peer server TLS key file.
+ default: none
+ env variable: ETCD_PEER_KEY_FILE
### --peer-client-cert-auth
+ Enable peer client cert authentication.
+ default: false
+ env variable: ETCD_PEER_CLIENT_CERT_AUTH
### --peer-trusted-ca-file
+ Path to the peer server TLS trusted CA file.
+ default: none
+ env variable: ETCD_PEER_TRUSTED_CA_FILE
### --peer-auto-tls
+ Peer TLS using generated certificates
+ default: false
+ env variable: ETCD_PEER_AUTO_TLS
## Logging Flags
### --debug
+ Drop the default log level to DEBUG for all subpackages.
+ default: false (INFO for all packages)
+ env variable: ETCD_DEBUG
### --log-package-levels
+ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG`
+ default: none (INFO for all packages)
+ env variable: ETCD_LOG_PACKAGE_LEVELS
## Unsafe Flags
Be CAUTIOUS when using unsafe flags, because they will break the guarantees given by the consensus protocol.
For example, a member may panic if other members in the cluster are still alive.
Follow the instructions carefully when using these flags.
### --force-new-cluster
+ Force the creation of a new one-member cluster. It commits configuration changes that forcibly remove all existing members in the cluster and add itself. It needs to be set to [restore a backup][restore].
+ default: false
+ env variable: ETCD_FORCE_NEW_CLUSTER
## Miscellaneous Flags
### --version
+ Print the version and exit.
+ default: false
## Profiling flags
### --enable-pprof
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof"
+ default: false
[build-cluster]: clustering.md#static
[reconfig]: runtime-configuration.md
[discovery]: clustering.md#discovery
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
[proxy]: ../v2/proxy.md
[restore]: ../v2/admin_guide.md#restoring-a-backup
[rfc-v3]: rfc/v3api.md
[security]: security.md
[systemd-intro]: http://freedesktop.org/wiki/Software/systemd/
[tuning]: tuning.md#time-parameters


@ -0,0 +1,115 @@
# Maintenance
## Overview
An etcd cluster needs periodic maintenance to remain reliable. Depending on an etcd application's needs, this maintenance can usually be automated and performed without downtime or significantly degraded performance.
All etcd maintenance manages the storage resources consumed by the etcd keyspace. Storage space quotas guard against failure to adequately control the keyspace size; if an etcd member runs low on space, a quota triggers cluster-wide alarms that put the system into a limited-operation maintenance mode. To avoid running out of space for writes to the keyspace, the etcd keyspace history must be compacted. Storage space itself may be reclaimed by defragmenting etcd members. Finally, periodic snapshot backups of etcd member state make it possible to recover from any unintended logical data loss or corruption caused by operational error.
## History compaction
Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. Compacting the keyspace history drops all information about keys superseded prior to a given keyspace revision. The space used by these keys then becomes available for additional writes to the keyspace.
The keyspace can be compacted automatically with `etcd`'s time windowed history retention policy, or manually with `etcdctl`. The `etcdctl` method provides fine-grained control over the compacting process whereas automatic compacting fits applications that only need key history for some length of time.
`etcd` can be set to automatically compact the keyspace with the `--experimental-auto-compaction-retention` option, which takes a period in hours:
```sh
# keep one hour of history
$ etcd --experimental-auto-compaction-retention=1
```
An `etcdctl` initiated compaction works as follows:
```sh
# compact up to revision 3
$ etcdctl compact 3
```
Revisions prior to the compaction revision become inaccessible:
```sh
$ etcdctl get --rev=2 somekey
Error: rpc error: code = 11 desc = etcdserver: mvcc: required revision has been compacted
```
## Defragmentation
After compacting the keyspace, the backend database may exhibit internal fragmentation: compacting old revisions leaves gaps in the backend database that are free for `etcd` to use but remain unavailable to the host filesystem, so they still consume storage space. The process of defragmentation releases this space back to the filesystem. Defragmentation is issued on a per-member basis so that cluster-wide latency spikes may be avoided.
To defragment an etcd member, use the `etcdctl defrag` command:
```sh
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
```
## Space quota
The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. After freeing enough space in the keyspace, the alarm can be disarmed and the cluster will resume normal operation.
By default, `etcd` sets a conservative space quota suitable for most applications, but it may be configured on the command line, in bytes:
```sh
# set a very small 16MB quota
$ etcd --quota-backend-bytes=16777216
```
The space quota can be triggered with a loop:
```sh
# fill keyspace
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break; done
...
Error: rpc error: code = 8 desc = etcdserver: mvcc: database space exceeded
# confirm quota space is exceeded
$ etcdctl endpoint status
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| 127.0.0.1:2379 | bf9071f4639c75cc | 2.3.0+git | 18 MB | true | 2 | 3332 |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
# confirm alarm is raised
$ etcdctl alarm list
memberID:13803658152347727308 alarm:NOSPACE
```
Removing excessive keyspace data will put the cluster back within the quota limits so the alarm can be disarmed:
```sh
# get current revision
$ etcdctl --endpoints=:2379 endpoint status
[{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":8925027824743593106,"member_id":13803658152347727308,"revision":1516,"raft_term":2},"version":"2.3.0+git","dbSize":17973248,"leader":13803658152347727308,"raftIndex":6359,"raftTerm":2}}]
# compact away all old revisions
$ etcdctl compact 1516
compacted revision 1516
# defragment away excessive space
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
# disarm alarm
$ etcdctl alarm disarm
memberID:13803658152347727308 alarm:NOSPACE
# test puts are allowed again
$ etcdctl put newkey 123
OK
```
## Snapshot backup
Snapshotting the `etcd` cluster on a regular basis serves as a durable backup for an etcd keyspace. By taking periodic snapshots of an etcd member's backend database, an `etcd` cluster can be recovered to a point in time with a known good state.
A snapshot is taken with `etcdctl`:
```sh
$ etcdctl snapshot save backup.db
$ etcdctl snapshot status backup.db
+----------+----------+------------+------------+
| HASH | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
| fe01cf57 | 10 | 7 | 2.1 MB |
+----------+----------+------------+------------+
```
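Because a snapshot is a single `etcdctl` invocation, it is easy to schedule. A sketch using cron, with the path and schedule purely illustrative (note that `%` must be escaped in crontab entries):
```sh
# /etc/cron.d/etcd-backup: hourly snapshot of the local member
0 * * * * root etcdctl snapshot save /var/backups/etcd/snapshot-$(date +\%Y\%m\%d\%H).db
```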


@ -0,0 +1,13 @@
# Performance
## Understanding performance
etcd provides stable, sustained high performance. Two factors define performance: latency and throughput. Latency is the time taken to complete an operation. Throughput is the total operations completed within some time period. Usually average latency increases as the overall throughput increases when etcd accepts concurrent client requests. In common cloud environments, like a standard `n-4` on Google Compute Engine (GCE) or a comparable machine type on AWS, a three member etcd cluster finishes a request in less than one millisecond under light load, and can complete more than 30,000 requests per second under heavy load.
etcd uses the Raft consensus algorithm to replicate requests among members and reach agreement. Consensus performance, especially commit latency, is limited by two physical constraints: network IO latency and disk IO latency. The minimum time to finish an etcd request is the network Round Trip Time (RTT) between members, plus the time `fdatasync` requires to commit the data to permanent storage. The RTT within a datacenter may be as long as several hundred microseconds. A typical RTT within the United States is around 50ms, and can be as slow as 400ms between continents. The typical fdatasync latency for a spinning disk is about 10ms, so on HDD-backed members the disk dominates the latency floor; for SSDs, the latency is often lower than 1ms, leaving the network RTT as the dominant term. To increase throughput, etcd batches multiple requests together and submits them to Raft. This batching policy lets etcd attain high throughput despite heavy load.
There are other sub-systems which impact the overall performance of etcd. Each serialized etcd request must run through etcd's boltdb-backed MVCC storage engine, which usually takes tens of microseconds to finish. Periodically etcd incrementally snapshots its recently applied requests, merging them back with the previous on-disk snapshot. This process may lead to a latency spike. Although this is usually not a problem on SSDs, it may double the observed latency on HDD. Likewise, inflight compactions can impact etcd's performance. Fortunately, the impact is often insignificant since compaction is staggered so it does not compete for resources with regular requests. The RPC system, gRPC, gives etcd a well-defined, extensible API, but it also introduces additional latency, especially for local reads.
## Benchmarks
TODO


@ -0,0 +1,61 @@
## Disaster recovery
etcd is designed to withstand machine failures. An etcd cluster automatically recovers from temporary failures (e.g., machine reboots) and tolerates up to *(N-1)/2* permanent failures for a cluster of N members. When a member permanently fails, whether due to hardware failure or disk corruption, it loses access to the cluster. If the cluster permanently loses more than *(N-1)/2* members then it disastrously fails, irrevocably losing quorum. Once quorum is lost, the cluster cannot reach consensus and therefore cannot continue accepting updates.
To recover from disastrous failure, etcd provides snapshot and restore facilities to recreate the cluster without data loss.
TODO(xiangli): add note to clarify this only recovers for the kv store of etcd3.
### Snapshotting the keyspace
Recovering a cluster first needs a snapshot of the keyspace from an etcd member. A snapshot may either be taken from a live member with the `etcdctl snapshot save` command or by copying the `member/snap/db` file from an etcd data directory. For example, the following command snapshots the keyspace served by `$ENDPOINT` to the file `snapshot.db`:
```sh
$ etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
```
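The integrity of the snapshot may be verified before using it for recovery, with the `etcdctl snapshot status` command described in the maintenance document:
```sh
$ etcdctl snapshot status snapshot.db
```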
### Restoring a cluster
To restore a cluster, all that is needed is a single snapshot "db" file. A cluster restore with `etcdctl snapshot restore` creates new etcd data directories; all members should restore using the same snapshot. Restoring overwrites some snapshot metadata (specifically, the member ID and cluster ID); the member loses its former identity. This metadata overwrite prevents the new member from inadvertently joining an existing cluster. Therefore in order to start a cluster from a snapshot, the restore must start a new logical cluster.
A restore initializes a new member of a new cluster, with a fresh cluster configuration using `etcd`'s cluster configuration flags, but preserves the contents of the etcd keyspace. Continuing from the previous example, the following creates new etcd data directories (`m1.etcd`, `m2.etcd`, `m3.etcd`) for a three member cluster:
```sh
$ etcdctl snapshot restore snapshot.db \
--name m1 \
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls http://host1:2380
$ etcdctl snapshot restore snapshot.db \
--name m2 \
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls http://host2:2380
$ etcdctl snapshot restore snapshot.db \
--name m3 \
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls http://host3:2380
```
Next, start `etcd` with the new data directories:
```sh
$ etcd \
--name m1 \
--listen-client-urls http://host1:2379 \
--advertise-client-urls http://host1:2379 \
--listen-peer-urls http://host1:2380 &
$ etcd \
--name m2 \
--listen-client-urls http://host2:2379 \
--advertise-client-urls http://host2:2379 \
--listen-peer-urls http://host2:2380 &
$ etcd \
--name m3 \
--listen-client-urls http://host3:2379 \
--advertise-client-urls http://host3:2379 \
--listen-peer-urls http://host3:2380 &
```
Now the restored etcd cluster should be available and serving the keyspace given by the snapshot.


@ -0,0 +1,186 @@
# Runtime Reconfiguration
etcd comes with support for incremental runtime reconfiguration, which allows users to update the membership of the cluster at run time.
Reconfiguration requests can only be processed when a majority of the cluster members are functioning. It is **highly recommended** to always have a cluster size greater than two in production. It is unsafe to remove a member from a two member cluster: the majority of a two member cluster is also two, so if there is a failure during the removal process, the cluster may not be able to make progress and may need to [restart from majority failure][majority failure].
To better understand the design behind runtime reconfiguration, we suggest you read [the runtime reconfiguration document][runtime-reconf].
## Reconfiguration Use Cases
Let's walk through some common reasons for reconfiguring a cluster. Most of these just involve combinations of adding or removing a member, which are explained below under [Cluster Reconfiguration Operations][cluster-reconf].
### Cycle or Upgrade Multiple Machines
If you need to move multiple members of your cluster due to planned maintenance (hardware upgrades, network downtime, etc.), it is recommended to modify members one at a time.
It is safe to remove the leader; however, there will be a brief period of downtime while the election process takes place. If your cluster holds more than 50MB, it is recommended to [migrate the member's data directory][member migration].
### Change the Cluster Size
Increasing the cluster size can enhance [failure tolerance][fault tolerance table] and provide better read performance. Since clients can read from any member, increasing the number of members increases the overall read throughput.
Decreasing the cluster size can improve the write performance of a cluster, with a trade-off of decreased resilience. Writes into the cluster are replicated to a majority of members of the cluster before considered committed. Decreasing the cluster size lowers the majority, and each write is committed more quickly.
### Replace A Failed Machine
If a machine fails due to hardware failure, data directory corruption, or some other fatal situation, it should be replaced as soon as possible. Machines that have failed but haven't been removed adversely affect your quorum and reduce the tolerance for an additional failure.
To replace the machine, follow the instructions for [removing the member][remove member] from the cluster, and then [add a new member][add member] in its place. If your cluster holds more than 50MB, it is recommended to [migrate the failed member's data directory][member migration] if you can still access it.
### Restart Cluster from Majority Failure
If the majority of your cluster is lost or all of your nodes have changed IP addresses, then you need to take manual action in order to recover safely.
The basic steps in the recovery process include [creating a new cluster using the old data][disaster recovery], forcing a single member to act as the leader, and finally using runtime configuration to [add new members][add member] to this new cluster one at a time.
## Cluster Reconfiguration Operations
Now that we have the use cases in mind, let us lay out the operations involved in each.
Before making any change, the simple majority (quorum) of etcd members must be available.
This is essentially the same requirement as for any other write to etcd.
All changes to the cluster are done one at a time:
* To update a single member's peer URLs, make an update operation
* To replace a single member, make an add operation followed by a remove operation
* To increase from 3 to 5 members, make two add operations
* To decrease from 5 to 3 members, make two remove operations
All of these examples will use the `etcdctl` command line tool that ships with etcd.
If you want to use the members API directly you can find the documentation [here][member-api].
TODO: v3 member API documentation
### Update a Member
#### Update advertise client URLs
If you would like to update the advertise client URLs of a member, simply restart
that member with the updated client URLs flag (`--advertise-client-urls`) or environment variable
(`ETCD_ADVERTISE_CLIENT_URLS`). The restarted member will self-publish the updated URLs.
A wrongly updated client URL will not affect the health of the etcd cluster.
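As a sketch, restarting a member with a new advertised client URL might look like the following; all other flags stay as in the member's original configuration, and the address is illustrative:
```sh
$ etcd --name node1 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --listen-client-urls http://10.0.1.10:2379
```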
#### Update advertise peer URLs
If you would like to update the advertise peer URLs of a member, you have to first update
them explicitly via the member command and then restart the member. The additional step is required
because updating peer URLs changes the cluster-wide configuration and can affect the health of the etcd cluster.
To update the peer URLs, first, we need to find the target member's ID. You can list all members with `etcdctl`:
```sh
$ etcdctl member list
6e3bd23ae5f1eae0: name=node2 peerURLs=http://localhost:23802 clientURLs=http://127.0.0.1:23792
924e2e83e93f2560: name=node3 peerURLs=http://localhost:23803 clientURLs=http://127.0.0.1:23793
a8266ecf031671f3: name=node1 peerURLs=http://localhost:23801 clientURLs=http://127.0.0.1:23791
```
In this example, let's update member a8266ecf031671f3 and change its peer URLs value to http://10.0.1.10:2380:
```sh
$ etcdctl member update a8266ecf031671f3 http://10.0.1.10:2380
Updated member with ID a8266ecf031671f3 in cluster
```
### Remove a Member
Let us say the member ID we want to remove is a8266ecf031671f3.
We then use the `remove` command to perform the removal:
```sh
$ etcdctl member remove a8266ecf031671f3
Removed member a8266ecf031671f3 from cluster
```
The target member will stop itself at this point and print out the removal in the log:
```
etcd: this member has been permanently removed from the cluster. Exiting.
```
It is safe to remove the leader; however, the cluster will be inactive while a new leader is elected. This duration is normally the period of the election timeout plus the voting process.
### Add a New Member
Adding a member is a two step process:
* Add the new member to the cluster via the [members API][member-api] or the `etcdctl member add` command.
* Start the new member with the new cluster configuration, including a list of the updated members (existing members + the new member).
Using `etcdctl` let's add the new member to the cluster by specifying its [name][conf-name] and [advertised peer URLs][conf-adv-peer]:
```sh
$ etcdctl member add infra3 http://10.0.1.13:2380
added member 9bf1b35fc7761a23 to cluster
ETCD_NAME="infra3"
ETCD_INITIAL_CLUSTER="infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra3=http://10.0.1.13:2380"
ETCD_INITIAL_CLUSTER_STATE=existing
```
`etcdctl` has informed the cluster about the new member and printed out the environment variables needed to successfully start it.
Now start the new etcd process with the relevant flags for the new member:
```sh
$ export ETCD_NAME="infra3"
$ export ETCD_INITIAL_CLUSTER="infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra3=http://10.0.1.13:2380"
$ export ETCD_INITIAL_CLUSTER_STATE=existing
$ etcd --listen-client-urls http://10.0.1.13:2379 --advertise-client-urls http://10.0.1.13:2379 --listen-peer-urls http://10.0.1.13:2380 --initial-advertise-peer-urls http://10.0.1.13:2380 --data-dir %data_dir%
```
The new member will run as a part of the cluster and immediately begin catching up with the rest of the cluster.
If you are adding multiple members, the best practice is to configure a single member at a time and verify it starts correctly before adding more new members.
If you add a new member to a 1-node cluster, the cluster cannot make progress until the new member starts, because it needs two members as a majority to agree on consensus. This behavior only occurs between the time `etcdctl member add` informs the cluster about the new member and the time the new member successfully establishes a connection to the existing one.
#### Error Cases When Adding Members
In the following case we have not included our new host in the list of enumerated nodes.
If this is a new cluster, the node must be added to the list of initial cluster members.
```sh
$ etcd --name infra3 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state existing
etcdserver: assign ids error: the member count is unequal
exit 1
```
In this case we give a different address (10.0.1.14:2380) from the one that we used to join the cluster (10.0.1.13:2380).
```sh
$ etcd --name infra4 \
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra4=http://10.0.1.14:2380 \
--initial-cluster-state existing
etcdserver: assign ids error: unmatched member while checking PeerURLs
exit 1
```
When we start etcd using the data directory of a removed member, etcd will exit automatically if it connects to any active member in the cluster:
```sh
$ etcd
etcd: this member has been permanently removed from the cluster. Exiting.
exit 1
```
### Strict Reconfiguration Check Mode (`--strict-reconfig-check`)
As described above, the best practice for adding new members is to configure a single member at a time and verify it starts correctly before adding more new members. This step-by-step approach is very important because if a newly added member is not configured correctly (for example, the peer URLs are incorrect), the cluster can lose quorum. Quorum loss happens because the newly added member is counted in the quorum even if that member is not reachable from the other existing members. Quorum loss might also happen if there is a connectivity issue or other operational issues.
To avoid this problem, etcd provides the `--strict-reconfig-check` option. If this option is passed to etcd, etcd rejects reconfiguration requests if the number of started members would be less than a quorum of the reconfigured cluster.
It is recommended to enable this option. However, it is disabled by default for backward compatibility.
[add member]: #add-a-new-member
[cluster-reconf]: #cluster-reconfiguration-operations
[conf-adv-peer]: configuration.md#-initial-advertise-peer-urls
[conf-name]: configuration.md#-name
[disaster recovery]: recovery.md
[fault tolerance table]: ../v2/admin_guide.md#fault-tolerance-table
[majority failure]: #restart-cluster-from-majority-failure
[member-api]: ../v2/members_api.md
[member migration]: ../v2/admin_guide.md#member-migration
[remove member]: #remove-a-member
[runtime-reconf]: runtime-reconf-design.md


@ -0,0 +1,50 @@
# Design of Runtime Reconfiguration
Runtime reconfiguration is one of the hardest and most error prone features in a distributed system, especially in a consensus based system like etcd.
Read on to learn about the design of etcd's runtime reconfiguration commands and how we tackled these problems.
## Two Phase Config Changes Keep you Safe
In etcd, every runtime reconfiguration has to go through [two phases][add-member] for safety reasons. For example, to add a member you need to first inform the cluster of the new configuration and then start the new member.
Phase 1 - Inform the cluster of the new configuration
To add a member into an etcd cluster, you need to make an API call requesting that a new member be added to the cluster. This is the only way to add a new member into an existing cluster. The API call returns when the cluster agrees on the configuration change.
Phase 2 - Start the new member
To join the new etcd member into the existing cluster, you need to specify the correct `initial-cluster` and set `initial-cluster-state` to `existing`. When the member starts, it will first contact the existing cluster and verify that the current cluster configuration matches the one specified in `initial-cluster`. When the new member successfully starts, you know your cluster reached the expected configuration.
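Concretely, the two phases map onto commands covered in the runtime configuration document; a sketch for adding a member (addresses illustrative):
```sh
# Phase 1: inform the cluster of the new configuration
$ etcdctl member add infra3 http://10.0.1.13:2380
# Phase 2: start the new member with the agreed configuration
$ etcd --name infra3 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra3=http://10.0.1.13:2380 \
  --initial-cluster-state existing
```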
By splitting the process into two discrete phases, users are forced to be explicit regarding cluster membership changes. This actually gives users more flexibility and makes things easier to reason about. For example, if there is an attempt to add a new member with the same ID as an existing member in an etcd cluster, the action will fail immediately during phase one without impacting the running cluster. Similar protection is provided to prevent adding new members by mistake. If a new etcd member attempts to join the cluster before the cluster has accepted the configuration change, it will not be accepted by the cluster.
Without this explicit workflow around cluster membership, etcd would be vulnerable to unexpected cluster membership changes. For example, if etcd is running under an init system such as systemd, etcd would be restarted after being removed via the membership API and would attempt to rejoin the cluster on startup. This cycle would repeat every time a member is removed via the API while systemd is set to restart etcd after failure, which is unexpected.
We think runtime reconfiguration should be an infrequent operation. We made the decision to keep it explicit and user-driven to ensure configuration safety and to keep your cluster always running smoothly under your control.
## Permanent Loss of Quorum Requires New Cluster
If a cluster permanently loses a majority of its members, a new cluster will need to be started from an old data directory to recover the previous state.
It is entirely possible to force-remove the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus commit phase, which is unsafe. If the member to remove is not actually dead, or different members are force-removed through different members in the same cluster, you will end up with a diverged cluster with the same cluster ID. This is very dangerous and hard to debug/fix afterwards.
With a correct deployment, the possibility of permanent majority loss is very low. But it is a severe enough problem that it is worth special care. We strongly suggest reading the [disaster recovery documentation][disaster-recovery] and preparing for permanent majority loss before putting etcd into production.
## Do Not Use Public Discovery Service For Runtime Reconfiguration
The public discovery service should only be used for bootstrapping a cluster. To join a member into an existing cluster, use the runtime reconfiguration API.
The discovery service is designed for bootstrapping an etcd cluster in a cloud environment, when the IP addresses of all the members are not known beforehand. After a cluster is successfully bootstrapped, the IP addresses of all the members are known, so the discovery service should no longer be needed.
Using the public discovery service may seem like a convenient way to do runtime reconfiguration, since the discovery service already holds all the cluster configuration information. However, relying on the public discovery service brings problems:
1. it introduces an external dependency for the entire life-cycle of your cluster, not just bootstrap time. If there is a network issue between your cluster and the public discovery service, your cluster will suffer from it.
2. the public discovery service must reflect the correct runtime configuration of your cluster during its life-cycle. It has to provide security mechanisms to prevent bad actions, and this is hard.
3. the public discovery service has to keep tens of thousands of cluster configurations. Our public discovery service backend is not ready for that workload.
If you want a discovery service that supports runtime reconfiguration, the best choice is to build a private one.
[add-member]: runtime-configuration.md#add-a-new-member
[disaster-recovery]: recovery.md


@ -0,0 +1,193 @@
# Security Model
etcd supports SSL/TLS as well as authentication through client certificates, both for client-to-server and for peer (server-to-server / cluster) communication.
To get up and running you first need to have a CA certificate and a signed key pair for one member. It is recommended to create and sign a new key pair for every member in a cluster.
For convenience, the [cfssl] tool provides an easy interface to certificate generation, and we provide an example using the tool [here][tls-setup]. You can also examine this [alternative guide to generating self-signed key pairs][tls-guide].
## Basic setup
etcd takes several certificate related configuration options, either through command-line flags or environment variables:
**Client-to-server communication:**
`--cert-file=<path>`: Certificate used for SSL/TLS connections **to** etcd. When this option is set, advertise-client-urls can use the HTTPS schema.
`--key-file=<path>`: Key for the certificate. Must be unencrypted.
`--client-cert-auth`: When this is set, etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA; requests that don't supply a valid client certificate will fail.
`--trusted-ca-file=<path>`: Trusted certificate authority.
**Peer (server-to-server / cluster) communication:**
The peer options work the same way as the client-to-server options:
`--peer-cert-file=<path>`: Certificate used for SSL/TLS connections between peers. This will be used both for listening on the peer address as well as sending requests to other peers.
`--peer-key-file=<path>`: Key for the certificate. Must be unencrypted.
`--peer-client-cert-auth`: When set, etcd will check all incoming peer requests from the cluster for valid client certificates signed by the supplied CA.
`--peer-trusted-ca-file=<path>`: Trusted certificate authority.
If either a client-to-server or peer certificate is supplied the key must also be set. All of these configuration options are also available through the environment variables, `ETCD_CA_FILE`, `ETCD_PEER_CA_FILE` and so on.
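For instance, the certificate flags from Example 1 below could equally be supplied through the environment (paths illustrative):
```sh
$ ETCD_CERT_FILE=/path/to/server.crt ETCD_KEY_FILE=/path/to/server.key \
  etcd --name infra0 --data-dir infra0 \
  --advertise-client-urls=https://127.0.0.1:2379 --listen-client-urls=https://127.0.0.1:2379
```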
## Example 1: Client-to-server transport security with HTTPS
For this you need your CA certificate (`ca.crt`) and signed key pair (`server.crt`, `server.key`) ready.
Let us configure etcd to provide simple HTTPS transport security step by step:
```sh
$ etcd --name infra0 --data-dir infra0 \
--cert-file=/path/to/server.crt --key-file=/path/to/server.key \
--advertise-client-urls=https://127.0.0.1:2379 --listen-client-urls=https://127.0.0.1:2379
```
This should start up fine and you can now test the configuration by speaking HTTPS to etcd:
```sh
$ curl --cacert /path/to/ca.crt https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
```
You should be able to see the handshake succeed. Because we use self-signed certificates with our own certificate authorities you need to provide the CA to curl using the `--cacert` option. Another possibility would be to add your CA certificate to the trusted certificates on your system (usually in `/etc/ssl/certs`).
**OSX 10.9+ Users**: curl 7.30.0 on OSX 10.9+ doesn't understand certificates passed in on the command line.
Instead you must import the dummy ca.crt directly into the keychain or add the `-k` flag to curl to ignore errors.
If you want to test without the `-k` flag run `open ./fixtures/ca/ca.crt` and follow the prompts.
Please remove this certificate after you are done testing!
If you know of a workaround let us know.
## Example 2: Client-to-server authentication with HTTPS client certificates
So far we've given the etcd client the ability to verify the server identity and provide transport security. We can, however, also use client certificates to prevent unauthorized access to etcd.
The clients will provide their certificates to the server and the server will check whether the cert is signed by the supplied CA and decide whether to serve the request.
You need the same files mentioned in the first example for this, as well as a key pair for the client (`client.crt`, `client.key`) signed by the same certificate authority.
```sh
$ etcd --name infra0 --data-dir infra0 \
--client-cert-auth --trusted-ca-file=/path/to/ca.crt --cert-file=/path/to/server.crt --key-file=/path/to/server.key \
--advertise-client-urls https://127.0.0.1:2379 --listen-client-urls https://127.0.0.1:2379
```
Now try the same request as above to this server:
```sh
$ curl --cacert /path/to/ca.crt https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
```
The request should be rejected by the server:
```
...
routines:SSL3_READ_BYTES:sslv3 alert bad certificate
...
```
To make it succeed, we need to give the CA signed client certificate to the server:
```sh
$ curl --cacert /path/to/ca.crt --cert /path/to/client.crt --key /path/to/client.key \
-L https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
```
You should be able to see:
```
...
SSLv3, TLS handshake, CERT verify (15):
...
TLS handshake, Finished (20)
```
And also the response from the server:
```json
{
"action": "set",
"node": {
"createdIndex": 12,
"key": "/foo",
"modifiedIndex": 12,
"value": "bar"
}
}
```
## Example 3: Transport security & client certificates in a cluster
etcd supports the same model as above for **peer communication**, which means the communication between etcd members in a cluster.
Assuming we have our `ca.crt` and two members with their own keypairs (`member1.crt` & `member1.key`, `member2.crt` & `member2.key`) signed by this CA, we launch etcd as follows:
```sh
DISCOVERY_URL=... # from https://discovery.etcd.io/new
# member1
$ etcd --name infra1 --data-dir infra1 \
--peer-client-cert-auth --peer-trusted-ca-file=/path/to/ca.crt --peer-cert-file=/path/to/member1.crt --peer-key-file=/path/to/member1.key \
--initial-advertise-peer-urls=https://10.0.1.10:2380 --listen-peer-urls=https://10.0.1.10:2380 \
--discovery ${DISCOVERY_URL}
# member2
$ etcd --name infra2 --data-dir infra2 \
--peer-client-cert-auth --peer-trusted-ca-file=/path/to/ca.crt --peer-cert-file=/path/to/member2.crt --peer-key-file=/path/to/member2.key \
--initial-advertise-peer-urls=https://10.0.1.11:2380 --listen-peer-urls=https://10.0.1.11:2380 \
--discovery ${DISCOVERY_URL}
```
The etcd members will form a cluster and all communication between members in the cluster will be encrypted and authenticated using the client certificates. You will see in the output of etcd that the addresses it connects to use HTTPS.
## Notes For etcd Proxy
etcd proxy terminates the TLS from its client if the connection is secure, and uses the proxy's own key/cert specified in `--peer-key-file` and `--peer-cert-file` to communicate with etcd members.
The proxy communicates with etcd members through both the `--advertise-client-urls` and `--advertise-peer-urls` of a given member. It forwards client requests to the etcd members' advertised client URLs, and it syncs the initial cluster configuration through the etcd members' advertised peer URLs.
When client authentication is enabled for an etcd member, the administrator must ensure that the peer certificate specified in the proxy's `--peer-cert-file` option is valid for that authentication. The proxy's peer certificate must also be valid for peer authentication if peer authentication is enabled.
## Frequently Asked Questions
### My cluster is not working with peer TLS configuration?
The internal protocol of etcd v2.0.x uses a lot of short-lived HTTP connections.
So, when enabling TLS you may need to increase the heartbeat interval and election timeouts to reduce internal cluster connection churn.
A reasonable place to start are these values: `--heartbeat-interval 500 --election-timeout 2500`.
These issues are resolved in the etcd v2.1.x series of releases which uses fewer connections.
### I'm seeing a SSLv3 alert handshake failure when using SSL client authentication?
The `crypto/tls` package of `golang` checks the key usage of the certificate public key before using it.
To use the certificate public key to do client auth, we need to add `clientAuth` to `Extended Key Usage` when creating the certificate public key.
Here is how to do it:
Add the following section to your openssl.cnf:
```
[ ssl_client ]
...
extendedKeyUsage = clientAuth
...
```
When creating the cert be sure to reference it in the `-extensions` flag:
```
$ openssl ca -config openssl.cnf -policy policy_anything -extensions ssl_client -out certs/machine.crt -infiles machine.csr
```
### With peer certificate authentication I receive "certificate is valid for 127.0.0.1, not $MY_IP"
Make sure to sign your certificates with your member's public IP address in the Subject Name. The `etcd-ca` tool, for example, provides an `--ip=` option for its `new-cert` command.
If you need your certificate to be signed for your member's FQDN in its Subject Name, use Subject Alternative Names (IP SANs) to add your IP address. The `etcd-ca` tool provides a `--domain=` option for its `new-cert` command, and openssl can do [it][alt-name] too.
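With openssl, one way to include IP SANs is through the config file. A sketch of the relevant openssl.cnf sections, with illustrative values:
```
[ ssl_client ]
extendedKeyUsage = clientAuth
subjectAltName = @alt_names

[ alt_names ]
IP.1 = 10.0.1.10
DNS.1 = infra0.example.com
```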
[cfssl]: https://github.com/cloudflare/cfssl
[tls-setup]: /hack/tls-setup
[tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
[alt-name]: http://wiki.cacert.org/FAQ/subjectAltName


@ -48,4 +48,15 @@ CyCore Systems provides architecture and engineering for computing systems. Thi
Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these clusters as well as provide some basic environment bootstrapping configuration keys.
## Vonage
- *Application*: system configuration for microservices, scheduling, locks (future - service discovery)
- *Launched*: August 2015
- *Cluster Size*: 2 clusters of 5 members in 2 DCs, n local proxies 1-to-1 with microservices (SSL and SRV lookup)
- *Order of Data Size*: kilobytes
- *Operator*: Vonage [devAdmin][raoofm]
- *Environment*: VMWare, AWS
- *Backups*: Daily snapshots on VMs. Backups done for upgrades.
[teamcity]: https://www.jetbrains.com/teamcity/
[raoofm]:https://github.com/raoofm


@ -208,4 +208,4 @@ WatchResponse {
```
[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/mvcc/mvccpb/kv.proto


@ -41,7 +41,7 @@ You can override the default values on the command line:
```sh
# Command line arguments:
$ etcd -heartbeat-interval=100 -election-timeout=500
$ etcd --heartbeat-interval=100 --election-timeout=500
# Environment variables:
$ ETCD_HEARTBEAT_INTERVAL=100 ETCD_ELECTION_TIMEOUT=500 etcd
@ -66,7 +66,7 @@ If etcd's memory usage and disk usage are too high, you can lower the snapshot t
```sh
# Command line arguments:
$ etcd -snapshot-count=5000
$ etcd --snapshot-count=5000
# Environment variables:
$ ETCD_SNAPSHOT_COUNT=5000 etcd

Documentation/v2/README.md Normal file

@ -0,0 +1,154 @@
# etcd2
[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd)](https://goreportcard.com/report/github.com/coreos/etcd)
[![Build Status](https://travis-ci.org/coreos/etcd.svg?branch=master)](https://travis-ci.org/coreos/etcd)
[![Build Status](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd)
[![Docker Repository on Quay.io](https://quay.io/repository/coreos/etcd-git/status "Docker Repository on Quay.io")](https://quay.io/repository/coreos/etcd-git)
**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
![etcd Logo](../../logos/etcd-horizontal-color.png)
etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being:
* *Simple*: curl'able user-facing API (HTTP+JSON)
* *Secure*: optional SSL client cert authentication
* *Fast*: benchmarked 1000s of writes/s per instance
* *Reliable*: properly distributed using Raft
etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log.
etcd is used [in production by many companies](./production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], and many others.
See [etcdctl][etcdctl] for a simple command line client.
Or feel free to just use `curl`, as in the examples below.
[raft]: https://raft.github.io/
[k8s]: http://kubernetes.io/
[fleet]: https://github.com/coreos/fleet
[locksmith]: https://github.com/coreos/locksmith
[vulcand]: https://github.com/vulcand/vulcand
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
## Getting Started
### Getting etcd
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
For those wanting to try the very latest version, you can build the latest version of etcd from the `master` branch.
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.5+ is required).
All development occurs on `master`, including new features and bug fixes.
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
[github-release]: https://github.com/coreos/etcd/releases/
[branch-management]: branch_management.md
### Running etcd
First start a single-member cluster of etcd:
```sh
./bin/etcd
```
This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
Next, let's set a single key, and then retrieve it:
```
curl -L http://127.0.0.1:2379/v2/keys/mykey -XPUT -d value="this is awesome"
curl -L http://127.0.0.1:2379/v2/keys/mykey
```
You have successfully started an etcd and written a key to the store.
### etcd TCP ports
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. To maintain compatibility, some etcd configuration and documentation continues to refer to the legacy ports 4001 and 7001, but all new etcd use and discussion should adopt the IANA-assigned ports. The legacy ports 4001 and 7001 will be fully deprecated, and support for their use removed, in future etcd releases.
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
### Running local etcd cluster
First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
Our [Procfile script](./Procfile) will set up a local example cluster. You can start it with:
```sh
goreman start
```
This will bring up 3 etcd members `infra1`, `infra2` and `infra3`, plus an etcd proxy `proxy`, all running locally and forming a cluster.
You can write a key to the cluster and retrieve the value back from any member or proxy.
### Next Steps
Now it's time to dig into the full etcd API and other guides.
- Explore the full [API][api].
- Set up a [multi-machine cluster][clustering].
- Learn the [config format, env variables and flags][configuration].
- Find [language bindings and tools][libraries-and-tools].
- Use TLS to [secure an etcd cluster][security].
- [Tune etcd][tuning].
- [Upgrade from 0.4.9+ to 2.2.0][upgrade].
[api]: ./api.md
[clustering]: ./clustering.md
[configuration]: ./configuration.md
[libraries-and-tools]: ./libraries-and-tools.md
[security]: ./security.md
[tuning]: ./tuning.md
[upgrade]: ./04_to_2_snapshot_migration.md
## Contact
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](../../ROADMAP.md)
- Bugs: [issues](https://github.com/coreos/etcd/issues)
## Contributing
See [CONTRIBUTING](../../CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
## Reporting bugs
See [reporting bugs](reporting_bugs.md) for details about reporting any issue you may encounter.
## Project Details
### Versioning
#### Service Versioning
etcd uses [semantic versioning](http://semver.org). New minor versions may add additional features to the API.
You can get the version of etcd by issuing a request to /version:
```sh
curl -L http://127.0.0.1:2379/version
```
#### API Versioning
The `v2` API responses should not change after the 2.0.0 release but new features will be added over time.
#### 32-bit and other unsupported systems
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to the target architecture.
Currently only the amd64 architecture is officially supported by `etcd`.
[358]: https://github.com/coreos/etcd/issues/358
### License
etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.


@ -0,0 +1,92 @@
# etcd3 API
TODO: API doc
## Data Model
etcd is designed to reliably store infrequently updated data and provide reliable watch queries. etcd exposes previous versions of key-value pairs to support inexpensive snapshots and watch history events (“time travel queries”). A persistent, multi-version, concurrency-control data model is a good fit for these use cases.
etcd stores data in a multiversion [persistent][persistent-ds] key-value store. The persistent key-value store preserves the previous version of a key-value pair when its value is superseded with new data. The key-value store is effectively immutable; its operations do not update the structure in-place, but instead always generate a new updated structure. All past versions of keys are still accessible and watchable after modification. To prevent the data store from growing indefinitely over time from maintaining old versions, the store may be compacted to shed the oldest versions of superseded data.
### Logical View
The store's logical view is a flat binary key space. The key space has a lexically sorted index on byte string keys, so range queries are inexpensive.
The key space maintains multiple revisions. Each atomic mutative operation (e.g., a transaction operation may contain multiple operations) creates a new revision on the key space. All data held by previous revisions remains unchanged. Old versions of keys can still be accessed through previous revisions. Revisions are indexed as well, so ranging over revisions with watchers is efficient. If the store is compacted to recover space, revisions before the compact revision will be removed.
A key's lifetime spans a generation. Each key may have one or multiple generations. Creating a key increments the generation of that key, starting at 1 if the key never existed. Deleting a key generates a key tombstone, concluding the key's current generation. Each modification of a key creates a new version of the key. Once a compaction happens, any generation that ended before the compaction revision will be removed, and values set before the compaction revision, except the latest one, will be removed.
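As a rough sketch of revisions in practice, the following Go program uses the `clientv3` package against a member assumed to be listening on `localhost:2379`; two puts create two revisions, and the superseded value remains readable at the older revision until compaction:
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// each put is an atomic mutation that creates a new revision
	r1, err := cli.Put(ctx, "foo", "v1")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(ctx, "foo", "v2"); err != nil {
		log.Fatal(err)
	}

	// reading at the older revision returns the superseded value "v1"
	old, err := cli.Get(ctx, "foo", clientv3.WithRev(r1.Header.Revision))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("foo at revision %d: %s\n", r1.Header.Revision, old.Kvs[0].Value)
}
```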
### Physical View
etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store's state contains only the delta from its previous revision, for efficiency. A single revision may correspond to multiple keys in the tree.
The key of a key-value pair is a 3-tuple (major, sub, type). Major is the store revision holding the key. Sub differentiates among keys within the same revision. Type is an optional suffix for special values (e.g., `t` if the value contains a tombstone). The value of the key-value pair contains the modification from the previous revision, i.e., one delta from the previous revision. The b+tree is ordered by key in lexical byte-order. Ranged lookups over revision deltas are fast; this enables quickly finding modifications from one specific revision to another. Compaction removes out-of-date key-value pairs.
etcd also keeps a secondary in-memory [btree][btree] index to speed up range queries over keys. The keys in the btree index are the keys of the store exposed to the user. The value is a pointer to the modification of the persistent b+tree. Compaction removes dead pointers.
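As an illustration only (this is not etcd's actual encoding), the 3-tuple can be modeled like this in Go; note how two keys written by one transaction share the same major revision:
```go
package main

import "fmt"

// revKey models the logical b+tree key described above: the store
// revision that wrote the pair (major), the ordinal of the write
// within that revision (sub), and an optional tombstone marker (type).
type revKey struct {
	major int64 // store revision holding the key
	sub   int64 // differentiates keys within the same revision
	tomb  bool  // true if this version deletes the key (suffix "t")
}

func main() {
	keys := []revKey{
		{major: 5, sub: 0},             // first write at revision 5
		{major: 5, sub: 1},             // second write in the same transaction
		{major: 6, sub: 0, tomb: true}, // a delete at revision 6
	}
	for _, k := range keys {
		fmt.Printf("major=%d sub=%d tombstone=%v\n", k.major, k.sub, k.tomb)
	}
}
```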
## KV API Guarantees
etcd is a consistent and durable key value store with mini-transaction (TODO: link to txn doc when we have it) support. The key value store is exposed through the KV APIs. etcd tries to ensure the strongest consistency and durability guarantees for a distributed system. This specification enumerates the KV API guarantees made by etcd.
### APIs to consider
* Read APIs
* range
* watch
* Write APIs
* put
* delete
* Combination (read-modify-write) APIs
* txn
### etcd Specific Definitions
#### operation completed
An etcd operation is considered complete when it is committed through consensus, and therefore “executed” -- permanently stored -- by the etcd storage engine. The client knows an operation is completed when it receives a response from the etcd server. Note that the client may be uncertain about the status of an operation if it times out, or there is a network disruption between the client and the etcd member. etcd may also abort operations when there is a leader election. etcd does not send `abort` responses to clients' outstanding requests in this event.
#### revision
An etcd operation that modifies the key value store is assigned a single increasing revision. A transaction operation might modify the key value store multiple times, but only one revision is assigned. The revision attribute of a key value pair modified by the operation has the same value as the revision of the operation. The revision can be used as a logical clock for the key value store. A key value pair that has a larger revision was modified after a key value pair with a smaller revision. Two key value pairs that have the same revision were modified by an operation "concurrently".
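For instance, a minimal `clientv3` sketch (endpoint assumed) showing that a transaction touching multiple keys still commits at one revision:
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// two puts issued in one transaction commit atomically and are
	// assigned a single revision
	resp, err := cli.Txn(context.Background()).
		Then(clientv3.OpPut("a", "1"), clientv3.OpPut("b", "2")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("txn committed at revision", resp.Header.Revision)
}
```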
### Guarantees Provided
#### Atomicity
All API requests are atomic; an operation either completes entirely or not at all. For watch requests, all events generated by one operation will be in one watch response. Watch never observes partial events for a single operation.
#### Consistency
All API calls ensure [sequential consistency][seq_consistency], the strongest consistency guarantee available from distributed systems. No matter which etcd member server a client makes requests to, a client reads the same events in the same order. If two members complete the same number of operations, the state of the two members is consistent.
For watch operations, etcd guarantees to return the same value for the same key across all members for the same revision. For range operations, etcd has a similar guarantee for [linearized][Linearizability] access; serializable access may lag behind the quorum state, so the latest revision may not yet be available.
As with all distributed systems, it is impossible for etcd to ensure [strict consistency][strict_consistency]. etcd does not guarantee that it will return to a read the “most recent” value (as measured by a wall clock when a request is completed) available on any cluster member.
#### Isolation
etcd ensures [serializable isolation][serializable_isolation], which is the highest isolation level available in distributed systems. Read operations will never observe any intermediate data.
#### Durability
Any completed operations are durable. All accessible data is also durable data. A read will never return data that has not been made durable.
#### Linearizability
Linearizability (also known as Atomic Consistency or External Consistency) is a consistency level between strict consistency and sequential consistency.
For linearizability, suppose each operation receives a timestamp from a loosely synchronized global clock. Operations are linearized if and only if they always complete as though they were executed in a sequential order and each operation appears to complete in the order specified by the program. Likewise, if an operation's timestamp precedes another, that operation must also precede the other operation in the sequence.
For example, consider a client completing a write at time point 1 (*t1*). A client issuing a read at *t2* (for *t2* > *t1*) should receive a value at least as recent as the previous write, completed at *t1*. However, the read might actually complete only by *t3*, and the returned value, current at *t2* when the read began, might be "stale" by *t3*.
etcd does not ensure linearizability for watch operations. Users are expected to verify the revision of watch responses to ensure correct ordering.
etcd ensures linearizability for all other operations by default. Linearizability comes with a cost, however, because linearized requests must go through the Raft consensus process. To obtain lower latencies and higher throughput for read requests, clients can configure a request's consistency mode to `serializable`, which may access stale data with respect to quorum, but removes the performance penalty of linearized accesses' reliance on live consensus.
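As a sketch (endpoint assumed), a serializable read with the Go `clientv3` package looks like this:
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// default reads are linearized through consensus; WithSerializable
	// serves the read from the contacted member's local state instead,
	// trading possibly stale data for lower latency
	resp, err := cli.Get(context.Background(), "foo", clientv3.WithSerializable())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("kvs:", resp.Kvs)
}
```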
[persistent-ds]: https://en.wikipedia.org/wiki/Persistent_data_structure
[btree]: https://en.wikipedia.org/wiki/B-tree
[b+tree]: https://en.wikipedia.org/wiki/B%2B_tree
[seq_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency
[strict_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency
[serializable_isolation]: https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable
[Linearizability]: #linearizability

View File

@ -18,7 +18,7 @@ The major flag changes are mostly related to bootstrapping. The `initial-*` f
- `-peer-election-timeout` is replaced by `-election-timeout`.
The documentation of new command line flags can be found at
https://github.com/coreos/etcd/blob/master/Documentation/v2/configuration.md.
## Data Directory Naming

View File

@ -0,0 +1,18 @@
# Benchmarks
etcd benchmarks will be published regularly and tracked for each release below:
- [etcd v2.1.0-alpha][2.1]
- [etcd v2.2.0-rc][2.2]
- [etcd v3 demo][3.0]
# Memory Usage Benchmarks
These benchmarks record expected memory usage in different scenarios.
- [etcd v2.2.0-rc][2.2-mem]
[2.1]: etcd-2-1-0-alpha-benchmarks.md
[2.2]: etcd-2-2-0-rc-benchmarks.md
[2.2-mem]: etcd-2-2-0-rc-memory-benchmarks.md
[3.0]: etcd-3-demo-benchmarks.md

View File

@ -0,0 +1,52 @@
## Physical machines
GCE n1-highcpu-2 machine type
- 1x dedicated local SSD mounted under /var/lib/etcd
- 1x dedicated slow disk for the OS
- 1.8 GB memory
- 2x CPUs
- etcd version 2.1.0 alpha
## etcd Cluster
3 etcd members, each running on a single machine
## Testing
Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
## Performance
### reading one single key
| key size in bytes | number of clients | target etcd server | read QPS | 90th Percentile Latency (ms) |
|-------------------|-------------------|--------------------|----------|---------------|
| 64 | 1 | leader only | 1534 | 0.7 |
| 64 | 64 | leader only | 10125 | 9.1 |
| 64 | 256 | leader only | 13892 | 27.1 |
| 256 | 1 | leader only | 1530 | 0.8 |
| 256 | 64 | leader only | 10106 | 10.1 |
| 256 | 256 | leader only | 14667 | 27.0 |
| 64 | 64 | all servers | 24200 | 3.9 |
| 64 | 256 | all servers | 33300 | 11.8 |
| 256 | 64 | all servers | 24800 | 3.9 |
| 256 | 256 | all servers | 33000 | 11.5 |
### writing one single key
| key size in bytes | number of clients | target etcd server | write QPS | 90th Percentile Latency (ms) |
|-------------------|-------------------|--------------------|-----------|---------------|
| 64 | 1 | leader only | 60 | 21.4 |
| 64 | 64 | leader only | 1742 | 46.8 |
| 64 | 256 | leader only | 3982 | 90.5 |
| 256 | 1 | leader only | 58 | 20.3 |
| 256 | 64 | leader only | 1770 | 47.8 |
| 256 | 256 | leader only | 4157 | 105.3 |
| 64 | 64 | all servers | 1028 | 123.4 |
| 64 | 256 | all servers | 3260 | 123.8 |
| 256 | 64 | all servers | 1033 | 121.5 |
| 256 | 256 | all servers | 3061 | 119.3 |
[boom]: https://github.com/rakyll/boom
[hack-benchmark]: /hack/benchmark/

View File

@ -0,0 +1,69 @@
# Benchmarking etcd v2.2.0
## Physical Machines
GCE n1-highcpu-2 machine type
- 1x dedicated local SSD mounted as etcd data directory
- 1x dedicated slow disk for the OS
- 1.8 GB memory
- 2x CPUs
## etcd Cluster
3 etcd 2.2.0 members, each running on a single machine.
Detailed versions:
```
etcd Version: 2.2.0
Git SHA: e4561dd
Go Version: go1.5
Go OS/Arch: linux/amd64
```
## Testing
Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool](https://github.com/rakyll/boom) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
The performance is calculated from the results of 100 benchmark rounds.
## Performance
### Single Key Read Performance
| key size in bytes | number of clients | target etcd server | average read QPS | read QPS stddev | average 90th Percentile Latency (ms) | latency stddev |
|-------------------|-------------------|--------------------|------------------|-----------------|--------------------------------------|----------------|
| 64 | 1 | leader only | 2303 | 200 | 0.49 | 0.06 |
| 64 | 64 | leader only | 15048 | 685 | 7.60 | 0.46 |
| 64 | 256 | leader only | 14508 | 434 | 29.76 | 1.05 |
| 256 | 1 | leader only | 2162 | 214 | 0.52 | 0.06 |
| 256 | 64 | leader only | 14789 | 792 | 7.69| 0.48 |
| 256 | 256 | leader only | 14424 | 512 | 29.92 | 1.42 |
| 64 | 64 | all servers | 45752 | 2048 | 2.47 | 0.14 |
| 64 | 256 | all servers | 46592 | 1273 | 10.14 | 0.59 |
| 256 | 64 | all servers | 45332 | 1847 | 2.48| 0.12 |
| 256 | 256 | all servers | 46485 | 1340 | 10.18 | 0.74 |
### Single Key Write Performance
| key size in bytes | number of clients | target etcd server | average write QPS | write QPS stddev | average 90th Percentile Latency (ms) | latency stddev |
|-------------------|-------------------|--------------------|------------------|-----------------|--------------------------------------|----------------|
| 64 | 1 | leader only | 55 | 4 | 24.51 | 13.26 |
| 64 | 64 | leader only | 2139 | 125 | 35.23 | 3.40 |
| 64 | 256 | leader only | 4581 | 581 | 70.53 | 10.22 |
| 256 | 1 | leader only | 56 | 4 | 22.37| 4.33 |
| 256 | 64 | leader only | 2052 | 151 | 36.83 | 4.20 |
| 256 | 256 | leader only | 4442 | 560 | 71.59 | 10.03 |
| 64 | 64 | all servers | 1625 | 85 | 58.51 | 5.14 |
| 64 | 256 | all servers | 4461 | 298 | 89.47 | 36.48 |
| 256 | 64 | all servers | 1599 | 94 | 60.11| 6.43 |
| 256 | 256 | all servers | 4315 | 193 | 88.98 | 7.01 |
## Performance Changes
- Because etcd now records metrics for each API call, read QPS performance seems to see a minor decrease in most scenarios. This minimal performance impact was judged a reasonable investment for the breadth of monitoring and debugging information returned.
- Write QPS to cluster leaders seems to be increased by a small margin. This is because the main loop and entry apply loops were decoupled in the etcd raft logic, eliminating several blocks between them.
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.

View File

@ -0,0 +1,72 @@
## Physical machines
GCE n1-highcpu-2 machine type
- 1x dedicated local SSD mounted under /var/lib/etcd
- 1x dedicated slow disk for the OS
- 1.8 GB memory
- 2x CPUs
## etcd Cluster
3 etcd 2.2.0-rc members, each running on a single machine.
Detailed versions:
```
etcd Version: 2.2.0-alpha.1+git
Git SHA: 59a5a7e
Go Version: go1.4.2
Go OS/Arch: linux/amd64
```
Also, we use 3 etcd 2.1.0 alpha-stage members to form a cluster as the performance baseline. etcd's commit head is at [c7146bd5][c7146bd5], the same as the one used in the [etcd 2.1 benchmark][etcd-2.1-benchmark].
## Testing
Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
## Performance
### reading one single key
| key size in bytes | number of clients | target etcd server | read QPS | 90th Percentile Latency (ms) |
|-------------------|-------------------|--------------------|----------|---------------|
| 64 | 1 | leader only | 2804 (-5%) | 0.4 (+0%) |
| 64 | 64 | leader only | 17816 (+0%) | 5.7 (-6%) |
| 64 | 256 | leader only | 18667 (-6%) | 20.4 (+2%) |
| 256 | 1 | leader only | 2181 (-15%) | 0.5 (+25%) |
| 256 | 64 | leader only | 17435 (-7%) | 6.0 (+9%) |
| 256 | 256 | leader only | 18180 (-8%) | 21.3 (+3%) |
| 64 | 64 | all servers | 46965 (-4%) | 2.1 (+0%) |
| 64 | 256 | all servers | 55286 (-6%) | 7.4 (+6%) |
| 256 | 64 | all servers | 46603 (-6%) | 2.1 (+5%) |
| 256 | 256 | all servers | 55291 (-6%) | 7.3 (+4%) |
### writing one single key
| key size in bytes | number of clients | target etcd server | write QPS | 90th Percentile Latency (ms) |
|-------------------|-------------------|--------------------|-----------|---------------|
| 64 | 1 | leader only | 76 (+22%) | 19.4 (-15%) |
| 64 | 64 | leader only | 2461 (+45%) | 31.8 (-32%) |
| 64 | 256 | leader only | 4275 (+1%) | 69.6 (-10%) |
| 256 | 1 | leader only | 64 (+20%) | 16.7 (-30%) |
| 256 | 64 | leader only | 2385 (+30%) | 31.5 (-19%) |
| 256 | 256 | leader only | 4353 (-3%) | 74.0 (+9%) |
| 64 | 64 | all servers | 2005 (+81%) | 49.8 (-55%) |
| 64 | 256 | all servers | 4868 (+35%) | 81.5 (-40%) |
| 256 | 64 | all servers | 1925 (+72%) | 47.7 (-59%) |
| 256 | 256 | all servers | 4975 (+36%) | 70.3 (-36%) |
### performance changes explanation
- read QPS in most scenarios is decreased by 5~8%. The reason is that etcd now records store metrics for each store operation. The metrics are important for monitoring and debugging, so this is acceptable.
- write QPS to the leader is increased by 20~30%. This is because we decoupled the raft main loop and entry apply loop, which previously blocked each other.
- write QPS to all servers is increased by 30~80% because followers receive the latest commit index sooner and commit proposals faster.
[boom]: https://github.com/rakyll/boom
[c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
[etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
[hack-benchmark]: /hack/benchmark/

View File

@ -0,0 +1,47 @@
## Physical machine
GCE n1-standard-2 machine type
- 1x dedicated local SSD mounted under /var/lib/etcd
- 1x dedicated slow disk for the OS
- 7.5 GB memory
- 2x CPUs
## etcd
```
etcd Version: 2.2.0-rc.0+git
Git SHA: 103cb5c
Go Version: go1.5
Go OS/Arch: linux/amd64
```
## Testing
Start a 3-member etcd cluster, each member using 2 cores.
The key name length is always 64 bytes, which is a reasonable average key length.
## Memory Maximal Usage
- etcd reaches its maximal memory usage when one follower is dead and the leader keeps sending snapshots.
- `max RSS` is the maximal memory usage recorded in 3 runs.
| value bytes | key number | data size(MB) | max RSS(MB) | max RSS/data rate on leader |
|-------------|-------------|---------------|-------------|-----------------------------|
| 128 | 50000 | 6 | 433 | 72x |
| 128 | 100000 | 12 | 659 | 54x |
| 128 | 200000 | 24 | 1466 | 61x |
| 1024 | 50000 | 48 | 1253 | 26x |
| 1024 | 100000 | 96 | 2344 | 24x |
| 1024 | 200000 | 192 | 4361 | 22x |
## Data Size Threshold
- When etcd reaches its data size threshold, it may easily trigger leader elections and drop some proposals.
- In most cases, the etcd cluster should work smoothly if it doesn't hit the threshold. If it doesn't work well due to insufficient resources, decrease its data size.
| value bytes | key number limitation | suggested data size threshold(MB) | consumed RSS(MB) |
|-------------|-----------------------|-----------------------------------|------------------|
| 128 | 400K | 48 | 2400 |
| 1024 | 300K | 292 | 6500 |

View File

@ -0,0 +1,42 @@
## Physical machines
GCE n1-highcpu-2 machine type
- 1x dedicated local SSD mounted under /var/lib/etcd
- 1x dedicated slow disk for the OS
- 1.8 GB memory
- 2x CPUs
- etcd version 2.2.0
## etcd Cluster
1 etcd member running in v3 demo mode
## Testing
Use [etcd v3 benchmark tool][etcd-v3-benchmark].
## Performance
### reading one single key
| key size in bytes | number of clients | read QPS | 90th Percentile Latency (ms) |
|-------------------|-------------------|----------|---------------|
| 256 | 1 | 2716 | 0.4 |
| 256 | 64 | 16623 | 6.1 |
| 256 | 256 | 16622 | 21.7 |
The performance is nearly the same as that of an empty server handler.
### reading one single key after putting
| key size in bytes | number of clients | read QPS | 90th Percentile Latency (ms) |
|-------------------|-------------------|----------|---------------|
| 256 | 1 | 2269 | 0.5 |
| 256 | 64 | 13582 | 8.6 |
| 256 | 256 | 13262 | 47.5 |
The performance of an empty server handler is not affected by one put, so the
performance degradation is likely caused by the storage package.
[etcd-v3-benchmark]: /tools/benchmark/

View File

@ -0,0 +1,77 @@
# Watch Memory Usage Benchmark
*NOTE*: The watch features are under active development, and their memory usage may change as that development progresses. We do not expect it to significantly increase beyond the figures stated below.
A primary goal of etcd is supporting a very large number of watchers doing a massive amount of watching. etcd aims to support O(10k) clients, O(100k) watch streams (O(10) streams per client) and O(10M) total watchings (O(100) watchings per stream). The memory consumed by each individual watching accounts for the largest portion of etcd's overall usage, and is therefore the focus of current and future optimizations.
Three related components of etcd watch consume physical memory: each `grpc.Conn`, each watch stream, and each instance of the watching activity. `grpc.Conn` maintains the actual TCP connection and other gRPC connection state. Each `grpc.Conn` consumes O(10kb) of memory, and might have multiple watch streams attached.
Each watch stream is an independent HTTP2 connection which consumes another O(10kb) of memory.
Multiple watchings might share one watch stream.
Watching is the actual struct that tracks the changes on the key-value store. Each watching should only consume < O(1kb).
```
+-------+
| watch |
+---------> | foo |
| +-------+
+------+-----+
| stream |
+--------------> | |
| +------+-----+ +-------+
| | | watch |
| +---------> | bar |
+-----+------+ +-------+
| | +------------+
| conn +-------> | stream |
| | | |
+-----+------+ +------------+
|
|
|
| +------------+
+--------------> | stream |
| |
+------------+
```
The theoretical memory consumption of watch can be approximated with the formula:
`memory = c1 * number_of_connections + c2 * number_of_watch_streams + c3 * number_of_watchings`
## Testing Environment
etcd version
- git head https://github.com/coreos/etcd/commit/185097ffaa627b909007e772c175e8fefac17af3
GCE n1-standard-2 machine type
- 7.5 GB memory
- 2x CPUs
## Overall memory usage
The overall memory usage captures how much [RSS][rss] etcd consumes with the client watchers. While the result may vary by as much as 10%, it is still meaningful, since the goal is to learn about the rough memory usage and the pattern of allocations.
With the benchmark results, we can roughly calculate `c1 = 17kb`, `c2 = 18kb` and `c3 = 350bytes`. So each additional client connection consumes 17kb of memory, each additional stream consumes 18kb, and each additional watching adds only about 350 bytes. A single etcd server can maintain millions of watchings with a few GB of memory in the normal case.
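As a rough sanity check, the small Go program below plugs these fitted constants into the formula for one row of the table that follows; the constants are approximations from this benchmark, not exact figures:
```go
package main

import "fmt"

const (
	connKB   = 17  // approximate memory per connection (KB)
	streamKB = 18  // approximate memory per watch stream (KB)
	watchB   = 350 // approximate memory per watching (bytes)
)

func main() {
	conns, streamsPerConn, watchingsPerStream := 1000, 50, 10
	streams := conns * streamsPerConn
	watchings := streams * watchingsPerStream
	totalKB := conns*connKB + streams*streamKB + watchings*watchB/1024
	// prints ~1062 MB, in the same ballpark as the measured 1171MB
	// for 1k clients x 50 streams x 10 watchings per stream
	fmt.Printf("estimated memory: ~%d MB\n", totalKB/1024)
}
```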
| clients | streams per client | watchings per stream | total watching | memory usage |
|---------|---------|-----------|----------------|--------------|
| 1k | 1 | 1 | 1k | 50MB |
| 2k | 1 | 1 | 2k | 90MB |
| 5k | 1 | 1 | 5k | 200MB |
| 1k | 10 | 1 | 10k | 217MB |
| 2k | 10 | 1 | 20k | 417MB |
| 5k | 10 | 1 | 50k | 980MB |
| 1k | 50 | 1 | 50k | 1001MB |
| 2k | 50 | 1 | 100k | 1960MB |
| 5k | 50 | 1 | 250k | 4700MB |
| 1k | 50 | 10 | 500k | 1171MB |
| 2k | 50 | 10 | 1M | 2371MB |
| 5k | 50 | 10 | 2.5M | 5710MB |
| 1k | 50 | 100 | 5M | 2380MB |
| 2k | 50 | 100 | 10M | 4672MB |
| 5k | 50 | 100 | 50M | *OOM* |
[rss]: https://en.wikipedia.org/wiki/Resident_set_size

View File

@ -0,0 +1,98 @@
# Storage Memory Usage Benchmark
<!---todo: link storage to storage design doc-->
Two components of etcd storage consume physical memory. The etcd process allocates an *in-memory index* to speed key lookup. The process's *page cache*, managed by the operating system, stores recently-accessed data from disk for quick re-use.
The in-memory index holds all the keys in a [B-tree][btree] data structure, along with pointers to the on-disk data (the values). Each key in the B-tree may contain multiple pointers, pointing to different versions of its values. The theoretical memory consumption of the in-memory index can hence be approximated with the formula:
`N * (c1 + avg_key_size) + N * (avg_versions_of_key) * (c2 + size_of_pointer)`
where `c1` is the key metadata overhead and `c2` is the version metadata overhead.
The graph shows the detailed structure of the in-memory index B-tree.
```
In mem index
+------------+
| key || ... |
+--------------+ | || |
| | +------------+
| | | v1 || ... |
| disk <----------------| || | Tree Node
| | +------------+
| | | v2 || ... |
| <----------------+ || |
| | +------------+
+--------------+ +-----+ | | |
| | | | |
| +------------+
|
|
^
------+
| ... |
| |
+-----+
| ... | Tree Node
| |
+-----+
| ... |
| |
------+
```
[Page cache memory][pagecache] is managed by the operating system and is not covered in detail in this document.
## Testing Environment
etcd version
- git head https://github.com/coreos/etcd/commit/776e9fb7be7eee5e6b58ab977c8887b4fe4d48db
GCE n1-standard-2 machine type
- 7.5 GB memory
- 2x CPUs
## In-memory index memory usage
In this test, we only benchmark the memory usage of the in-memory index. The goal is to find `c1` and `c2` mentioned above and to understand the hard limit of memory consumption of the storage.
We measure the memory usage via Go's `runtime.ReadMemStats`, taking the difference in total allocated bytes before and after creating the index. This cannot perfectly reflect the memory usage of the in-memory index itself, but it shows the rough consumption pattern.
| N | versions | key size | memory usage |
|------|----------|----------|--------------|
| 100K | 1 | 64bytes | 22MB |
| 100K | 5 | 64bytes | 39MB |
| 1M | 1 | 64bytes | 218MB |
| 1M | 5 | 64bytes | 432MB |
| 100K | 1 | 256bytes | 41MB |
| 100K | 5 | 256bytes | 65MB |
| 1M | 1 | 256bytes | 409MB |
| 1M | 5 | 256bytes | 506MB |
Based on the result, we can calculate `c1=120bytes` and `c2=30bytes`. We only need two sets of data to calculate `c1` and `c2`, since they are the only unknown variables in the formula; the quoted values are the averages over the four pairs of `c1` and `c2` we calculated. The key metadata overhead is still relatively nontrivial (50%) for small key-value pairs. However, this is a significant improvement over the old store, which had at least 1000% overhead.
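As a quick cross-check, the Go snippet below plugs the fitted constants into the formula for one benchmark row, assuming 8-byte pointers; the estimate lands in the same ballpark as the measurement:
```go
package main

import "fmt"

func main() {
	const (
		c1      = 120 // key metadata overhead (bytes)
		c2      = 30  // version metadata overhead (bytes)
		ptrSize = 8   // assumed size of a pointer (bytes)
		keySize = 64  // average key size (bytes)
	)
	n, versions := 1000000, 5
	total := n*(c1+keySize) + n*versions*(c2+ptrSize)
	// prints ~356 MB, against a measured 432MB for N=1M,
	// 5 versions, 64-byte keys
	fmt.Printf("estimated index size: ~%d MB\n", total/(1024*1024))
}
```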
## Overall memory usage
The overall memory usage captures how much RSS etcd consumes with the storage. The value size should have very little impact on the overall memory usage of etcd, since we keep values on disk and only retain hot values in memory, managed by the OS page cache.
| N | versions | key size | value size | memory usage |
|------|----------|----------|------------|--------------|
| 100K | 1 | 64bytes | 256bytes | 40MB |
| 100K | 5 | 64bytes | 256bytes | 89MB |
| 1M | 1 | 64bytes | 256bytes | 470MB |
| 1M | 5 | 64bytes | 256bytes | 880MB |
| 100K | 1 | 64bytes | 1KB | 102MB |
| 100K | 5 | 64bytes | 1KB | 164MB |
| 1M | 1 | 64bytes | 1KB | 587MB |
| 1M | 5 | 64bytes | 1KB | 836MB |
Based on the result, we know the value size does not significantly impact the memory consumption. There is some minor increase due to more data held in the OS page cache.
[btree]: https://en.wikipedia.org/wiki/B-tree
[pagecache]: https://en.wikipedia.org/wiki/Page_cache

View File

@ -0,0 +1,26 @@
# Branch Management
## Guide
* New development occurs on the [master branch][master].
* Master branch should always have a green build!
* Backwards-compatible bug fixes should target the master branch and subsequently be ported to stable branches.
* Once the master branch is ready for release, it will be tagged and become the new stable branch.
The etcd team has adopted a *rolling release model* and supports one stable version of etcd.
### Master branch
The `master` branch is our development branch. All new features land here first.
If you want to try new features, pull `master` and play with it. Note that `master` may not be stable because new features may introduce bugs.
Before the release of the next stable version, feature PRs will be frozen. We will focus on testing, bug fixes, and documentation for one to two weeks.
### Stable branches
All branches with prefix `release-` are considered _stable_ branches.
After every minor release (see http://semver.org/), we will have a new stable branch for that release. We will keep fixing backwards-compatible bugs for the latest stable release, but not for previous releases. A _patch_ release, incorporating any bug fixes, will be cut roughly every two weeks, given pending fixes.
[master]: https://github.com/coreos/etcd/tree/master

View File

@ -39,7 +39,7 @@ To start etcd automatically using custom settings at startup in Linux, using a [
+ env variable: ETCD_HEARTBEAT_INTERVAL
### --election-timeout
+ Time (in milliseconds) for an election to timeout. See [tuning.md](tuning.md#time-parameters) for details.
+ default: "1000"
+ env variable: ETCD_ELECTION_TIMEOUT

View File

@ -0,0 +1,109 @@
# etcd release guide
This guide describes how to release a new version of etcd.
The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date if you make changes to the release process.
## Prepare Release
Set the desired version as an environment variable for the following steps. Here is an example to release 2.1.3:
```
export VERSION=v2.1.3
export PREV_VERSION=v2.1.2
```
All release version numbers follow the format of [semantic versioning 2.0.0](http://semver.org/).
### Major, Minor Version Release, or its Pre-release
- Ensure the relevant milestone on GitHub is complete. All referenced issues should be closed, or moved elsewhere.
- Remove this release from [roadmap](https://github.com/coreos/etcd/blob/master/ROADMAP.md), if necessary.
- Ensure the latest upgrade documentation is available.
- Bump [hardcoded MinClusterVersion in the repository](https://github.com/coreos/etcd/blob/master/version/version.go#L29), if necessary.
- Add feature capability maps for the new version, if necessary.
### Patch Version Release
- Discuss which commits should be backported to the patch release. The commits should not include merge commits.
- Cherry-pick these commits, starting from the oldest, into the stable branch.
## Write Release Note
- Write an introduction for the new release: for example, what major bugs are fixed, what new features are introduced, or what performance improvements are made.
- Write a changelog since the last release. The changelog should be straightforward and easy to understand for the end-user.
- Put `[GH XXXX]` at the head of each change line to reference the pull request that introduced the change, and link it to the pull request.
## Tag Version
- Bump [hardcoded Version in the repository](https://github.com/coreos/etcd/blob/master/version/version.go#L30) to the latest version `${VERSION}`.
- Ensure all tests on the CI system pass.
- Manually check that etcd builds on Linux, Darwin and Windows.
- Manually check that upgrading an etcd cluster from the previous minor version works well.
- Manually check that new features work well.
- Add a signed tag through `git tag -s ${VERSION}`.
- Sanity check tag correctness through `git show tags/$VERSION`.
- Push the tag to GitHub through `git push origin tags/$VERSION`. This assumes `origin` corresponds to "https://github.com/coreos/etcd".
## Build Release Binaries and Images
- Ensure `actool` is available, or install it with `go get github.com/appc/spec/actool`.
- Ensure `docker` is available.
Run the release script in the root directory:
```
./scripts/release.sh ${VERSION}
```
It generates all release binaries and images under the directory ./release.
## Sign Binaries and Images
Choose the appropriate private key to sign the generated binaries and images.
The following commands are used for signing a public release:
```
cd release
# personal GPG is okay for now
for i in etcd-*{.zip,.tar.gz}; do gpg --sign ${i}; done
# use `CoreOS ACI Builder <release@coreos.com>` secret key
gpg -u 88182190 -a --output etcd-${VERSION}-linux-amd64.aci.asc --detach-sig etcd-${VERSION}-linux-amd64.aci
```
## Publish Release Page in GitHub
- Set release title as the version name.
- Follow the format of previous release pages.
- Attach the generated binaries, aci image and signatures.
- Select whether it is a pre-release.
- Publish the release!
## Publish Docker Image in Quay.io
- Push docker image:
```
docker login quay.io
docker push quay.io/coreos/etcd:${VERSION}
```
- Add `latest` tag to the new image on [quay.io](https://quay.io/repository/coreos/etcd?tag=latest&tab=tags) if this is a stable release.
## Announce to the etcd-dev Google group
- Follow the format of [previous release emails](https://groups.google.com/forum/#!forum/etcd-dev).
- Make sure to include a list of authors that contributed since the previous release - something like the following might be handy:
```
git log ...${PREV_VERSION} --pretty=format:"%an" | sort | uniq | tr '\n' ',' | sed -e 's#,#, #g' -e 's#, $##'
```
- Send email to etcd-dev@googlegroups.com
## Post Release
- Create a new stable branch through `git push origin ${VERSION_MAJOR}.${VERSION_MINOR}` if this is a major stable release. This assumes `origin` corresponds to "https://github.com/coreos/etcd".
- Bump [hardcoded Version in the repository](https://github.com/coreos/etcd/blob/master/version/version.go#L30) to the version `${VERSION}+git`.

View File

@ -1,83 +1,84 @@
# FAQ
## 1) Why can an etcd client read an old version of data when a majority of the etcd cluster members are down?
In situations where a client connects to a minority, etcd favors availability
over consistency by default. This means that even though data might be “out of
date”, it is still better to return something versus nothing.
In order to confirm that a read is up to date with a majority of the cluster,
the client can use the `quorum=true` parameter on reads of keys. This means
that a majority of the cluster is checked on reads before returning the data,
otherwise the read will timeout and fail.
## 2) With quorum=false, doesn't this mean that if my client switched the member it was connected to, that it could experience a logical ordering where the cluster goes backwards in time?
Yes, but this could be handled at the etcd client implementation via
remembering the last seen index. The “index” is the cluster's single
irrevocable sequence of the entire modification history. The client could
remember the last seen index, and determine via comparing the index returned on
the GET whether or not the state of the key-value pair is before or after its
last seen state.
## 3) What happens if a watch is registered on a minority member?
The watch will stay untriggered, even as modifications are occurring in the
majority quorum. This is an open issue, and is being addressed in v3. There are
multiple ways to work around the watch trigger not firing.
1) build a signaling mechanism independent of etcd. This could be as simple as
a “pulse” to the client to reissue a GET with quorum=true for the most recent
version of the data.
2) poll on the `/v2/keys` endpoint and check that the raft-index is increasing every
timeout.
## 4) What is a proxy used for?
A proxy is a redirection server to the etcd cluster. The proxy handles the
redirection of a client to the current configuration of the etcd cluster. A
typical use case is to start a proxy on a machine, and on first boot up of the
proxy specify both the `--proxy` flag and the `--initial-cluster` flag.
From there, any etcdctl client that starts up automatically speaks to the local
proxy and the proxy redirects operations to the current configuration of the
cluster it was originally paired with.
In the v2 spec of etcd, proxies cannot be promoted to members of the cluster.
They also cannot be promoted to followers or at any point become part of the
replication of the etcd cluster itself.
## 5) How is cluster membership and health handled in etcd v2?
The design goal of etcd is that reconfiguration is simply an API, and health
monitoring and addition/removal of members is up to the individual application
and their integration with the reconfiguration API.
Thus, a member that is down, even infinitely, will never be automatically
removed from the etcd cluster member list.
This makes sense because it's usually an application level / administrative
action to determine whether a reconfiguration should happen based on health.
For more information, refer to the [runtime reconfiguration design document][runtime-reconf-design].
## 6) How does --endpoint work with etcdctl?
The `--endpoint` flag can specify any number of etcd cluster members in a comma
separated list. This list might be a subset, equal to, or more than the actual
etcd cluster member list itself.
If only one peer is specified via the `--endpoint` flag, etcdctl discovers the
rest of the cluster via the member list of that one peer, and then it randomly
chooses a member to use. Again, the client can use the `quorum=true` flag on
reads, which will always fail when using a member in the minority.
If peers from multiple clusters are specified via the `--endpoint` flag, etcdctl
will randomly choose a peer, and the request will simply get routed to one of
the clusters. This is probably not what you want.
Note: --peers flag is now deprecated and --endpoint should be used instead,
as it might confuse users to give etcdctl a peerURL.
[runtime-reconf-design]: runtime-reconf-design.md

View File

@ -0,0 +1,35 @@
# Glossary
This document defines the various terms used in etcd documentation, command line and source code.
## Node
A node is an instance of the raft state machine.
It has a unique identifier, and records other nodes' progress internally when it is the leader.
## Member
A member is an instance of etcd. It hosts a node, and provides service to clients.
## Cluster
A cluster consists of several members.
The node in each member follows the raft consensus protocol to replicate logs. The cluster receives proposals from members, commits them, and applies them to the local store.
## Peer
A peer is another member of the same cluster.
## Proposal
A proposal is a request (for example, a write request or a configuration change request) that needs to go through the raft protocol.
## Client
A client is a caller of the cluster's HTTP API.
## Machine (deprecated)
The term used for member in etcd before 2.0.

View File

@ -0,0 +1,124 @@
# Libraries and Tools
**Tools**
- [etcdctl](https://github.com/coreos/etcd/tree/master/etcdctl) - A command line client for etcd
- [etcd-backup](https://github.com/fanhattan/etcd-backup) - A powerful command line utility for dumping/restoring etcd - Supports v2
- [etcd-dump](https://npmjs.org/package/etcd-dump) - Command line utility for dumping/restoring etcd.
- [etcd-fs](https://github.com/xetorthio/etcd-fs) - FUSE filesystem for etcd
- [etcddir](https://github.com/rekby/etcddir) - Real-time sync between etcd and a local directory. Works with Windows and Linux.
- [etcd-browser](https://github.com/henszey/etcd-browser) - A web-based key/value editor for etcd using AngularJS
- [etcd-lock](https://github.com/datawisesystems/etcd-lock) - Master election & distributed r/w lock implementation using etcd - Supports v2
- [etcd-console](https://github.com/matishsiao/etcd-console) - A web-based key/value editor for etcd using PHP
- [etcd-viewer](https://github.com/nikfoundas/etcd-viewer) - An etcd key-value store editor/viewer written in Java
- [etcdtool](https://github.com/mickep76/etcdtool) - Export/Import/Edit etcd directory as JSON/YAML/TOML and Validate directory using JSON schema
- [etcd-rest](https://github.com/mickep76/etcd-rest) - Create generic REST API in Go using etcd as a backend with validation using JSON schema
- [etcdsh](https://github.com/kamilhark/etcdsh) - A command line client with support for command history and tab completion. Supports v2
**Go libraries**
- [etcd/client](https://github.com/coreos/etcd/blob/master/client) - the officially maintained Go client
- [go-etcd](https://github.com/coreos/go-etcd) - the deprecated official client. May be useful for older (<2.0.0) versions of etcd.
**Java libraries**
- [boonproject/etcd](https://github.com/boonproject/boon/blob/master/etcd/README.md) - Supports v2, Async/Sync and waits
- [justinsb/jetcd](https://github.com/justinsb/jetcd)
- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
- [jurmous/etcd4j](https://github.com/jurmous/etcd4j) - Supports v2, Async/Sync, waits and SSL
- [AdoHe/etcd4j](http://github.com/AdoHe/etcd4j) - Supports v2 (enhance for real production cluster)
**Python libraries**
- [jplana/python-etcd](https://github.com/jplana/python-etcd) - Supports v2
- [russellhaering/txetcd](https://github.com/russellhaering/txetcd) - a Twisted Python library
- [cholcombe973/autodock](https://github.com/cholcombe973/autodock) - A docker deployment automation tool
- [lisael/aioetcd](https://github.com/lisael/aioetcd) - (Python 3.4+) Asyncio coroutines client (Supports v2)
**Node libraries**
- [stianeikeland/node-etcd](https://github.com/stianeikeland/node-etcd) - Supports v2 (w Coffeescript)
- [lavagetto/nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) - Supports v2
- [deedubs/node-etcd-config](https://github.com/deedubs/node-etcd-config) - Supports v2
**Ruby libraries**
- [iconara/etcd-rb](https://github.com/iconara/etcd-rb)
- [jpfuentes2/etcd-ruby](https://github.com/jpfuentes2/etcd-ruby)
- [ranjib/etcd-ruby](https://github.com/ranjib/etcd-ruby) - Supports v2
**C libraries**
- [jdarcy/etcd-api](https://github.com/jdarcy/etcd-api) - Supports v2
- [shafreeck/cetcd](https://github.com/shafreeck/cetcd) - Supports v2
**C++ libraries**
- [edwardcapriolo/etcdcpp](https://github.com/edwardcapriolo/etcdcpp) - Supports v2
- [suryanathan/etcdcpp](https://github.com/suryanathan/etcdcpp) - Supports v2 (with waits)
**Clojure libraries**
- [aterreno/etcd-clojure](https://github.com/aterreno/etcd-clojure)
- [dwwoelfel/cetcd](https://github.com/dwwoelfel/cetcd) - Supports v2
- [rthomas/clj-etcd](https://github.com/rthomas/clj-etcd) - Supports v2
**Erlang libraries**
- [marshall-lee/etcd.erl](https://github.com/marshall-lee/etcd.erl)
**.Net Libraries**
- [wangjia184/etcdnet](https://github.com/wangjia184/etcdnet) - Supports v2
- [drusellers/etcetera](https://github.com/drusellers/etcetera)
**PHP Libraries**
- [linkorb/etcd-php](https://github.com/linkorb/etcd-php)
**Haskell libraries**
- [wereHamster/etcd-hs](https://github.com/wereHamster/etcd-hs)
**R libraries**
- [ropensci/etseed](https://github.com/ropensci/etseed)
**Tcl libraries**
- [efrecon/etcd-tcl](https://github.com/efrecon/etcd-tcl) - Supports v2, except wait.
**Chef Integration**
- [coderanger/etcd-chef](https://github.com/coderanger/etcd-chef)
**Chef Cookbook**
- [spheromak/etcd-cookbook](https://github.com/spheromak/etcd-cookbook)
**BOSH Releases**
- [cloudfoundry-community/etcd-boshrelease](https://github.com/cloudfoundry-community/etcd-boshrelease)
- [cloudfoundry/cf-release](https://github.com/cloudfoundry/cf-release/tree/master/jobs/etcd)
**Projects using etcd**
- [binocarlos/yoda](https://github.com/binocarlos/yoda) - etcd + ZeroMQ
- [calavera/active-proxy](https://github.com/calavera/active-proxy) - HTTP Proxy configured with etcd
- [derekchiang/etcdplus](https://github.com/derekchiang/etcdplus) - A set of distributed synchronization primitives built upon etcd
- [go-discover](https://github.com/flynn/go-discover) - service discovery in Go
- [gleicon/goreman](https://github.com/gleicon/goreman/tree/etcd) - Branch of the Go Foreman clone with etcd support
- [garethr/hiera-etcd](https://github.com/garethr/hiera-etcd) - Puppet hiera backend using etcd
- [mattn/etcd-vim](https://github.com/mattn/etcd-vim) - SET and GET keys from inside vim
- [mattn/etcdenv](https://github.com/mattn/etcdenv) - "env" shebang with etcd integration
- [kelseyhightower/confd](https://github.com/kelseyhightower/confd) - Manage local app config files using templates and data from etcd
- [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
- [fleet](https://github.com/coreos/fleet) - Distributed init system
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - Container cluster manager introduced by Google.
- [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
- [duedil-ltd/discodns](https://github.com/duedil-ltd/discodns) - Simple DNS nameserver using etcd as a database for names and records.
- [skynetservices/skydns](https://github.com/skynetservices/skydns) - RFC compliant DNS server
- [xordataexchange/crypt](https://github.com/xordataexchange/crypt) - Securely store values in etcd using GPG encryption
- [spf13/viper](https://github.com/spf13/viper) - Go configuration library, reads values from ENV, pflags, files, and etcd with optional encryption
- [lytics/metafora](https://github.com/lytics/metafora) - Go distributed task library
- [ryandoyle/nss-etcd](https://github.com/ryandoyle/nss-etcd) - A GNU libc NSS module for resolving names from etcd.

View File

@ -0,0 +1,143 @@
# Metrics
etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset.
The simplest way to see the available metrics is to cURL the metrics endpoint `/metrics`. The format is described [here](http://prometheus.io/docs/instrumenting/exposition_formats/).
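For instance, a minimal Go program (assuming a local member serving client traffic on the default port) can dump the same endpoint:
```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// fetch the Prometheus text exposition format from a local member
	resp, err := http.Get("http://127.0.0.1:2379/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```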
Follow the [Prometheus getting started doc][prometheus-getting-started] to spin up a Prometheus server to collect etcd metrics.
The naming of metrics follows the suggested [Prometheus best practices][prometheus-naming]. A metric name has an `etcd` or `etcd_debugging` prefix as its namespace and a subsystem prefix (for example `wal` and `etcdserver`).
## etcd namespace metrics
The metrics under the `etcd` prefix are for monitoring and alerting. They are stable, high-level metrics. Any changes to these metrics will be noted in the release notes.
### http requests
These metrics describe the serving of requests (non-watch events) by etcd members in non-proxy mode: total
incoming requests, request failures, and processing latency (including raft rounds for storage). They are useful for tracking
user-generated traffic hitting the etcd cluster.
All these metrics are prefixed with `etcd_http_`
| Name | Description | Type |
|--------------------------------|-----------------------------------------------------------------------------------------|--------------------|
| received_total | Total number of events after parsing and auth. | Counter(method) |
| failed_total | Total number of failed events. | Counter(method,error) |
| successful_duration_seconds | Bucketed handling times of the requests, including raft rounds for writes. | Histogram(method) |
Example Prometheus queries that may be useful from these metrics (across all etcd members):
* `sum(rate(etcd_http_failed_total{job="etcd"}[1m])) by (method) / sum(rate(etcd_http_received_total{job="etcd"}[1m])) by (method)`
Shows the fraction of events that failed by HTTP method across all members, across a time window of `1m`.
* `sum(rate(etcd_http_received_total{job="etcd",method="GET"}[1m])) by (method)`
`sum(rate(etcd_http_received_total{job="etcd",method!="GET"}[1m])) by (method)`
Shows the rate of successful readonly/write queries across all servers, across a time window of `1m`.
* `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds_bucket{job="etcd",method="GET"}[5m])) by (le))`
`histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds_bucket{job="etcd",method!="GET"}[5m])) by (le))`
Shows the 90th percentile latency (in seconds) of read/write (respectively) event handling across all members, over a window of `5m`.
### proxy
etcd members operating in proxy mode do not directly perform store operations. They forward all requests to cluster instances.
Tracking the rate of requests coming from a proxy allows one to pin down which machine is performing most reads/writes.
All these metrics are prefixed with `etcd_proxy_`
| Name | Description | Type |
|---------------------------|-----------------------------------------------------------------------------------------|--------------------|
| requests_total | Total number of requests by this proxy instance. | Counter(method) |
| handled_total | Total number of fully handled requests, with responses from etcd members. | Counter(method) |
| dropped_total | Total number of dropped requests due to forwarding errors to etcd members. | Counter(method,error) |
| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. | Histogram(method) |
Example Prometheus queries that may be useful from these metrics (across all etcd servers):
* `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)`
Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`.
* `histogram_quantile(0.9, sum(rate(etcd_proxy_handling_duration_seconds_bucket{job="etcd",method="GET"}[5m])) by (le))`
`histogram_quantile(0.9, sum(rate(etcd_proxy_handling_duration_seconds_bucket{job="etcd",method!="GET"}[5m])) by (le))`
Shows the 90th percentile latency (in seconds) of handling of user requests across all proxy machines, over a window of `5m`.
* `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)`
Rate of failed requests on the proxy. This should be 0; spikes here indicate connectivity issues to the etcd cluster.
## etcd_debugging namespace metrics
The metrics under the `etcd_debugging` prefix are for debugging. They are very implementation dependent and volatile. They might be changed or removed without any warning in new etcd releases. Some of the metrics might be moved to the `etcd` prefix when they become more stable.
### etcdserver
| Name | Description | Type |
|-----------------------------------------|--------------------------------------------------|-----------|
| proposal_duration_seconds | The latency distributions of committing proposal | Histogram |
| proposals_pending | The current number of pending proposals | Gauge |
| proposals_failed_total | The total number of failed proposals | Counter |
[Proposal][glossary-proposal] duration (`proposal_duration_seconds`) provides a proposal commit latency histogram. The reported latency reflects network and disk IO delays in etcd.
Proposals pending (`proposals_pending`) indicates how many proposals are queued for commit. Rising pending proposals suggests there is a high client load or the cluster is unstable.
Failed proposals (`proposals_failed_total`) are normally related to two issues: temporary failures related to a leader election or longer duration downtime caused by a loss of quorum in the cluster.
### wal
| Name | Description | Type |
|------------------------------------|--------------------------------------------------|-----------|
| fsync_duration_seconds | The latency distributions of fsync called by wal | Histogram |
| last_index_saved | The index of the last entry saved by wal | Gauge |
Abnormally high fsync duration (`fsync_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.
### snapshot
| Name | Description | Type |
|--------------------------------------------|------------------------------------------------------------|-----------|
| snapshot_save_total_duration_seconds | The total latency distributions of save called by snapshot | Histogram |
Abnormally high snapshot duration (`snapshot_save_total_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.
### rafthttp
| Name | Description | Type | Labels |
|-----------------------------------|--------------------------------------------|--------------|--------------------------------|
| message_sent_latency_seconds | The latency distributions of messages sent | HistogramVec | sendingType, msgType, remoteID |
| message_sent_failed_total | The total number of failed messages sent | Summary | sendingType, msgType, remoteID |
Abnormally high message duration (`message_sent_latency_seconds`) indicates network issues and might cause the cluster to be unstable.
An increase in message failures (`message_sent_failed_total`) indicates more severe network issues and might cause the cluster to be unstable.
Label `sendingType` is the connection type used to send messages. `message`, `msgapp` and `msgappv2` use HTTP streaming, while `pipeline` issues an HTTP request for each message.
Label `msgType` is the type of raft message. `MsgApp` is log replication messages; `MsgSnap` is snapshot install messages; `MsgProp` is proposal forward messages; the others maintain internal raft status. Given large snapshots, a lengthy `MsgSnap` transmission latency should be expected. For other types of messages, given enough network bandwidth, latencies comparable to ping latency should be expected.
Label `remoteID` is the member ID of the message destination.
## Prometheus supplied metrics
The Prometheus client library provides a number of metrics under the `go` and `process` namespaces. There are a few that are particularly interesting.
| Name | Description | Type |
|-----------------------------------|--------------------------------------------|--------------|
| process_open_fds | Number of open file descriptors. | Gauge |
| process_max_fds | Maximum number of open file descriptors. | Gauge |
Heavy file descriptor (`process_open_fds`) usage (i.e., near the process's file descriptor limit, `process_max_fds`) indicates a potential file descriptor exhaustion issue. If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.
[glossary-proposal]: glossary.md#proposal
[prometheus]: http://prometheus.io/
[prometheus-getting-started]: http://prometheus.io/docs/introduction/getting_started/
[prometheus-naming]: http://prometheus.io/docs/practices/naming/

View File

@ -0,0 +1,62 @@
# FreeBSD
Starting with version 0.1.2, both etcd and etcdctl have been ported to FreeBSD and can
be installed either via the packages or ports system. Their versions have recently been
updated to 0.2.0, so you can now enjoy using etcd and etcdctl on FreeBSD 10.0 (RC4 as
of now) and 9.x, where they have been tested. They might also work when installed from
ports on earlier versions of FreeBSD, but your mileage may vary.
## Installation
### Using pkgng package system
1. If you do not have pkgng installed, install it by running `pkg` and answering 'Y'
when asked
2. Update your repository data with `pkg update`
3. Install etcd with `pkg install coreos-etcd coreos-etcdctl`
4. Verify successful installation with `pkg info | grep etcd` and you should get:
```
r@fbsd-10:/ # pkg info | grep etcd
coreos-etcd-0.2.0              Highly-available key value store and service discovery
coreos-etcdctl-0.2.0           Simple commandline client for etcd
r@fbsd-10:/ #
```
5. You're ready to use etcd and etcdctl! For more information about using pkgng, please
see: http://www.freebsd.org/doc/handbook/pkgng-intro.html
### Using ports system
1. If you do not have ports installed, install them with `portsnap fetch extract` (it
may take some time depending on your hardware and network connection)
2. Build etcd with `cd /usr/ports/devel/etcd && make install clean`; you
will get an option to build and install the documentation and etcdctl with it.
3. If you haven't installed etcdctl with etcd and would like to install it later, you can build it
with `cd /usr/ports/devel/etcdctl && make install clean`
4. Verify successful installation with `pkg info | grep etcd` and you should get:
```
r@fbsd-10:/ # pkg info | grep etcd
coreos-etcd-0.2.0              Highly-available key value store and service discovery
coreos-etcdctl-0.2.0           Simple commandline client for etcd
r@fbsd-10:/ #
```
5. You're ready to use etcd and etcdctl! For more information about using the ports system,
please see: https://www.freebsd.org/doc/handbook/ports-using.html
## Issues
If you find any issues with the build/install procedure, or you've found a problem that
you've verified is local to the FreeBSD version only (for example, by not being able to
reproduce it on any other platform, like OSX or Linux), please send a
problem report using this page for more
information: http://www.freebsd.org/send-pr.html


@ -0,0 +1,51 @@
# Production Users
This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check in on your experience and update this list.
## discovery.etcd.io
- *Application*: https://github.com/coreos/discovery.etcd.io
- *Launched*: Feb. 2014
- *Cluster Size*: 5 members, 5 discovery proxies
- *Order of Data Size*: 100s of Megabytes
- *Operator*: CoreOS, brandon.philips@coreos.com
- *Environment*: AWS
- *Backups*: Periodic async to S3
discovery.etcd.io is the longest continuously running etcd backed service that we know about. It is the basis of automatic cluster bootstrap and was launched in Feb. 2014: https://coreos.com/blog/etcd-0.3.0-released/.
## OpenTable
- *Application*: OpenTable internal service discovery and cluster configuration management
- *Launched*: May 2014
- *Cluster Size*: 3 members each in 6 independent clusters; approximately 50 nodes reading / writing
- *Order of Data Size*: 10s of MB
- *Operator*: OpenTable, Inc; sschlansker@opentable.com
- *Environment*: AWS, VMware
- *Backups*: None, all data can be re-created if necessary.
## cycoresys.com
- *Application*: multiple
- *Launched*: Jul. 2014
- *Cluster Size*: 3 members, _n_ proxies
- *Order of Data Size*: 100s of kilobytes
- *Operator*: CyCore Systems, Inc, sys@cycoresys.com
- *Environment*: Baremetal
- *Backups*: Periodic sync to Ceph RadosGW and DigitalOcean VM
CyCore Systems provides architecture and engineering for computing systems. This cluster provides microservices, virtual machines, databases, and storage clusters to a number of clients. It is built on CoreOS machines, with each machine in the cluster running etcd as a peer or proxy.
## Radius Intelligence
- *Application*: multiple internal tools, Kubernetes clusters, bootstrappable system configs
- *Launched*: June 2015
- *Cluster Size*: 2 clusters of 5 and 3 members; approximately a dozen nodes read/write
- *Order of Data Size*: 100s of kilobytes
- *Operator*: Radius Intelligence; jcderr@radius.com
- *Environment*: AWS, CoreOS, Kubernetes
- *Backups*: None, all data can be recreated if necessary.
Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these Kubernetes clusters as well as provide some basic environment-bootstrapping configuration keys.
[teamcity]: https://www.jetbrains.com/teamcity/


@ -0,0 +1,45 @@
# Reporting Bugs
If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
To make your bug report accurate and easy to understand, please try to create bug reports that are:
- Specific. Include as many details as possible: which version, what environment, what configuration, etc. You can also attach the etcd log (the startup log, which records the etcd configuration, is especially important).
- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce; in that case, please include the steps that might lead to the problem. You can also attach the affected etcd data dir and stack trace to the bug report.
- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. Involving too many dependencies in a bug report significantly slows down the fix. Debugging external systems that rely on etcd is out of scope, but we are happy to point you in the right direction or help you interact with etcd in the correct manner.
- Unique. Do not duplicate an existing bug report.
- Scoped. One bug per report. Do not follow up with another bug inside one report.
You might also want to read [Elika Etemad's article on filing good bug reports][filing-good-bugs] before creating a bug report.
We might ask you for further information to locate a bug. Duplicate bug reports will be closed.
## Frequently Asked Questions
### How to get a stack trace
``` bash
$ kill -QUIT $PID
```
### How to get etcd version
``` bash
$ etcd --version
```
### How to get etcd configuration and log when it runs as systemd service etcd2.service
``` bash
$ sudo systemctl cat etcd2
$ sudo journalctl -u etcd2
```
Due to an upstream systemd bug, journald may miss the last few log lines when a process exits. If journalctl says etcd stopped without a fatal or panic message, try `sudo journalctl -f -t etcd2` to get the full log.
[etcd-issue]: https://github.com/coreos/etcd/issues/new
[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/


@ -0,0 +1,211 @@
# Overview
The etcd v3 API is designed to give users a more efficient and cleaner abstraction compared to etcd v2. There are a number of semantic and protocol changes in this new API. For an overview [see Xiang Li's video](https://youtu.be/J5AioGtEPeQ?t=211).
To prove out the design of the v3 API, the team has also built [a number of example recipes](https://github.com/coreos/etcd/tree/master/contrib/recipes); there is also a [video discussing these recipes](https://www.youtube.com/watch?v=fj-2RY-3yVU&feature=youtu.be&t=590).
# Design
1. Flatten binary key-value space
2. Keep the event history until compaction
- access to old version of keys
- user controlled history compaction
3. Support range query
- Pagination support with limit argument
- Support consistency guarantee across multiple range queries
4. Replace TTL key with Lease
- more efficient, low-cost keep-alive
- a logical group of TTL keys
5. Replace CAS/CAD with multi-object Txn
- much more powerful and flexible
6. Support efficient watching with multiple ranges
7. RPC API supports the complete set of APIs.
- more efficient than JSON/HTTP
- additional txn/lease support
8. HTTP API supports a subset of APIs.
- easy for people to try out etcd
- easy for people to write simple etcd applications
## Notes
### Request Size Limitation
The maximum request size is around 1MB. Since etcd replicates requests in a streaming fashion, a very large request might block other requests for a long time. The use case for etcd is to store small configuration values, so we prevent users from submitting large requests. This also applies to Txn requests. We might relax the limit a little in the future or make it configurable.
## Protobuf Defined API
[api protobuf][api-protobuf]
[kv protobuf][kv-protobuf]
## Examples
### Put a key (foo=bar)
```
// A put is always successful
Put( PutRequest { key = foo, value = bar } )
PutResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 1,
raft_term = 0x1,
}
```
### Get a key (assume we have foo=bar)
```
Get ( RangeRequest { key = foo } )
RangeResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 1,
raft_term = 0x1,
kvs = {
{
key = foo,
value = bar,
create_revision = 1,
mod_revision = 1,
version = 1;
},
},
}
```
### Range over a key space (assume we have foo0=bar0… foo100=bar100)
```
Range ( RangeRequest { key = foo, end_key = foo80, limit = 30 } )
RangeResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 100,
raft_term = 0x1,
kvs = {
{
key = foo0,
value = bar0,
create_revision = 1,
mod_revision = 1,
version = 1;
},
...,
{
key = foo30,
value = bar30,
create_revision = 30,
mod_revision = 30,
version = 1;
},
},
}
```
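For illustration, here is a hedged sketch of the same paginated range using the Go `clientv3` package (the endpoint, import paths, and exact option names are assumptions and may vary by client version). Pinning later pages to the revision of the first response gives the consistency guarantee across multiple range queries mentioned in the design list:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	key, end := "foo", "foo80"
	rev := int64(0) // pin all pages to the revision of the first response
	for {
		opts := []clientv3.OpOption{clientv3.WithRange(end), clientv3.WithLimit(30)}
		if rev != 0 {
			opts = append(opts, clientv3.WithRev(rev)) // consistent view across pages
		}
		resp, err := cli.Get(context.Background(), key, opts...)
		if err != nil {
			panic(err)
		}
		rev = resp.Header.Revision
		for _, kv := range resp.Kvs {
			fmt.Printf("%s=%s\n", kv.Key, kv.Value)
		}
		if !resp.More || len(resp.Kvs) == 0 {
			break
		}
		// the next page starts just after the last key returned
		key = string(resp.Kvs[len(resp.Kvs)-1].Key) + "\x00"
	}
}
```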
### Finish a txn (assume we have foo0=bar0, foo1=bar1)
```
Txn(TxnRequest {
// mod_revision of foo0 is equal to 1, mod_revision of foo1 is greater than 1
compare = {
{compareType = equal, key = foo0, mod_revision = 1},
{compareType = greater, key = foo1, mod_revision = 1}}
},
// if the comparison succeeds, put foo2 = bar2
success = {PutRequest { key = foo2, value = success }},
// if the comparison fails, put foo2=fail
failure = {PutRequest { key = foo2, value = failure }},
)
TxnResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 3,
raft_term = 0x1,
succeeded = true,
responses = {
// response of PUT foo2=success
{
cluster_id = 0x1000,
member_id = 0x1,
revision = 3,
raft_term = 0x1,
}
}
}
```
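The same transaction, sketched with the Go `clientv3` package (a hedged example; the endpoint is an assumption and option names may differ slightly between client versions):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// mod_revision of foo0 equals 1 AND mod_revision of foo1 is greater than 1
	resp, err := cli.Txn(context.Background()).
		If(
			clientv3.Compare(clientv3.ModRevision("foo0"), "=", 1),
			clientv3.Compare(clientv3.ModRevision("foo1"), ">", 1),
		).
		Then(clientv3.OpPut("foo2", "success")). // runs if all comparisons hold
		Else(clientv3.OpPut("foo2", "failure")). // runs otherwise
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("succeeded:", resp.Succeeded)
}
```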
### Watch on a key/range
```
Watch( WatchRequest{
key = foo,
end_key = fop, // prefix foo
start_revision = 20,
end_revision = 10000,
// server decided notification frequency
progress_notification = true,
}
… // this can be a watch request stream
)
// put (foo0=bar0) event at 3
WatchResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 3,
raft_term = 0x1,
event_type = put,
kv = {
key = foo0,
value = bar0,
create_revision = 1,
mod_revision = 1,
version = 1;
},
}
// a notification at 2000
WatchResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 2000,
raft_term = 0x1,
// nil event as notification
}
// put (foo0=bar3000) event at 3000
WatchResponse {
cluster_id = 0x1000,
member_id = 0x1,
revision = 3000,
raft_term = 0x1,
event_type = put,
kv = {
key = foo0,
value = bar3000,
create_revision = 1,
mod_revision = 3000,
version = 2;
},
}
```
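A hedged `clientv3` sketch of the same watch (the endpoint is an assumption; `WithProgressNotify` requests the server-decided notifications shown above, which arrive as responses carrying no events):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Watch the "foo" prefix, starting from revision 20.
	wch := cli.Watch(context.Background(), "foo",
		clientv3.WithPrefix(),
		clientv3.WithRev(20),
		clientv3.WithProgressNotify(),
	)
	for wresp := range wch {
		if len(wresp.Events) == 0 {
			// a nil-event response is a progress notification
			fmt.Println("progress notification at revision", wresp.Header.Revision)
			continue
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %s=%s (mod_revision=%d)\n",
				ev.Type, ev.Kv.Key, ev.Kv.Value, ev.Kv.ModRevision)
		}
	}
}
```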
[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto


@ -0,0 +1,75 @@
# Tuning
The default settings in etcd should work well for installations on a local network where the average network latency is low.
However, when using etcd across multiple data centers or over networks with high latency you may need to tweak the heartbeat interval and election timeout settings.
The network isn't the only source of latency. Each request and response may be impacted by slow disks on both the leader and follower. Each of these timeouts represents the total time from request to successful response from the other machine.
## Time Parameters
The underlying distributed consensus protocol relies on two separate time parameters to ensure that nodes can hand off leadership if one stalls or goes offline.
The first parameter is called the *Heartbeat Interval*.
This is the frequency with which the leader will notify followers that it is still the leader.
As a best practice, this parameter should be set to roughly the round-trip time between members.
By default, etcd uses a `100ms` heartbeat interval.
The second parameter is the *Election Timeout*.
This timeout is how long a follower node will go without hearing a heartbeat before attempting to become leader itself.
By default, etcd uses a `1000ms` election timeout.
Adjusting these values is a trade-off.
The recommended value for the heartbeat interval is around the maximum of the average round-trip times (RTT) between members, normally 0.5-1.5x the round-trip time.
If the heartbeat interval is too low, etcd will send unnecessary messages that increase the usage of CPU and network resources.
On the other hand, a too-high heartbeat interval forces a high election timeout, and a higher election timeout takes longer to detect a leader failure.
The easiest way to measure round-trip time (RTT) is to use the [PING utility][ping].
The election timeout should be set based on the heartbeat interval and average round-trip time between members.
Election timeouts must be at least 10 times the round-trip time so they can account for variance in your network.
For example, if the round-trip time between your members is 10ms then you should have at least a 100ms election timeout.
You should also set your election timeout to at least 5 to 10 times your heartbeat interval to account for variance in leader replication.
For a heartbeat interval of 50ms you should set your election timeout to somewhere between 250ms and 500ms.
The upper limit of election timeout is 50000ms (50s), which should only be used when deploying a globally-distributed etcd cluster.
A reasonable round-trip time for the continental United States is 130ms, and the time between US and Japan is around 350-400ms.
If your network has uneven performance or regular packet delays/loss, a couple of retries may be necessary to successfully send a packet, so 5s is a safe upper bound on global round-trip time. Since the election timeout should be an order of magnitude bigger than the broadcast time, ~5s for a globally distributed cluster makes 50 seconds a reasonable maximum.
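To put the rules of thumb above in one place, here is a small illustrative Go sketch (not part of etcd; the thresholds simply encode the guidance in this section) that derives candidate settings from a measured worst-case RTT:

```go
package main

import (
	"fmt"
	"time"
)

// recommend derives candidate -heartbeat-interval and -election-timeout
// values (in milliseconds) from a measured worst-case round-trip time,
// following the rules of thumb above: heartbeat ~= RTT, election timeout
// at least 10x RTT and at least 5x the heartbeat, capped at 50s.
func recommend(rtt time.Duration) (heartbeatMs, electionMs int64) {
	heartbeat := rtt
	election := 10 * rtt
	if election < 5*heartbeat {
		election = 5 * heartbeat
	}
	if election > 50*time.Second {
		election = 50 * time.Second
	}
	return int64(heartbeat / time.Millisecond), int64(election / time.Millisecond)
}

func main() {
	for _, rtt := range []time.Duration{10 * time.Millisecond, 50 * time.Millisecond, 5 * time.Second} {
		h, e := recommend(rtt)
		fmt.Printf("RTT=%v -> -heartbeat-interval=%d -election-timeout=%d\n", rtt, h, e)
	}
}
```

For a 10ms RTT this yields the 100ms election timeout used as an example above, and for a ~5s global RTT it lands on the 50s maximum.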
The heartbeat interval and election timeout value should be the same for all members in one cluster. Setting different values for etcd members may disrupt cluster stability.
You can override the default values on the command line:
```sh
# Command line arguments:
$ etcd -heartbeat-interval=100 -election-timeout=500
# Environment variables:
$ ETCD_HEARTBEAT_INTERVAL=100 ETCD_ELECTION_TIMEOUT=500 etcd
```
The values are specified in milliseconds.
## Snapshots
etcd appends all key changes to a log file.
This log grows forever and is a complete linear history of every change made to the keys.
A complete history works well for lightly used clusters, but heavily used clusters would carry around a large log.
To avoid having a huge log, etcd makes periodic snapshots.
These snapshots provide a way for etcd to compact the log by saving the current state of the system and removing old logs.
### Snapshot Tuning
Creating snapshots can be expensive, so they're only created after a given number of changes to etcd.
By default, snapshots will be made after every 10,000 changes.
If etcd's memory usage and disk usage are too high, you can lower the snapshot threshold by setting the following on the command line:
```sh
# Command line arguments:
$ etcd -snapshot-count=5000
# Environment variables:
$ ETCD_SNAPSHOT_COUNT=5000 etcd
```
[ping]: https://en.wikipedia.org/wiki/Ping_(networking_utility)

Godeps/_workspace/.gitignore

@ -1,2 +0,0 @@
/pkg
/bin


@ -1,33 +0,0 @@
package goautoneg
import (
"testing"
)
var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"
func TestParseAccept(t *testing.T) {
alternatives := []string{"text/html", "image/png"}
content_type := Negotiate(chrome, alternatives)
if content_type != "image/png" {
t.Errorf("got %s expected image/png", content_type)
}
alternatives = []string{"text/html", "text/plain", "text/n3"}
content_type = Negotiate(chrome, alternatives)
if content_type != "text/html" {
t.Errorf("got %s expected text/html", content_type)
}
alternatives = []string{"text/n3", "text/plain"}
content_type = Negotiate(chrome, alternatives)
if content_type != "text/plain" {
t.Errorf("got %s expected text/plain", content_type)
}
alternatives = []string{"text/n3", "application/rdf+xml"}
content_type = Negotiate(chrome, alternatives)
if content_type != "text/n3" {
t.Errorf("got %s expected text/n3", content_type)
}
}


@ -1,247 +0,0 @@
package pcap
import (
"bytes"
"testing"
"time"
)
var testSimpleTcpPacket *Packet = &Packet{
Data: []byte{
0x00, 0x00, 0x0c, 0x9f, 0xf0, 0x20, 0xbc, 0x30, 0x5b, 0xe8, 0xd3, 0x49,
0x08, 0x00, 0x45, 0x00, 0x01, 0xa4, 0x39, 0xdf, 0x40, 0x00, 0x40, 0x06,
0x55, 0x5a, 0xac, 0x11, 0x51, 0x49, 0xad, 0xde, 0xfe, 0xe1, 0xc5, 0xf7,
0x00, 0x50, 0xc5, 0x7e, 0x0e, 0x48, 0x49, 0x07, 0x42, 0x32, 0x80, 0x18,
0x00, 0x73, 0xab, 0xb1, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x03, 0x77,
0x37, 0x9c, 0x42, 0x77, 0x5e, 0x3a, 0x47, 0x45, 0x54, 0x20, 0x2f, 0x20,
0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, 0x2e, 0x31, 0x0d, 0x0a, 0x48, 0x6f,
0x73, 0x74, 0x3a, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x66, 0x69, 0x73, 0x68,
0x2e, 0x63, 0x6f, 0x6d, 0x0d, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x6b, 0x65, 0x65, 0x70, 0x2d, 0x61,
0x6c, 0x69, 0x76, 0x65, 0x0d, 0x0a, 0x55, 0x73, 0x65, 0x72, 0x2d, 0x41,
0x67, 0x65, 0x6e, 0x74, 0x3a, 0x20, 0x4d, 0x6f, 0x7a, 0x69, 0x6c, 0x6c,
0x61, 0x2f, 0x35, 0x2e, 0x30, 0x20, 0x28, 0x58, 0x31, 0x31, 0x3b, 0x20,
0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x78, 0x38, 0x36, 0x5f, 0x36, 0x34,
0x29, 0x20, 0x41, 0x70, 0x70, 0x6c, 0x65, 0x57, 0x65, 0x62, 0x4b, 0x69,
0x74, 0x2f, 0x35, 0x33, 0x35, 0x2e, 0x32, 0x20, 0x28, 0x4b, 0x48, 0x54,
0x4d, 0x4c, 0x2c, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x47, 0x65, 0x63,
0x6b, 0x6f, 0x29, 0x20, 0x43, 0x68, 0x72, 0x6f, 0x6d, 0x65, 0x2f, 0x31,
0x35, 0x2e, 0x30, 0x2e, 0x38, 0x37, 0x34, 0x2e, 0x31, 0x32, 0x31, 0x20,
0x53, 0x61, 0x66, 0x61, 0x72, 0x69, 0x2f, 0x35, 0x33, 0x35, 0x2e, 0x32,
0x0d, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x3a, 0x20, 0x74, 0x65,
0x78, 0x74, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, 0x68, 0x74, 0x6d,
0x6c, 0x2b, 0x78, 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, 0x6d, 0x6c, 0x3b, 0x71, 0x3d,
0x30, 0x2e, 0x39, 0x2c, 0x2a, 0x2f, 0x2a, 0x3b, 0x71, 0x3d, 0x30, 0x2e,
0x38, 0x0d, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x45, 0x6e,
0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3a, 0x20, 0x67, 0x7a, 0x69, 0x70,
0x2c, 0x64, 0x65, 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, 0x63,
0x68, 0x0d, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x4c, 0x61,
0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x65, 0x6e, 0x2d, 0x55,
0x53, 0x2c, 0x65, 0x6e, 0x3b, 0x71, 0x3d, 0x30, 0x2e, 0x38, 0x0d, 0x0a,
0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x43, 0x68, 0x61, 0x72, 0x73,
0x65, 0x74, 0x3a, 0x20, 0x49, 0x53, 0x4f, 0x2d, 0x38, 0x38, 0x35, 0x39,
0x2d, 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x3b, 0x71, 0x3d, 0x30,
0x2e, 0x37, 0x2c, 0x2a, 0x3b, 0x71, 0x3d, 0x30, 0x2e, 0x33, 0x0d, 0x0a,
0x0d, 0x0a,
}}
func BenchmarkDecodeSimpleTcpPacket(b *testing.B) {
for i := 0; i < b.N; i++ {
testSimpleTcpPacket.Decode()
}
}
func TestDecodeSimpleTcpPacket(t *testing.T) {
p := testSimpleTcpPacket
p.Decode()
if p.DestMac != 0x00000c9ff020 {
t.Error("Dest mac", p.DestMac)
}
if p.SrcMac != 0xbc305be8d349 {
t.Error("Src mac", p.SrcMac)
}
if len(p.Headers) != 2 {
t.Error("Incorrect number of headers", len(p.Headers))
return
}
if ip, ipOk := p.Headers[0].(*Iphdr); ipOk {
if ip.Version != 4 {
t.Error("ip Version", ip.Version)
}
if ip.Ihl != 5 {
t.Error("ip header length", ip.Ihl)
}
if ip.Tos != 0 {
t.Error("ip TOS", ip.Tos)
}
if ip.Length != 420 {
t.Error("ip Length", ip.Length)
}
if ip.Id != 14815 {
t.Error("ip ID", ip.Id)
}
if ip.Flags != 0x02 {
t.Error("ip Flags", ip.Flags)
}
if ip.FragOffset != 0 {
t.Error("ip Fragoffset", ip.FragOffset)
}
if ip.Ttl != 64 {
t.Error("ip TTL", ip.Ttl)
}
if ip.Protocol != 6 {
t.Error("ip Protocol", ip.Protocol)
}
if ip.Checksum != 0x555A {
t.Error("ip Checksum", ip.Checksum)
}
if !bytes.Equal(ip.SrcIp, []byte{172, 17, 81, 73}) {
t.Error("ip Src", ip.SrcIp)
}
if !bytes.Equal(ip.DestIp, []byte{173, 222, 254, 225}) {
t.Error("ip Dest", ip.DestIp)
}
if tcp, tcpOk := p.Headers[1].(*Tcphdr); tcpOk {
if tcp.SrcPort != 50679 {
t.Error("tcp srcport", tcp.SrcPort)
}
if tcp.DestPort != 80 {
t.Error("tcp destport", tcp.DestPort)
}
if tcp.Seq != 0xc57e0e48 {
t.Error("tcp seq", tcp.Seq)
}
if tcp.Ack != 0x49074232 {
t.Error("tcp ack", tcp.Ack)
}
if tcp.DataOffset != 8 {
t.Error("tcp dataoffset", tcp.DataOffset)
}
if tcp.Flags != 0x18 {
t.Error("tcp flags", tcp.Flags)
}
if tcp.Window != 0x73 {
t.Error("tcp window", tcp.Window)
}
if tcp.Checksum != 0xabb1 {
t.Error("tcp checksum", tcp.Checksum)
}
if tcp.Urgent != 0 {
t.Error("tcp urgent", tcp.Urgent)
}
} else {
t.Error("Second header is not TCP header")
}
} else {
t.Error("First header is not IP header")
}
if string(p.Payload) != "GET / HTTP/1.1\r\nHost: www.fish.com\r\nConnection: keep-alive\r\nUser-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: gzip,deflate,sdch\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3\r\n\r\n" {
t.Error("--- PAYLOAD STRING ---\n", string(p.Payload), "\n--- PAYLOAD BYTES ---\n", p.Payload)
}
}
// Makes sure packet payload doesn't display the 6 trailing null of this packet
// as part of the payload. They're actually the ethernet trailer.
func TestDecodeSmallTcpPacketHasEmptyPayload(t *testing.T) {
p := &Packet{
// This packet is only 54 bits (an empty TCP RST), thus 6 trailing null
// bytes are added by the ethernet layer to make it the minimum packet size.
Data: []byte{
0xbc, 0x30, 0x5b, 0xe8, 0xd3, 0x49, 0xb8, 0xac, 0x6f, 0x92, 0xd5, 0xbf,
0x08, 0x00, 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06,
0x3f, 0x9f, 0xac, 0x11, 0x51, 0xc5, 0xac, 0x11, 0x51, 0x49, 0x00, 0x63,
0x9a, 0xef, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xc1, 0x27, 0x83, 0x50, 0x14,
0x00, 0x00, 0xc3, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}}
p.Decode()
if p.Payload == nil {
t.Error("Nil payload")
}
if len(p.Payload) != 0 {
t.Error("Non-empty payload:", p.Payload)
}
}
func TestDecodeVlanPacket(t *testing.T) {
p := &Packet{
Data: []byte{
0x00, 0x10, 0xdb, 0xff, 0x10, 0x00, 0x00, 0x15, 0x2c, 0x9d, 0xcc, 0x00, 0x81, 0x00, 0x01, 0xf7,
0x08, 0x00, 0x45, 0x00, 0x00, 0x28, 0x29, 0x8d, 0x40, 0x00, 0x7d, 0x06, 0x83, 0xa0, 0xac, 0x1b,
0xca, 0x8e, 0x45, 0x16, 0x94, 0xe2, 0xd4, 0x0a, 0x00, 0x50, 0xdf, 0xab, 0x9c, 0xc6, 0xcd, 0x1e,
0xe5, 0xd1, 0x50, 0x10, 0x01, 0x00, 0x5a, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}}
p.Decode()
if p.Type != TYPE_VLAN {
t.Error("Didn't detect vlan")
}
if len(p.Headers) != 3 {
t.Error("Incorrect number of headers:", len(p.Headers))
for i, h := range p.Headers {
t.Errorf("Header %d: %#v", i, h)
}
t.FailNow()
}
if _, ok := p.Headers[0].(*Vlanhdr); !ok {
t.Errorf("First header isn't vlan: %q", p.Headers[0])
}
if _, ok := p.Headers[1].(*Iphdr); !ok {
t.Errorf("Second header isn't IP: %q", p.Headers[1])
}
if _, ok := p.Headers[2].(*Tcphdr); !ok {
t.Errorf("Third header isn't TCP: %q", p.Headers[2])
}
}
func TestDecodeFuzzFallout(t *testing.T) {
testData := []struct {
Data []byte
}{
{[]byte("000000000000\x81\x000")},
{[]byte("000000000000\x81\x00000")},
{[]byte("000000000000\x86\xdd0")},
{[]byte("000000000000\b\x000")},
{[]byte("000000000000\b\x060")},
{[]byte{}},
{[]byte("000000000000\b\x0600000000")},
{[]byte("000000000000\x86\xdd000000\x01000000000000000000000000000000000")},
{[]byte("000000000000\x81\x0000\b\x0600000000")},
{[]byte("000000000000\b\x00n0000000000000000000")},
{[]byte("000000000000\x86\xdd000000\x0100000000000000000000000000000000000")},
{[]byte("000000000000\x81\x0000\b\x00g0000000000000000000")},
//{[]byte()},
{[]byte("000000000000\b\x00400000000\x110000000000")},
{[]byte("0nMء\xfe\x13\x13\x81\x00gr\b\x00&x\xc9\xe5b'\x1e0\x00\x04\x00\x0020596224")},
{[]byte("000000000000\x81\x0000\b\x00400000000\x110000000000")},
{[]byte("000000000000\b\x00000000000\x0600\xff0000000")},
{[]byte("000000000000\x86\xdd000000\x06000000000000000000000000000000000")},
{[]byte("000000000000\x81\x0000\b\x00000000000\x0600b0000000")},
{[]byte("000000000000\x81\x0000\b\x00400000000\x060000000000")},
{[]byte("000000000000\x86\xdd000000\x11000000000000000000000000000000000")},
{[]byte("000000000000\x86\xdd000000\x0600000000000000000000000000000000000000000000M")},
{[]byte("000000000000\b\x00500000000\x0600000000000")},
{[]byte("0nM\xd80\xfe\x13\x13\x81\x00gr\b\x00&x\xc9\xe5b'\x1e0\x00\x04\x00\x0020596224")},
}
for _, entry := range testData {
pkt := &Packet{
Time: time.Now(),
Caplen: uint32(len(entry.Data)),
Len: uint32(len(entry.Data)),
Data: entry.Data,
}
pkt.Decode()
/*
func() {
defer func() {
if err := recover(); err != nil {
t.Fatalf("%d. %q failed: %v", idx, string(entry.Data), err)
}
}()
pkt.Decode()
}()
*/
}
}


@ -1,49 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"runtime/pprof"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)
func main() {
var filename *string = flag.String("file", "", "filename")
var decode *bool = flag.Bool("d", false, "If true, decode each packet")
var cpuprofile *string = flag.String("cpuprofile", "", "filename")
flag.Parse()
h, err := pcap.Openoffline(*filename)
if err != nil {
fmt.Printf("Couldn't create pcap reader: %v", err)
}
if *cpuprofile != "" {
if out, err := os.Create(*cpuprofile); err == nil {
pprof.StartCPUProfile(out)
defer func() {
pprof.StopCPUProfile()
out.Close()
}()
} else {
panic(err)
}
}
i, nilPackets := 0, 0
start := time.Now()
for pkt, code := h.NextEx(); code != -2; pkt, code = h.NextEx() {
if pkt == nil {
nilPackets++
} else if *decode {
pkt.Decode()
}
i++
}
duration := time.Since(start)
fmt.Printf("Took %v to process %v packets, %v per packet, %d nil packets\n", duration, i, duration/time.Duration(i), nilPackets)
}


@ -1,96 +0,0 @@
package main
// Parses a pcap file, writes it back to disk, then verifies the files
// are the same.
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)
var input *string = flag.String("input", "", "input file")
var output *string = flag.String("output", "", "output file")
var decode *bool = flag.Bool("decode", false, "print decoded packets")
func copyPcap(dest, src string) {
f, err := os.Open(src)
if err != nil {
fmt.Printf("couldn't open %q: %v\n", src, err)
return
}
defer f.Close()
reader, err := pcap.NewReader(bufio.NewReader(f))
if err != nil {
fmt.Printf("couldn't create reader: %v\n", err)
return
}
w, err := os.Create(dest)
if err != nil {
fmt.Printf("couldn't open %q: %v\n", dest, err)
return
}
defer w.Close()
buf := bufio.NewWriter(w)
writer, err := pcap.NewWriter(buf, &reader.Header)
if err != nil {
fmt.Printf("couldn't create writer: %v\n", err)
return
}
for {
pkt := reader.Next()
if pkt == nil {
break
}
if *decode {
pkt.Decode()
fmt.Println(pkt.String())
}
writer.Write(pkt)
}
buf.Flush()
}
func check(dest, src string) {
f, err := os.Open(src)
if err != nil {
fmt.Printf("couldn't open %q: %v\n", src, err)
return
}
defer f.Close()
freader := bufio.NewReader(f)
g, err := os.Open(dest)
if err != nil {
fmt.Printf("couldn't open %q: %v\n", src, err)
return
}
defer g.Close()
greader := bufio.NewReader(g)
for {
fb, ferr := freader.ReadByte()
gb, gerr := greader.ReadByte()
if ferr == io.EOF && gerr == io.EOF {
break
}
if fb == gb {
continue
}
fmt.Println("FAIL")
return
}
fmt.Println("PASS")
}
func main() {
flag.Parse()
copyPcap(*output, *input)
check(*output, *input)
}


@ -1,82 +0,0 @@
package main
import (
"flag"
"fmt"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)
func min(x uint32, y uint32) uint32 {
if x < y {
return x
}
return y
}
func main() {
var device *string = flag.String("d", "", "device")
var file *string = flag.String("r", "", "file")
var expr *string = flag.String("e", "", "filter expression")
flag.Parse()
var h *pcap.Pcap
var err error
ifs, err := pcap.Findalldevs()
if len(ifs) == 0 {
fmt.Printf("Warning: no devices found : %s\n", err)
} else {
for i := 0; i < len(ifs); i++ {
fmt.Printf("dev %d: %s (%s)\n", i+1, ifs[i].Name, ifs[i].Description)
}
}
if *device != "" {
h, err = pcap.Openlive(*device, 65535, true, 0)
if h == nil {
fmt.Printf("Openlive(%s) failed: %s\n", *device, err)
return
}
} else if *file != "" {
h, err = pcap.Openoffline(*file)
if h == nil {
fmt.Printf("Openoffline(%s) failed: %s\n", *file, err)
return
}
} else {
fmt.Printf("usage: pcaptest [-d <device> | -r <file>]\n")
return
}
defer h.Close()
fmt.Printf("pcap version: %s\n", pcap.Version())
if *expr != "" {
fmt.Printf("Setting filter: %s\n", *expr)
err := h.Setfilter(*expr)
if err != nil {
fmt.Printf("Warning: setting filter failed: %s\n", err)
}
}
for pkt := h.Next(); pkt != nil; pkt = h.Next() {
fmt.Printf("time: %d.%06d (%s) caplen: %d len: %d\nData:",
int64(pkt.Time.Second()), int64(pkt.Time.Nanosecond()),
time.Unix(int64(pkt.Time.Second()), 0).String(), int64(pkt.Caplen), int64(pkt.Len))
for i := uint32(0); i < pkt.Caplen; i++ {
if i%32 == 0 {
fmt.Printf("\n")
}
if 32 <= pkt.Data[i] && pkt.Data[i] <= 126 {
fmt.Printf("%c", pkt.Data[i])
} else {
fmt.Printf(".")
}
}
fmt.Printf("\n\n")
}
}


@ -1,121 +0,0 @@
package main
import (
"bufio"
"flag"
"fmt"
"os"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)
const (
TYPE_IP = 0x0800
TYPE_ARP = 0x0806
TYPE_IP6 = 0x86DD
IP_ICMP = 1
IP_INIP = 4
IP_TCP = 6
IP_UDP = 17
)
var out *bufio.Writer
var errout *bufio.Writer
func main() {
var device *string = flag.String("i", "", "interface")
var snaplen *int = flag.Int("s", 65535, "snaplen")
var hexdump *bool = flag.Bool("X", false, "hexdump")
expr := ""
out = bufio.NewWriter(os.Stdout)
errout = bufio.NewWriter(os.Stderr)
flag.Usage = func() {
fmt.Fprintf(errout, "usage: %s [ -i interface ] [ -s snaplen ] [ -X ] [ expression ]\n", os.Args[0])
errout.Flush()
os.Exit(1)
}
flag.Parse()
if len(flag.Args()) > 0 {
expr = flag.Arg(0)
}
if *device == "" {
devs, err := pcap.Findalldevs()
if err != nil {
fmt.Fprintf(errout, "tcpdump: couldn't find any devices: %s\n", err)
}
if 0 == len(devs) {
flag.Usage()
}
*device = devs[0].Name
}
h, err := pcap.Openlive(*device, int32(*snaplen), true, 0)
if h == nil {
fmt.Fprintf(errout, "tcpdump: %s\n", err)
errout.Flush()
return
}
defer h.Close()
if expr != "" {
ferr := h.Setfilter(expr)
if ferr != nil {
fmt.Fprintf(out, "tcpdump: %s\n", ferr)
out.Flush()
}
}
for pkt := h.Next(); pkt != nil; pkt = h.Next() {
pkt.Decode()
fmt.Fprintf(out, "%s\n", pkt.String())
if *hexdump {
Hexdump(pkt)
}
out.Flush()
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func Hexdump(pkt *pcap.Packet) {
for i := 0; i < len(pkt.Data); i += 16 {
Dumpline(uint32(i), pkt.Data[i:min(i+16, len(pkt.Data))])
}
}
func Dumpline(addr uint32, line []byte) {
fmt.Fprintf(out, "\t0x%04x: ", int32(addr))
var i uint16
for i = 0; i < 16 && i < uint16(len(line)); i++ {
if i%2 == 0 {
out.WriteString(" ")
}
fmt.Fprintf(out, "%02x", line[i])
}
for j := i; j <= 16; j++ {
if j%2 == 0 {
out.WriteString(" ")
}
out.WriteString(" ")
}
out.WriteString(" ")
for i = 0; i < 16 && i < uint16(len(line)); i++ {
if line[i] >= 32 && line[i] <= 126 {
fmt.Fprintf(out, "%c", line[i])
} else {
out.WriteString(".")
}
}
out.WriteString("\n")
}


@ -1,63 +0,0 @@
package quantile
import (
"testing"
)
func BenchmarkInsertTargeted(b *testing.B) {
b.ReportAllocs()
s := NewTargeted(Targets)
b.ResetTimer()
for i := float64(0); i < float64(b.N); i++ {
s.Insert(i)
}
}
func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
s := NewTargeted(TargetsSmallEpsilon)
b.ResetTimer()
for i := float64(0); i < float64(b.N); i++ {
s.Insert(i)
}
}
func BenchmarkInsertBiased(b *testing.B) {
s := NewLowBiased(0.01)
b.ResetTimer()
for i := float64(0); i < float64(b.N); i++ {
s.Insert(i)
}
}
func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
s := NewLowBiased(0.0001)
b.ResetTimer()
for i := float64(0); i < float64(b.N); i++ {
s.Insert(i)
}
}
func BenchmarkQuery(b *testing.B) {
s := NewTargeted(Targets)
for i := float64(0); i < 1e6; i++ {
s.Insert(i)
}
b.ResetTimer()
n := float64(b.N)
for i := float64(0); i < n; i++ {
s.Query(i / n)
}
}
func BenchmarkQuerySmallEpsilon(b *testing.B) {
s := NewTargeted(TargetsSmallEpsilon)
for i := float64(0); i < 1e6; i++ {
s.Insert(i)
}
b.ResetTimer()
n := float64(b.N)
for i := float64(0); i < n; i++ {
s.Query(i / n)
}
}


@ -1,121 +0,0 @@
// +build go1.1
package quantile_test
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/beorn7/perks/quantile"
)
func Example_simple() {
ch := make(chan float64)
go sendFloats(ch)
// Compute the 50th, 90th, and 99th percentile.
q := quantile.NewTargeted(map[float64]float64{
0.50: 0.005,
0.90: 0.001,
0.99: 0.0001,
})
for v := range ch {
q.Insert(v)
}
fmt.Println("perc50:", q.Query(0.50))
fmt.Println("perc90:", q.Query(0.90))
fmt.Println("perc99:", q.Query(0.99))
fmt.Println("count:", q.Count())
// Output:
// perc50: 5
// perc90: 16
// perc99: 223
// count: 2388
}
func Example_mergeMultipleStreams() {
// Scenario:
// We have multiple database shards. On each shard, there is a process
// collecting query response times from the database logs and inserting
// them into a Stream (created via NewTargeted(0.90)), much like the
// Simple example. These processes expose a network interface for us to
// ask them to serialize and send us the results of their
// Stream.Samples so we may Merge and Query them.
//
// NOTES:
// * These sample sets are small, allowing us to get them
// across the network much faster than sending the entire list of data
// points.
//
// * For this to work correctly, we must supply the same quantiles
// a priori the process collecting the samples supplied to NewTargeted,
// even if we do not plan to query them all here.
ch := make(chan quantile.Samples)
getDBQuerySamples(ch)
q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
for samples := range ch {
q.Merge(samples)
}
fmt.Println("perc90:", q.Query(0.90))
}
func Example_window() {
// Scenario: We want the 90th, 95th, and 99th percentiles for each
// minute.
ch := make(chan float64)
go sendStreamValues(ch)
tick := time.NewTicker(1 * time.Minute)
q := quantile.NewTargeted(map[float64]float64{
0.90: 0.001,
0.95: 0.0005,
0.99: 0.0001,
})
for {
select {
case t := <-tick.C:
flushToDB(t, q.Samples())
q.Reset()
case v := <-ch:
q.Insert(v)
}
}
}
func sendStreamValues(ch chan float64) {
// Use your imagination
}
func flushToDB(t time.Time, samples quantile.Samples) {
// Use your imagination
}
// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}
func sendFloats(ch chan<- float64) {
f, err := os.Open("exampledata.txt")
if err != nil {
log.Fatal(err)
}
sc := bufio.NewScanner(f)
for sc.Scan() {
b := sc.Bytes()
v, err := strconv.ParseFloat(string(b), 64)
if err != nil {
log.Fatal(err)
}
ch <- v
}
if sc.Err() != nil {
log.Fatal(sc.Err())
}
close(ch)
}


@ -1,188 +0,0 @@
package quantile
import (
"math"
"math/rand"
"sort"
"testing"
)
var (
Targets = map[float64]float64{
0.01: 0.001,
0.10: 0.01,
0.50: 0.05,
0.90: 0.01,
0.99: 0.001,
}
TargetsSmallEpsilon = map[float64]float64{
0.01: 0.0001,
0.10: 0.001,
0.50: 0.005,
0.90: 0.001,
0.99: 0.0001,
}
LowQuantiles = []float64{0.01, 0.1, 0.5}
HighQuantiles = []float64{0.99, 0.9, 0.5}
)
const RelativeEpsilon = 0.01
func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
sort.Float64s(a)
for quantile, epsilon := range Targets {
n := float64(len(a))
k := int(quantile * n)
lower := int((quantile - epsilon) * n)
if lower < 1 {
lower = 1
}
upper := int(math.Ceil((quantile + epsilon) * n))
if upper > len(a) {
upper = len(a)
}
w, min, max := a[k-1], a[lower-1], a[upper-1]
if g := s.Query(quantile); g < min || g > max {
t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
}
}
}
func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
sort.Float64s(a)
for _, qu := range LowQuantiles {
n := float64(len(a))
k := int(qu * n)
lowerRank := int((1 - RelativeEpsilon) * qu * n)
upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
if g := s.Query(qu); g < min || g > max {
t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
}
}
}
func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
sort.Float64s(a)
for _, qu := range HighQuantiles {
n := float64(len(a))
k := int(qu * n)
lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
if g := s.Query(qu); g < min || g > max {
t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
}
}
}
func populateStream(s *Stream) []float64 {
a := make([]float64, 0, 1e5+100)
for i := 0; i < cap(a); i++ {
v := rand.NormFloat64()
// Add 5% asymmetric outliers.
if i%20 == 0 {
v = v*v + 1
}
s.Insert(v)
a = append(a, v)
}
return a
}
func TestTargetedQuery(t *testing.T) {
rand.Seed(42)
s := NewTargeted(Targets)
a := populateStream(s)
verifyPercsWithAbsoluteEpsilon(t, a, s)
}
func TestLowBiasedQuery(t *testing.T) {
rand.Seed(42)
s := NewLowBiased(RelativeEpsilon)
a := populateStream(s)
verifyLowPercsWithRelativeEpsilon(t, a, s)
}
func TestHighBiasedQuery(t *testing.T) {
rand.Seed(42)
s := NewHighBiased(RelativeEpsilon)
a := populateStream(s)
verifyHighPercsWithRelativeEpsilon(t, a, s)
}
// BrokenTestTargetedMerge is broken, see Merge doc comment.
func BrokenTestTargetedMerge(t *testing.T) {
rand.Seed(42)
s1 := NewTargeted(Targets)
s2 := NewTargeted(Targets)
a := populateStream(s1)
a = append(a, populateStream(s2)...)
s1.Merge(s2.Samples())
verifyPercsWithAbsoluteEpsilon(t, a, s1)
}
// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
func BrokenTestLowBiasedMerge(t *testing.T) {
rand.Seed(42)
s1 := NewLowBiased(RelativeEpsilon)
s2 := NewLowBiased(RelativeEpsilon)
a := populateStream(s1)
a = append(a, populateStream(s2)...)
s1.Merge(s2.Samples())
verifyLowPercsWithRelativeEpsilon(t, a, s2)
}
// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
func BrokenTestHighBiasedMerge(t *testing.T) {
rand.Seed(42)
s1 := NewHighBiased(RelativeEpsilon)
s2 := NewHighBiased(RelativeEpsilon)
a := populateStream(s1)
a = append(a, populateStream(s2)...)
s1.Merge(s2.Samples())
verifyHighPercsWithRelativeEpsilon(t, a, s2)
}
func TestUncompressed(t *testing.T) {
q := NewTargeted(Targets)
for i := 100; i > 0; i-- {
q.Insert(float64(i))
}
if g := q.Count(); g != 100 {
t.Errorf("want count 100, got %d", g)
}
// Before compression, Query should have 100% accuracy.
for quantile := range Targets {
w := quantile * 100
if g := q.Query(quantile); g != w {
t.Errorf("want %f, got %f", w, g)
}
}
}
func TestUncompressedSamples(t *testing.T) {
q := NewTargeted(map[float64]float64{0.99: 0.001})
for i := 1; i <= 100; i++ {
q.Insert(float64(i))
}
if g := q.Samples().Len(); g != 100 {
t.Errorf("want count 100, got %d", g)
}
}
func TestUncompressedOne(t *testing.T) {
q := NewTargeted(map[float64]float64{0.99: 0.01})
q.Insert(3.14)
if g := q.Query(0.90); g != 3.14 {
t.Error("want PI, got", g)
}
}
func TestDefaults(t *testing.T) {
if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
t.Errorf("want 0, got %f", g)
}
}


@ -1,18 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bgentry/speakeasy"
)
func main() {
password, err := speakeasy.Ask("Please enter a password: ")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Password result: %q\n", password)
fmt.Printf("Password len: %d\n", len(password))
}

File diff suppressed because it is too large


@ -1,98 +0,0 @@
## Terminal progress bar for Go
Simple progress bar for console programms.
### Installation
```
go get github.com/cheggaaa/pb
```
### Usage
```Go
package main
import (
"github.com/cheggaaa/pb"
"time"
)
func main() {
count := 100000
bar := pb.StartNew(count)
for i := 0; i < count; i++ {
bar.Increment()
time.Sleep(time.Millisecond)
}
bar.FinishPrint("The End!")
}
```
Result will be like this:
```
> go run test.go
37158 / 100000 [================>_______________________________] 37.16% 1m11s
```
More functions?
```Go
// create bar
bar := pb.New(count)
// refresh info every second (default 200ms)
bar.SetRefreshRate(time.Second)
// show percents (by default already true)
bar.ShowPercent = true
// show bar (by default already true)
bar.ShowBar = true
// no need counters
bar.ShowCounters = false
// show "time left"
bar.ShowTimeLeft = true
// show average speed
bar.ShowSpeed = true
// sets the width of the progress bar
bar.SetWidth(80)
// sets the width of the progress bar, but if terminal size smaller will be ignored
bar.SetMaxWidth(80)
// convert output to readable format (like KB, MB)
bar.SetUnits(pb.U_BYTES)
// and start
bar.Start()
```
Want handle progress of io operations?
```Go
// create and start bar
bar := pb.New(myDataLen).SetUnits(pb.U_BYTES)
bar.Start()
// my io.Reader
r := myReader
// my io.Writer
w := myWriter
// create multi writer
writer := io.MultiWriter(w, bar)
// and copy
io.Copy(writer, r)
// show example/copy/copy.go for advanced example
```
Not like the looks?
```Go
bar.Format("<.- >")
```


@ -1,81 +0,0 @@
package main
import (
"fmt"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/cheggaaa/pb"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
)
func main() {
// check args
if len(os.Args) < 3 {
printUsage()
return
}
sourceName, destName := os.Args[1], os.Args[2]
// check source
var source io.Reader
var sourceSize int64
if strings.HasPrefix(sourceName, "http://") {
// open as url
resp, err := http.Get(sourceName)
if err != nil {
fmt.Printf("Can't get %s: %v\n", sourceName, err)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Printf("Server return non-200 status: %v\n", resp.Status)
return
}
i, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
sourceSize = int64(i)
source = resp.Body
} else {
// open as file
s, err := os.Open(sourceName)
if err != nil {
fmt.Printf("Can't open %s: %v\n", sourceName, err)
return
}
defer s.Close()
// get source size
sourceStat, err := s.Stat()
if err != nil {
fmt.Printf("Can't stat %s: %v\n", sourceName, err)
return
}
sourceSize = sourceStat.Size()
source = s
}
// create dest
dest, err := os.Create(destName)
if err != nil {
fmt.Printf("Can't create %s: %v\n", destName, err)
return
}
defer dest.Close()
// create bar
bar := pb.New(int(sourceSize)).SetUnits(pb.U_BYTES).SetRefreshRate(time.Millisecond * 10)
bar.ShowSpeed = true
bar.Start()
// create multi writer
writer := io.MultiWriter(dest, bar)
// and copy
io.Copy(writer, source)
bar.Finish()
}
func printUsage() {
fmt.Println("copy [source file or url] [dest file]")
}


@ -1,30 +0,0 @@
package main
import (
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/cheggaaa/pb"
"time"
)
func main() {
count := 5000
bar := pb.New(count)
// show percents (by default already true)
bar.ShowPercent = true
// show bar (by default already true)
bar.ShowBar = true
// no need counters
bar.ShowCounters = true
bar.ShowTimeLeft = true
// and start
bar.Start()
for i := 0; i < count; i++ {
bar.Increment()
time.Sleep(time.Millisecond)
}
bar.FinishPrint("The End!")
}


@ -1,45 +0,0 @@
package pb
import (
"fmt"
"strconv"
"strings"
)
type Units int
const (
// By default, without type handle
U_NO Units = iota
// Handle as b, Kb, Mb, etc
U_BYTES
)
// Format integer
func Format(i int64, units Units) string {
switch units {
case U_BYTES:
return FormatBytes(i)
default:
// by default just convert to string
return strconv.FormatInt(i, 10)
}
}
// Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
func FormatBytes(i int64) (result string) {
switch {
case i > (1024 * 1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
case i > (1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
case i > (1024 * 1024):
result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
case i > 1024:
result = fmt.Sprintf("%.02f KB", float64(i)/1024)
default:
result = fmt.Sprintf("%d B", i)
}
result = strings.Trim(result, " ")
return
}


@ -1,37 +0,0 @@
package pb
import (
"fmt"
"strconv"
"testing"
)
func Test_DefaultsToInteger(t *testing.T) {
value := int64(1000)
expected := strconv.Itoa(int(value))
actual := Format(value, -1)
if actual != expected {
t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual))
}
}
func Test_CanFormatAsInteger(t *testing.T) {
value := int64(1000)
expected := strconv.Itoa(int(value))
actual := Format(value, U_NO)
if actual != expected {
t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual))
}
}
func Test_CanFormatAsBytes(t *testing.T) {
value := int64(1000)
expected := "1000 B"
actual := Format(value, U_BYTES)
if actual != expected {
t.Error(fmt.Sprintf("Expected {%s} was {%s}", expected, actual))
}
}


@ -1,7 +0,0 @@
// +build linux darwin freebsd netbsd openbsd
package pb
import "syscall"
const sys_ioctl = syscall.SYS_IOCTL


@ -1,5 +0,0 @@
// +build solaris
package pb
const sys_ioctl = 54


@ -1,37 +0,0 @@
package pb
import (
"testing"
)
func Test_IncrementAddsOne(t *testing.T) {
count := 5000
bar := New(count)
expected := 1
actual := bar.Increment()
if actual != expected {
t.Errorf("Expected {%d} was {%d}", expected, actual)
}
}
func Test_Width(t *testing.T) {
count := 5000
bar := New(count)
width := 100
bar.SetWidth(100).Callback = func(out string) {
if len(out) != width {
t.Errorf("Bar width expected {%d} was {%d}", len(out), width)
}
}
bar.Start()
bar.Increment()
bar.Finish()
}
func Test_MultipleFinish(t *testing.T) {
bar := New(5000)
bar.Add(2000)
bar.Finish()
bar.Finish()
}


@ -1,16 +0,0 @@
// +build windows
package pb
import (
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/olekukonko/ts"
)
func bold(str string) string {
return str
}
func terminalWidth() (int, error) {
size, err := ts.GetSize()
return size.Col(), err
}


@ -1,46 +0,0 @@
// +build linux darwin freebsd netbsd openbsd solaris
package pb
import (
"os"
"runtime"
"syscall"
"unsafe"
)
const (
TIOCGWINSZ = 0x5413
TIOCGWINSZ_OSX = 1074295912
)
var tty *os.File
func init() {
var err error
tty, err = os.Open("/dev/tty")
if err != nil {
tty = os.Stdin
}
}
func bold(str string) string {
return "\033[1m" + str + "\033[0m"
}
func terminalWidth() (int, error) {
w := new(window)
tio := syscall.TIOCGWINSZ
if runtime.GOOS == "darwin" {
tio = TIOCGWINSZ_OSX
}
res, _, err := syscall.Syscall(sys_ioctl,
tty.Fd(),
uintptr(tio),
uintptr(unsafe.Pointer(w)),
)
if int(res) == -1 {
return 0, err
}
return int(w.Col), nil
}


@ -1,14 +0,0 @@
#! /bin/bash
: ${PROG:=$(basename ${BASH_SOURCE})}
_cli_bash_autocomplete() {
local cur opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
}
complete -F _cli_bash_autocomplete $PROG


@ -1,5 +0,0 @@
autoload -U compinit && compinit
autoload -U bashcompinit && bashcompinit
script_dir=$(dirname $0)
source ${script_dir}/bash_autocomplete


@ -1,64 +0,0 @@
# Gexpect
Gexpect is a pure golang expect-like module.
It makes it simpler to create and control other terminal applications.
child, err := gexpect.Spawn("python")
if err != nil {
panic(err)
}
child.Expect(">>>")
child.SendLine("print 'Hello World'")
child.Interact()
child.Close()
## Examples
`Spawn` handles the argument parsing from a string
child.Spawn("/bin/sh -c 'echo \"my complicated command\" | tee log | cat > log2'")
child.ReadLine() // ReadLine() (string, error)
child.ReadUntil(' ') // ReadUntil(delim byte) ([]byte, error)
`ReadLine`, `ReadUntil` and `SendLine` send strings from/to `stdout/stdin` respectively
child := gexpect.Spawn("cat")
child.SendLine("echoing process_stdin") // SendLine(command string) (error)
msg, _ := child.ReadLine() // msg = echoing process_stdin
`Wait` and `Close` allow for graceful and ungraceful termination.
child.Wait() // Waits until the child terminates naturally.
child.Close() // Sends a kill command
`AsyncInteractChannels` spawns two go routines to pipe into and from `stdout`/`stdin`, allowing for some usecases to be a little simpler.
child := gexpect.spawn("sh")
sender, reciever := child.AsyncInteractChannels()
sender <- "echo Hello World\n" // Send to stdin
line, open := <- reciever // Recieve a line from stdout/stderr
// When the subprocess stops (e.g. with child.Close()) , receiver is closed
if open {
fmt.Printf("Received %s", line)]
}
`ExpectRegex` uses golang's internal regex engine to wait until a match from the process with the given regular expression (or an error on process termination with no match).
child := gexpect.Spawn("echo accb")
match, _ := child.ExpectRegex("a..b")
// (match=true)
`ExpectRegexFind` allows for groups to be extracted from process stdout. The first element is an array of containing the total matched text, followed by each subexpression group match.
child := gexpect.Spawn("echo 123 456 789")
result, _ := child.ExpectRegexFind("\d+ (\d+) (\d+)")
// result = []string{"123 456 789", "456", "789"}
See `gexpect_test.go` and the `examples` folder for full syntax
## Credits
github.com/kballard/go-shellquote
github.com/kr/pty
KMP Algorithm: "http://blog.databigbang.com/searching-for-substrings-in-streams-a-slight-modification-of-the-knuth-morris-pratt-algorithm-in-haxe/"


@ -1,27 +0,0 @@
package main
import gexpect "github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/gexpect"
import "log"
func main() {
log.Printf("Testing Ftp... ")
child, err := gexpect.Spawn("ftp ftp.openbsd.org")
if err != nil {
panic(err)
}
child.Expect("Name")
child.SendLine("anonymous")
child.Expect("Password")
child.SendLine("pexpect@sourceforge.net")
child.Expect("ftp> ")
child.SendLine("cd /pub/OpenBSD/3.7/packages/i386")
child.Expect("ftp> ")
child.SendLine("bin")
child.Expect("ftp> ")
child.SendLine("prompt")
child.Expect("ftp> ")
child.SendLine("pwd")
child.Expect("ftp> ")
log.Printf("Success\n")
}


@ -1,15 +0,0 @@
package main
import gexpect "github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/gexpect"
import "log"
func main() {
log.Printf("Testing Ping interact... \n")
child, err := gexpect.Spawn("ping -c8 127.0.0.1")
if err != nil {
panic(err)
}
child.Interact()
log.Printf("Success\n")
}


@ -1,22 +0,0 @@
package main
import "github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/gexpect"
import "fmt"
func main() {
fmt.Printf("Starting python.. \n")
child, err := gexpect.Spawn("python")
if err != nil {
panic(err)
}
fmt.Printf("Expecting >>>.. \n")
child.Expect(">>>")
fmt.Printf("print 'Hello World'..\n")
child.SendLine("print 'Hello World'")
child.Expect(">>>")
fmt.Printf("Interacting.. \n")
child.Interact()
fmt.Printf("Done \n")
child.Close()
}


@ -1,53 +0,0 @@
package main
import "github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/gexpect"
import "fmt"
import "strings"
func main() {
waitChan := make(chan string)
fmt.Printf("Starting screen.. \n")
child, err := gexpect.Spawn("screen")
if err != nil {
panic(err)
}
sender, reciever := child.AsyncInteractChannels()
go func() {
waitString := ""
count := 0
for {
select {
case waitString = <-waitChan:
count++
case msg, open := <-reciever:
if !open {
return
}
fmt.Printf("Recieved: %s\n", msg)
if strings.Contains(msg, waitString) {
if count >= 1 {
waitChan <- msg
count -= 1
}
}
}
}
}()
wait := func(str string) {
waitChan <- str
<-waitChan
}
fmt.Printf("Waiting until started.. \n")
wait(" ")
fmt.Printf("Sending Enter.. \n")
sender <- "\n"
wait("$")
fmt.Printf("Sending echo.. \n")
sender <- "echo Hello World\n"
wait("Hello World")
fmt.Printf("Received echo. \n")
}


@ -1,430 +0,0 @@
package gexpect
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"time"
"unicode/utf8"
shell "github.com/coreos/etcd/Godeps/_workspace/src/github.com/kballard/go-shellquote"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/kr/pty"
)
type ExpectSubprocess struct {
Cmd *exec.Cmd
buf *buffer
outputBuffer []byte
}
type buffer struct {
f *os.File
b bytes.Buffer
collect bool
collection bytes.Buffer
}
func (buf *buffer) StartCollecting() {
buf.collect = true
}
func (buf *buffer) StopCollecting() (result string) {
result = string(buf.collection.Bytes())
buf.collect = false
buf.collection.Reset()
return result
}
func (buf *buffer) Read(chunk []byte) (int, error) {
nread := 0
if buf.b.Len() > 0 {
n, err := buf.b.Read(chunk)
if err != nil {
return n, err
}
if n == len(chunk) {
return n, nil
}
nread = n
}
fn, err := buf.f.Read(chunk[nread:])
return fn + nread, err
}
func (buf *buffer) ReadRune() (r rune, size int, err error) {
l := buf.b.Len()
chunk := make([]byte, utf8.UTFMax)
if l > 0 {
n, err := buf.b.Read(chunk)
if err != nil {
return 0, 0, err
}
if utf8.FullRune(chunk) {
r, rL := utf8.DecodeRune(chunk)
if n > rL {
buf.PutBack(chunk[rL:n])
}
if buf.collect {
buf.collection.WriteRune(r)
}
return r, rL, nil
}
}
// else add bytes from the file, then try that
for l < utf8.UTFMax {
fn, err := buf.f.Read(chunk[l : l+1])
if err != nil {
return 0, 0, err
}
l = l + fn
if utf8.FullRune(chunk) {
r, rL := utf8.DecodeRune(chunk)
if buf.collect {
buf.collection.WriteRune(r)
}
return r, rL, nil
}
}
return 0, 0, errors.New("File is not a valid UTF=8 encoding")
}
func (buf *buffer) PutBack(chunk []byte) {
if len(chunk) == 0 {
return
}
if buf.b.Len() == 0 {
buf.b.Write(chunk)
return
}
d := make([]byte, 0, len(chunk)+buf.b.Len())
d = append(d, chunk...)
d = append(d, buf.b.Bytes()...)
buf.b.Reset()
buf.b.Write(d)
}
func SpawnAtDirectory(command string, directory string) (*ExpectSubprocess, error) {
expect, err := _spawn(command)
if err != nil {
return nil, err
}
expect.Cmd.Dir = directory
return _start(expect)
}
func Command(command string) (*ExpectSubprocess, error) {
expect, err := _spawn(command)
if err != nil {
return nil, err
}
return expect, nil
}
func (expect *ExpectSubprocess) Start() error {
_, err := _start(expect)
return err
}
func Spawn(command string) (*ExpectSubprocess, error) {
expect, err := _spawn(command)
if err != nil {
return nil, err
}
return _start(expect)
}
func (expect *ExpectSubprocess) Close() error {
return expect.Cmd.Process.Kill()
}
func (expect *ExpectSubprocess) AsyncInteractChannels() (send chan string, receive chan string) {
receive = make(chan string)
send = make(chan string)
go func() {
for {
str, err := expect.ReadLine()
if err != nil {
close(receive)
return
}
receive <- str
}
}()
go func() {
for {
select {
case sendCommand, exists := <-send:
{
if !exists {
return
}
err := expect.Send(sendCommand)
if err != nil {
receive <- "gexpect Error: " + err.Error()
return
}
}
}
}
}()
return
}
func (expect *ExpectSubprocess) ExpectRegex(regex string) (bool, error) {
return regexp.MatchReader(regex, expect.buf)
}
func (expect *ExpectSubprocess) expectRegexFind(regex string, output bool) ([]string, string, error) {
re, err := regexp.Compile(regex)
if err != nil {
return nil, "", err
}
expect.buf.StartCollecting()
pairs := re.FindReaderSubmatchIndex(expect.buf)
stringIndexedInto := expect.buf.StopCollecting()
l := len(pairs)
numPairs := l / 2
result := make([]string, numPairs)
for i := 0; i < numPairs; i += 1 {
result[i] = stringIndexedInto[pairs[i*2]:pairs[i*2+1]]
}
// convert indexes to strings
if len(result) == 0 {
err = fmt.Errorf("ExpectRegex didn't find regex '%v'.", regex)
}
return result, stringIndexedInto, err
}
func (expect *ExpectSubprocess) expectTimeoutRegexFind(regex string, timeout time.Duration) (result []string, out string, err error) {
t := make(chan bool)
go func() {
result, out, err = expect.ExpectRegexFindWithOutput(regex)
t <- false
}()
go func() {
time.Sleep(timeout)
err = fmt.Errorf("ExpectRegex timed out after %v finding '%v'.\nOutput:\n%s", timeout, regex, expect.Collect())
t <- true
}()
<-t
return result, out, err
}
func (expect *ExpectSubprocess) ExpectRegexFind(regex string) ([]string, error) {
result, _, err := expect.expectRegexFind(regex, false)
return result, err
}
func (expect *ExpectSubprocess) ExpectTimeoutRegexFind(regex string, timeout time.Duration) ([]string, error) {
result, _, err := expect.expectTimeoutRegexFind(regex, timeout)
return result, err
}
func (expect *ExpectSubprocess) ExpectRegexFindWithOutput(regex string) ([]string, string, error) {
return expect.expectRegexFind(regex, true)
}
func (expect *ExpectSubprocess) ExpectTimeoutRegexFindWithOutput(regex string, timeout time.Duration) ([]string, string, error) {
return expect.expectTimeoutRegexFind(regex, timeout)
}
func buildKMPTable(searchString string) []int {
pos := 2
cnd := 0
length := len(searchString)
var table []int
if length < 2 {
length = 2
}
table = make([]int, length)
table[0] = -1
table[1] = 0
for pos < len(searchString) {
if searchString[pos-1] == searchString[cnd] {
cnd += 1
table[pos] = cnd
pos += 1
} else if cnd > 0 {
cnd = table[cnd]
} else {
table[pos] = 0
pos += 1
}
}
return table
}
func (expect *ExpectSubprocess) ExpectTimeout(searchString string, timeout time.Duration) (e error) {
result := make(chan error)
go func() {
result <- expect.Expect(searchString)
}()
select {
case e = <-result:
case <-time.After(timeout):
e = fmt.Errorf("Expect timed out after %v waiting for '%v'.\nOutput:\n%s", timeout, searchString, expect.Collect())
}
return e
}
func (expect *ExpectSubprocess) Expect(searchString string) (e error) {
chunk := make([]byte, len(searchString)*2)
target := len(searchString)
if expect.outputBuffer != nil {
expect.outputBuffer = expect.outputBuffer[0:]
}
m := 0
i := 0
// Build KMP Table
table := buildKMPTable(searchString)
for {
n, err := expect.buf.Read(chunk)
if err != nil {
return err
}
if expect.outputBuffer != nil {
expect.outputBuffer = append(expect.outputBuffer, chunk[:n]...)
}
offset := m + i
for m+i-offset < n {
if searchString[i] == chunk[m+i-offset] {
i += 1
if i == target {
unreadIndex := m + i - offset
if len(chunk) > unreadIndex {
expect.buf.PutBack(chunk[unreadIndex:])
}
return nil
}
} else {
m += i - table[i]
if table[i] > -1 {
i = table[i]
} else {
i = 0
}
}
}
}
}
func (expect *ExpectSubprocess) Send(command string) error {
_, err := io.WriteString(expect.buf.f, command)
return err
}
func (expect *ExpectSubprocess) Capture() {
if expect.outputBuffer == nil {
expect.outputBuffer = make([]byte, 0)
}
}
func (expect *ExpectSubprocess) Collect() []byte {
collectOutput := make([]byte, len(expect.outputBuffer))
copy(collectOutput, expect.outputBuffer)
expect.outputBuffer = nil
return collectOutput
}
func (expect *ExpectSubprocess) SendLine(command string) error {
_, err := io.WriteString(expect.buf.f, command+"\r\n")
return err
}
func (expect *ExpectSubprocess) Interact() {
defer expect.Cmd.Wait()
io.Copy(os.Stdout, &expect.buf.b)
go io.Copy(os.Stdout, expect.buf.f)
go io.Copy(expect.buf.f, os.Stdin)
}
func (expect *ExpectSubprocess) ReadUntil(delim byte) ([]byte, error) {
join := make([]byte, 1, 512)
chunk := make([]byte, 255)
for {
n, err := expect.buf.Read(chunk)
if err != nil {
return join, err
}
for i := 0; i < n; i++ {
if chunk[i] == delim {
if len(chunk) > i+1 {
expect.buf.PutBack(chunk[i+1:])
}
return join, nil
} else {
join = append(join, chunk[i])
}
}
}
}
func (expect *ExpectSubprocess) Wait() error {
return expect.Cmd.Wait()
}
func (expect *ExpectSubprocess) ReadLine() (string, error) {
str, err := expect.ReadUntil('\n')
if err != nil {
return "", err
}
return string(str), nil
}
func _start(expect *ExpectSubprocess) (*ExpectSubprocess, error) {
f, err := pty.Start(expect.Cmd)
if err != nil {
return nil, err
}
expect.buf.f = f
return expect, nil
}
func _spawn(command string) (*ExpectSubprocess, error) {
wrapper := new(ExpectSubprocess)
wrapper.outputBuffer = nil
splitArgs, err := shell.Split(command)
if err != nil {
return nil, err
}
numArguments := len(splitArgs) - 1
if numArguments < 0 {
return nil, errors.New("gexpect: No command given to spawn")
}
path, err := exec.LookPath(splitArgs[0])
if err != nil {
return nil, err
}
if numArguments >= 1 {
wrapper.Cmd = exec.Command(path, splitArgs[1:]...)
} else {
wrapper.Cmd = exec.Command(path)
}
wrapper.buf = new(buffer)
return wrapper, nil
}

Some files were not shown because too many files have changed in this diff