Compare commits
1493 Commits
.github/ISSUE_TEMPLATE.md (vendored, new file, 8 lines)

@@ -0,0 +1,8 @@
+# Bug reporting
+
+A good bug report has some very specific qualities, so please read over our short document on
+[reporting bugs][report_bugs] before you submit your bug report.
+
+To ask a question, go ahead and ignore this.
+
+[report_bugs]: ../Documentation/reporting_bugs.md
.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
+# Contributing guidelines
+
+Please read our [contribution workflow][contributing] before submitting a pull request.
+
+[contributing]: ../CONTRIBUTING.md#contribution-flow
.gitignore (vendored, 1 change)

@@ -10,3 +10,4 @@
 /hack/insta-discovery/.env
 *.test
 tools/functional-tester/docker/bin
+hack/tls-setup/certs
.header (2 changes)

@@ -1,4 +1,4 @@
-// Copyright 2016 CoreOS, Inc.
+// Copyright 2016 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
.travis.yml (36 changes; the diff's add/remove markers were lost in extraction, so both hunks are shown as flattened content with YAML indentation restored)

@@ -1,15 +1,34 @@
dist: trusty
language: go
go_import_path: github.com/coreos/etcd
sudo: false

go:
- 1.4
- 1.5
- 1.6
- tip

env:
  global:
  - GO15VENDOREXPERIMENT=1
  matrix:
  - TARGET=amd64
  - TARGET=arm64
  - TARGET=arm
  - TARGET=ppc64le

matrix:
  fast_finish: true
  allow_failures:
  - go: tip
  exclude:
  - go: 1.6
    env: TARGET=arm64
  - go: tip
    env: TARGET=arm
  - go: tip
    env: TARGET=arm64
  - go: tip
    env: TARGET=ppc64le

addons:
  apt:

@@ -20,6 +39,17 @@ addons:
before_install:
- go get -v github.com/chzchzchz/goword
- go get -v honnef.co/go/simple/cmd/gosimple
- go get -v honnef.co/go/unused/cmd/unused

# disable godep restore override
install:
- pushd cmd/ && go get -t -v ./... && popd

script:
- ./test
- >
  if [ "${TARGET}" == "amd64" ]; then
    GOARCH="${TARGET}" ./test;
  else
    GOARCH="${TARGET}" ./build;
  fi
CONTRIBUTING.md (2 changes)

@@ -12,7 +12,7 @@ etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
 - Fork the repository on GitHub
 - Read the README.md for build instructions
 
-## Reporting Bugs and Creating Issues
+## Reporting bugs and creating issues
 
 Reporting bugs is one of the best ways to contribute. However, a good bug report
 has some very specific qualities, so please read over our short document on

@@ -39,7 +39,7 @@ The coding style suggested by the Golang community is used in etcd. See the [sty
 
 Please follow this style to make etcd easy to review, maintain and develop.
 
-### Format of the Commit Message
+### Format of the commit message
 
 We follow a rough convention for commit messages that is designed to answer two
 questions: what changed and why. The subject line should feature the what and
Dockerfile

@@ -1,2 +1,6 @@
-FROM golang:onbuild
-EXPOSE 4001 7001 2379 2380
+FROM golang
+ADD . /go/src/github.com/coreos/etcd
+ADD cmd/vendor /go/src/github.com/coreos/etcd/vendor
+RUN go install github.com/coreos/etcd
+EXPOSE 2379 2380
+ENTRYPOINT ["etcd"]
Dockerfile-release (new file, 11 lines)

@@ -0,0 +1,11 @@
+FROM alpine:latest
+
+ADD etcd /usr/local/bin/
+ADD etcdctl /usr/local/bin/
+RUN mkdir -p /var/etcd/
+RUN mkdir -p /var/lib/etcd/
+
+EXPOSE 2379 2380
+
+# Define default command.
+CMD ["/usr/local/bin/etcd"]
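The release image carries no build toolchain; pre-built `etcd` and `etcdctl` binaries are expected to sit next to the Dockerfile at build time. A minimal sketch of building and running it (the `etcd-release` tag and the URLs are illustrative, not part of the diff):

```
# Build the image; the etcd/etcdctl binaries must already be in the build context.
docker build -t etcd-release -f Dockerfile-release .

# No ENTRYPOINT is set, so arguments given to `docker run` replace the default CMD.
docker run -d -p 2379:2379 -p 2380:2380 etcd-release \
  /usr/local/bin/etcd \
  --listen-client-urls http://0.0.0.0:2379 \
  --advertise-client-urls http://0.0.0.0:2379
```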
@@ -72,6 +72,6 @@ With the benchmark result, we can calculate roughly that `c1 = 17kb`, `c2 = 18kb
 | 5k | 50 | 10 | 2.5M | 5710MB |
 | 1k | 50 | 100 | 5M | 2380MB |
 | 2k | 50 | 100 | 10M | 4672MB |
-| 5k | 50 | 100 | 50M | *OOM* |
+| 5k | 50 | 100 | 25M | *OOM* |
 
 [rss]: https://en.wikipedia.org/wiki/Resident_set_size
@@ -1,4 +1,4 @@
-# Branch Management
+# Branch management
 
 ## Guide
 

@@ -13,7 +13,7 @@ The etcd team has adopted a *rolling release model* and supports one stable vers
 
 The `master` branch is our development branch. All new features land here first.
 
-If you want to try new features, pull `master` and play with it. Note that `master` may not be stable because new features may introduce bugs.
+To try new and experimental features, pull `master` and play with it. Note that `master` may not be stable because new features may introduce bugs.
 
 Before the release of the next stable version, feature PRs will be frozen. We will focus on the testing, bug-fix and documentation for one to two weeks.
 
Documentation/demo.md (new file, 454 lines)

@@ -0,0 +1,454 @@
# Demo

This series of examples shows the basic procedures for working with an etcd cluster.

## Set up a cluster

<img src="https://storage.googleapis.com/etcd/demo/01_etcd_clustering_2016051001.gif" alt="01_etcd_clustering_2016050601"/>

On each etcd node, specify the cluster members:

```
TOKEN=token-01
CLUSTER_STATE=new
NAME_1=machine-1
NAME_2=machine-2
NAME_3=machine-3
HOST_1=10.240.0.17
HOST_2=10.240.0.18
HOST_3=10.240.0.19
CLUSTER=${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_3}=http://${HOST_3}:2380
```

Run this on each machine:

```
# For machine 1
THIS_NAME=${NAME_1}
THIS_IP=${HOST_1}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

# For machine 2
THIS_NAME=${NAME_2}
THIS_IP=${HOST_2}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

# For machine 3
THIS_NAME=${NAME_3}
THIS_IP=${HOST_3}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
```

Or use our public discovery service:

```
curl https://discovery.etcd.io/new?size=3
https://discovery.etcd.io/a81b5818e67a6ea83e9d4daea5ecbc92

# grab this token
TOKEN=token-01
CLUSTER_STATE=new
NAME_1=machine-1
NAME_2=machine-2
NAME_3=machine-3
HOST_1=10.240.0.17
HOST_2=10.240.0.18
HOST_3=10.240.0.19
DISCOVERY=https://discovery.etcd.io/a81b5818e67a6ea83e9d4daea5ecbc92

THIS_NAME=${NAME_1}
THIS_IP=${HOST_1}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --discovery ${DISCOVERY} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

THIS_NAME=${NAME_2}
THIS_IP=${HOST_2}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --discovery ${DISCOVERY} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

THIS_NAME=${NAME_3}
THIS_IP=${HOST_3}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --discovery ${DISCOVERY} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
```

Now etcd is ready! To connect to etcd with etcdctl:

```
export ETCDCTL_API=3
HOST_1=10.240.0.17
HOST_2=10.240.0.18
HOST_3=10.240.0.19
ENDPOINTS=$HOST_1:2379,$HOST_2:2379,$HOST_3:2379

etcdctl --endpoints=$ENDPOINTS member list
```

## Access etcd

<img src="https://storage.googleapis.com/etcd/demo/02_etcdctl_access_etcd_2016051001.gif" alt="02_etcdctl_access_etcd_2016051001"/>

`put` command to write:

```
etcdctl --endpoints=$ENDPOINTS put foo "Hello World!"
```

`get` to read from etcd:

```
etcdctl --endpoints=$ENDPOINTS get foo
etcdctl --endpoints=$ENDPOINTS --write-out="json" get foo
```

## Get by prefix

<img src="https://storage.googleapis.com/etcd/demo/03_etcdctl_get_by_prefix_2016050501.gif" alt="03_etcdctl_get_by_prefix_2016050501"/>

```
etcdctl --endpoints=$ENDPOINTS put web1 value1
etcdctl --endpoints=$ENDPOINTS put web2 value2
etcdctl --endpoints=$ENDPOINTS put web3 value3

etcdctl --endpoints=$ENDPOINTS get web --prefix
```

## Delete

<img src="https://storage.googleapis.com/etcd/demo/04_etcdctl_delete_2016050601.gif" alt="04_etcdctl_delete_2016050601"/>

```
etcdctl --endpoints=$ENDPOINTS put key myvalue
etcdctl --endpoints=$ENDPOINTS del key

etcdctl --endpoints=$ENDPOINTS put k1 value1
etcdctl --endpoints=$ENDPOINTS put k2 value2
etcdctl --endpoints=$ENDPOINTS del k --prefix
```

## Transactional write

`txn` to wrap multiple requests into one transaction:

<img src="https://storage.googleapis.com/etcd/demo/05_etcdctl_transaction_2016050501.gif" alt="05_etcdctl_transaction_2016050501"/>

```
etcdctl --endpoints=$ENDPOINTS put user1 bad
etcdctl --endpoints=$ENDPOINTS txn --interactive

compares:
value("user1") = "bad"

success requests (get, put, delete):
del user1

failure requests (get, put, delete):
put user1 good
```

## Watch

`watch` to get notified of future changes:

<img src="https://storage.googleapis.com/etcd/demo/06_etcdctl_watch_2016050501.gif" alt="06_etcdctl_watch_2016050501"/>

```
etcdctl --endpoints=$ENDPOINTS watch stock1
etcdctl --endpoints=$ENDPOINTS put stock1 1000

etcdctl --endpoints=$ENDPOINTS watch stock --prefix
etcdctl --endpoints=$ENDPOINTS put stock1 10
etcdctl --endpoints=$ENDPOINTS put stock2 20
```
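Note that `watch` blocks until events arrive, so the `watch` and `put` commands above are meant to run in separate terminals.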
## Lease

`lease` to write with TTL:

<img src="https://storage.googleapis.com/etcd/demo/07_etcdctl_lease_2016050501.gif" alt="07_etcdctl_lease_2016050501"/>

```
etcdctl --endpoints=$ENDPOINTS lease grant 300
# lease 2be7547fbc6a5afa granted with TTL(300s)

etcdctl --endpoints=$ENDPOINTS put sample value --lease=2be7547fbc6a5afa
etcdctl --endpoints=$ENDPOINTS get sample

etcdctl --endpoints=$ENDPOINTS lease keep-alive 2be7547fbc6a5afa
etcdctl --endpoints=$ENDPOINTS lease revoke 2be7547fbc6a5afa
# or after 300 seconds
etcdctl --endpoints=$ENDPOINTS get sample
```

## Distributed locks

`lock` for distributed lock:

<img src="https://storage.googleapis.com/etcd/demo/08_etcdctl_lock_2016050501.gif" alt="08_etcdctl_lock_2016050501"/>

```
etcdctl --endpoints=$ENDPOINTS lock mutex1

# another client with the same name blocks
etcdctl --endpoints=$ENDPOINTS lock mutex1
```

## Elections

`elect` for leader election:

<img src="https://storage.googleapis.com/etcd/demo/09_etcdctl_elect_2016050501.gif" alt="09_etcdctl_elect_2016050501"/>

```
etcdctl --endpoints=$ENDPOINTS elect one p1

# another client with the same name blocks
etcdctl --endpoints=$ENDPOINTS elect one p2
```

## Cluster status

Specify the initial cluster configuration for each machine:

<img src="https://storage.googleapis.com/etcd/demo/10_etcdctl_endpoint_2016050501.gif" alt="10_etcdctl_endpoint_2016050501"/>

```
etcdctl --write-out=table --endpoints=$ENDPOINTS endpoint status

+------------------+------------------+---------+---------+-----------+-----------+------------+
| ENDPOINT         | ID               | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+------------------+------------------+---------+---------+-----------+-----------+------------+
| 10.240.0.17:2379 | 4917a7ab173fabe7 | 3.0.0   | 45 kB   | true      | 4         | 16726      |
| 10.240.0.18:2379 | 59796ba9cd1bcd72 | 3.0.0   | 45 kB   | false     | 4         | 16726      |
| 10.240.0.19:2379 | 94df724b66343e6c | 3.0.0   | 45 kB   | false     | 4         | 16726      |
+------------------+------------------+---------+---------+-----------+-----------+------------+
```

```
etcdctl --endpoints=$ENDPOINTS endpoint health

10.240.0.17:2379 is healthy: successfully committed proposal: took = 3.345431ms
10.240.0.19:2379 is healthy: successfully committed proposal: took = 3.767967ms
10.240.0.18:2379 is healthy: successfully committed proposal: took = 4.025451ms
```

## Snapshot

`snapshot` to save point-in-time snapshot of etcd database:

<img src="https://storage.googleapis.com/etcd/demo/11_etcdctl_snapshot_2016051001.gif" alt="11_etcdctl_snapshot_2016051001"/>

```
etcdctl --endpoints=$ENDPOINTS snapshot save my.db

Snapshot saved at my.db
```

```
etcdctl --write-out=table --endpoints=$ENDPOINTS snapshot status my.db

+---------+----------+------------+------------+
| HASH    | REVISION | TOTAL KEYS | TOTAL SIZE |
+---------+----------+------------+------------+
| c55e8b8 | 9        | 13         | 25 kB      |
+---------+----------+------------+------------+
```
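The demo stops at `snapshot status`; restoring the snapshot into a fresh data directory is the natural follow-up. A minimal sketch (the member name, data directory, and URLs here are illustrative, not from the demo):

```
# Restore the snapshot into a new data directory for a one-member cluster.
etcdctl snapshot restore my.db \
  --name m1 \
  --data-dir /tmp/etcd-restored \
  --initial-cluster m1=http://10.240.0.17:2380 \
  --initial-advertise-peer-urls http://10.240.0.17:2380

# Then start etcd against the restored directory.
etcd --name m1 --data-dir /tmp/etcd-restored \
  --initial-advertise-peer-urls http://10.240.0.17:2380
```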
## Migrate

`migrate` to transform etcd v2 to v3 data:

<img src="https://storage.googleapis.com/etcd/demo/12_etcdctl_migrate_2016061602.gif" alt="12_etcdctl_migrate_2016061602"/>

```
# write key in etcd version 2 store
export ETCDCTL_API=2
etcdctl --endpoints=http://$ENDPOINT set foo bar

# read key in etcd v2
etcdctl --endpoints=$ENDPOINTS --output="json" get foo

# stop etcd node to migrate, one by one

# migrate v2 data
export ETCDCTL_API=3
etcdctl --endpoints=$ENDPOINT migrate --data-dir="default.etcd" --wal-dir="default.etcd/member/wal"

# restart etcd node after migrate, one by one

# confirm that the key got migrated
etcdctl --endpoints=$ENDPOINTS get /foo
```

## Member

`member` to add, remove, and update membership:

<img src="https://storage.googleapis.com/etcd/demo/13_etcdctl_member_2016062301.gif" alt="13_etcdctl_member_2016062301"/>

```
# For each machine
TOKEN=my-etcd-token-1
CLUSTER_STATE=new
NAME_1=etcd-node-1
NAME_2=etcd-node-2
NAME_3=etcd-node-3
HOST_1=10.240.0.13
HOST_2=10.240.0.14
HOST_3=10.240.0.15
CLUSTER=${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_3}=http://${HOST_3}:2380

# For node 1
THIS_NAME=${NAME_1}
THIS_IP=${HOST_1}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 \
  --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 \
  --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} \
  --initial-cluster-token ${TOKEN}

# For node 2
THIS_NAME=${NAME_2}
THIS_IP=${HOST_2}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 \
  --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 \
  --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} \
  --initial-cluster-token ${TOKEN}

# For node 3
THIS_NAME=${NAME_3}
THIS_IP=${HOST_3}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 \
  --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 \
  --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} \
  --initial-cluster-token ${TOKEN}
```

Then replace a member with `member remove` and `member add` commands:

```
# get member ID
export ETCDCTL_API=3
HOST_1=10.240.0.13
HOST_2=10.240.0.14
HOST_3=10.240.0.15
etcdctl --endpoints=${HOST_1}:2379,${HOST_2}:2379,${HOST_3}:2379 member list

# remove the member
MEMBER_ID=278c654c9a6dfd3b
etcdctl --endpoints=${HOST_1}:2379,${HOST_2}:2379,${HOST_3}:2379 \
  member remove ${MEMBER_ID}

# add a new member (node 4)
export ETCDCTL_API=3
NAME_1=etcd-node-1
NAME_2=etcd-node-2
NAME_4=etcd-node-4
HOST_1=10.240.0.13
HOST_2=10.240.0.14
HOST_4=10.240.0.16 # new member
etcdctl --endpoints=${HOST_1}:2379,${HOST_2}:2379 \
  member add ${NAME_4} \
  --peer-urls=http://${HOST_4}:2380
```

Next, start the new member with `--initial-cluster-state existing` flag:

```
# [WARNING] If the new member starts from the same disk space,
# make sure to remove the data directory of the old member
#
# restart with 'existing' flag
TOKEN=my-etcd-token-1
CLUSTER_STATE=existing
NAME_1=etcd-node-1
NAME_2=etcd-node-2
NAME_4=etcd-node-4
HOST_1=10.240.0.13
HOST_2=10.240.0.14
HOST_4=10.240.0.16 # new member
CLUSTER=${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_4}=http://${HOST_4}:2380

THIS_NAME=${NAME_4}
THIS_IP=${HOST_4}
etcd --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 \
  --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 \
  --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} \
  --initial-cluster-token ${TOKEN}
```

## Auth

`auth`, `user`, `role` for authentication:

<img src="https://storage.googleapis.com/etcd/demo/14_etcdctl_auth_2016062301.gif" alt="14_etcdctl_auth_2016062301"/>

```
export ETCDCTL_API=3
ENDPOINTS=localhost:2379

etcdctl --endpoints=${ENDPOINTS} role add root
etcdctl --endpoints=${ENDPOINTS} role grant-permission root readwrite foo
etcdctl --endpoints=${ENDPOINTS} role get root

etcdctl --endpoints=${ENDPOINTS} user add root
etcdctl --endpoints=${ENDPOINTS} user grant-role root root
etcdctl --endpoints=${ENDPOINTS} user get root

etcdctl --endpoints=${ENDPOINTS} auth enable
# now all client requests go through auth

etcdctl --endpoints=${ENDPOINTS} --user=root:123 put foo bar
etcdctl --endpoints=${ENDPOINTS} get foo
etcdctl --endpoints=${ENDPOINTS} --user=root:123 get foo
etcdctl --endpoints=${ENDPOINTS} --user=root:123 get foo1
```
Documentation/dev-guide/api_grpc_gateway.md (new file, 38 lines)

@@ -0,0 +1,38 @@

## Why grpc-gateway

etcd v3 uses [gRPC][grpc] for its messaging protocol. The etcd project includes a gRPC-based [Go client][go-client] and a command line utility, [etcdctl][etcdctl], for communicating with an etcd cluster through gRPC. For languages with no gRPC support, etcd provides a JSON [grpc-gateway][grpc-gateway]. This gateway serves a RESTful proxy that translates HTTP/JSON requests into gRPC messages.


## Using grpc-gateway

The gateway accepts a [JSON mapping][json-mapping] for etcd's [protocol buffer][api-ref] message definitions. Note that `key` and `value` fields are defined as byte arrays and therefore must be base64 encoded in JSON.

```bash
<<COMMENT
https://www.base64encode.org/
foo is 'Zm9v' in Base64
bar is 'YmFy'
COMMENT

curl -L http://localhost:2379/v3alpha/kv/put \
  -X POST -d '{"key": "Zm9v", "value": "YmFy"}'

curl -L http://localhost:2379/v3alpha/kv/range \
  -X POST -d '{"key": "Zm9v"}'
```
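Since the gateway speaks base64 on both sides, a small shell round-trip illustrates the encoding (a sketch; it assumes a local etcd with the gateway on `localhost:2379` and the `jq` utility for JSON parsing):

```bash
# Encode key and value for the JSON payload (-n avoids a trailing newline).
KEY=$(echo -n foo | base64)     # Zm9v
VALUE=$(echo -n bar | base64)   # YmFy

curl -sL http://localhost:2379/v3alpha/kv/put \
  -X POST -d "{\"key\": \"$KEY\", \"value\": \"$VALUE\"}"

# Read it back and decode the stored value from the range response.
curl -sL http://localhost:2379/v3alpha/kv/range \
  -X POST -d "{\"key\": \"$KEY\"}" | jq -r '.kvs[0].value' | base64 --decode
```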
## Swagger

Generated [Swagger][swagger] API definitions can be found at [rpc.swagger.json][swagger-doc].

[api-ref]: ./api_reference_v3.md
[go-client]: https://github.com/coreos/etcd/tree/master/clientv3
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
[grpc]: http://www.grpc.io/
[grpc-gateway]: https://github.com/grpc-ecosystem/grpc-gateway
[json-mapping]: https://developers.google.com/protocol-buffers/docs/proto3#json
[swagger]: http://swagger.io/
[swagger-doc]: apispec/swagger/rpc.swagger.json
Documentation/dev-guide/api_reference_v3.md (new file, 829 lines)

@@ -0,0 +1,829 @@
### etcd API Reference

This is a generated documentation. Please read the proto files for more.

##### service `Auth` (etcdserver/etcdserverpb/rpc.proto)

| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| AuthEnable | AuthEnableRequest | AuthEnableResponse | AuthEnable enables authentication. |
| AuthDisable | AuthDisableRequest | AuthDisableResponse | AuthDisable disables authentication. |
| Authenticate | AuthenticateRequest | AuthenticateResponse | Authenticate processes an authenticate request. |
| UserAdd | AuthUserAddRequest | AuthUserAddResponse | UserAdd adds a new user. |
| UserGet | AuthUserGetRequest | AuthUserGetResponse | UserGet gets detailed user information. |
| UserList | AuthUserListRequest | AuthUserListResponse | UserList gets a list of all users. |
| UserDelete | AuthUserDeleteRequest | AuthUserDeleteResponse | UserDelete deletes a specified user. |
| UserChangePassword | AuthUserChangePasswordRequest | AuthUserChangePasswordResponse | UserChangePassword changes the password of a specified user. |
| UserGrantRole | AuthUserGrantRoleRequest | AuthUserGrantRoleResponse | UserGrant grants a role to a specified user. |
| UserRevokeRole | AuthUserRevokeRoleRequest | AuthUserRevokeRoleResponse | UserRevokeRole revokes a role of specified user. |
| RoleAdd | AuthRoleAddRequest | AuthRoleAddResponse | RoleAdd adds a new role. |
| RoleGet | AuthRoleGetRequest | AuthRoleGetResponse | RoleGet gets detailed role information. |
| RoleList | AuthRoleListRequest | AuthRoleListResponse | RoleList gets lists of all roles. |
| RoleDelete | AuthRoleDeleteRequest | AuthRoleDeleteResponse | RoleDelete deletes a specified role. |
| RoleGrantPermission | AuthRoleGrantPermissionRequest | AuthRoleGrantPermissionResponse | RoleGrantPermission grants a permission of a specified key or range to a specified role. |
| RoleRevokePermission | AuthRoleRevokePermissionRequest | AuthRoleRevokePermissionResponse | RoleRevokePermission revokes a key or range permission of a specified role. |

##### service `Cluster` (etcdserver/etcdserverpb/rpc.proto)

| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| MemberAdd | MemberAddRequest | MemberAddResponse | MemberAdd adds a member into the cluster. |
| MemberRemove | MemberRemoveRequest | MemberRemoveResponse | MemberRemove removes an existing member from the cluster. |
| MemberUpdate | MemberUpdateRequest | MemberUpdateResponse | MemberUpdate updates the member configuration. |
| MemberList | MemberListRequest | MemberListResponse | MemberList lists all the members in the cluster. |

##### service `KV` (etcdserver/etcdserverpb/rpc.proto)

for grpc-gateway

| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| Range | RangeRequest | RangeResponse | Range gets the keys in the range from the key-value store. |
| Put | PutRequest | PutResponse | Put puts the given key into the key-value store. A put request increments the revision of the key-value store and generates one event in the event history. |
| DeleteRange | DeleteRangeRequest | DeleteRangeResponse | DeleteRange deletes the given range from the key-value store. A delete request increments the revision of the key-value store and generates a delete event in the event history for every deleted key. |
| Txn | TxnRequest | TxnResponse | Txn processes multiple requests in a single transaction. A txn request increments the revision of the key-value store and generates events with the same revision for every completed request. It is not allowed to modify the same key several times within one txn. |
| Compact | CompactionRequest | CompactionResponse | Compact compacts the event history in the etcd key-value store. The key-value store should be periodically compacted or the event history will continue to grow indefinitely. |

##### service `Lease` (etcdserver/etcdserverpb/rpc.proto)

| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| LeaseGrant | LeaseGrantRequest | LeaseGrantResponse | LeaseGrant creates a lease which expires if the server does not receive a keepAlive within a given time to live period. All keys attached to the lease will be expired and deleted if the lease expires. Each expired key generates a delete event in the event history. |
| LeaseRevoke | LeaseRevokeRequest | LeaseRevokeResponse | LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. |
| LeaseKeepAlive | LeaseKeepAliveRequest | LeaseKeepAliveResponse | LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client to the server and streaming keep alive responses from the server to the client. |

##### service `Maintenance` (etcdserver/etcdserverpb/rpc.proto)

| Method | Request Type | Response Type | Description |
| ------ | ------------ | ------------- | ----------- |
| Alarm | AlarmRequest | AlarmResponse | Alarm activates, deactivates, and queries alarms regarding cluster health. |
| Status | StatusRequest | StatusResponse | Status gets the status of the member. |
| Defragment | DefragmentRequest | DefragmentResponse | Defragment defragments a member's backend database to recover storage space. |
| Hash | HashRequest | HashResponse | Hash returns the hash of the local KV state for consistency checking purpose. This is designed for testing; do not use this in production when there are ongoing transactions. |
| Snapshot | SnapshotRequest | SnapshotResponse | Snapshot sends a snapshot of the entire backend from a member over a stream to a client. |
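These maintenance RPCs map onto `etcdctl` subcommands; for instance (a sketch, reusing the `$ENDPOINTS` convention from the demo above):

```
# Reclaim backend storage space on each endpoint after compaction.
etcdctl --endpoints=$ENDPOINTS defrag

# List raised alarms (e.g. NOSPACE) and clear them once space is recovered.
etcdctl --endpoints=$ENDPOINTS alarm list
etcdctl --endpoints=$ENDPOINTS alarm disarm
```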
##### service `Watch` (etcdserver/etcdserverpb/rpc.proto)
|
||||
|
||||
| Method | Request Type | Response Type | Description |
|
||||
| ------ | ------------ | ------------- | ----------- |
|
||||
| Watch | WatchRequest | WatchResponse | Watch watches for events happening or that have happened. Both input and output are streams; the input stream is for creating and canceling watchers and the output stream sends events. One watch RPC can watch on multiple key ranges, streaming events for several watches at once. The entire event history can be watched starting from the last compaction revision. |
|
||||
|
||||
|
||||
|
||||
##### message `AlarmMember` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| memberID | memberID is the ID of the member associated with the raised alarm. | uint64 |
| alarm | alarm is the type of alarm which has been raised. | AlarmType |


##### message `AlarmRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| action | action is the kind of alarm request to issue. The action may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a raised alarm. | AlarmAction |
| memberID | memberID is the ID of the member associated with the alarm. If memberID is 0, the alarm request covers all members. | uint64 |
| alarm | alarm is the type of alarm to consider for this request. | AlarmType |


##### message `AlarmResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| alarms | alarms is a list of alarms associated with the alarm request. | (slice of) AlarmMember |

##### message `AuthDisableRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `AuthDisableResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthEnableRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `AuthEnableResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthRoleAddRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the role to add to the authentication system. | string |


##### message `AuthRoleAddResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthRoleDeleteRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| role | | string |


##### message `AuthRoleDeleteResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthRoleGetRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| role | | string |


##### message `AuthRoleGetResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| perm | | (slice of) authpb.Permission |


##### message `AuthRoleGrantPermissionRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the role which will be granted the permission. | string |
| perm | perm is the permission to grant to the role. | authpb.Permission |


##### message `AuthRoleGrantPermissionResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthRoleListRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `AuthRoleListResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| roles | | (slice of) string |


##### message `AuthRoleRevokePermissionRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| role | | string |
| key | | string |
| range_end | | string |


##### message `AuthRoleRevokePermissionResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthUserAddRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | string |
| password | | string |


##### message `AuthUserAddResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthUserChangePasswordRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the user whose password is being changed. | string |
| password | password is the new password for the user. | string |


##### message `AuthUserChangePasswordResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthUserDeleteRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | name is the name of the user to delete. | string |


##### message `AuthUserDeleteResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthUserGetRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | string |


##### message `AuthUserGetResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| roles | | (slice of) string |


##### message `AuthUserGrantRoleRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| user | user is the name of the user which should be granted a given role. | string |
| role | role is the name of the role to grant to the user. | string |


##### message `AuthUserGrantRoleResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthUserListRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `AuthUserListResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| users | | (slice of) string |


##### message `AuthUserRevokeRoleRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | string |
| role | | string |


##### message `AuthUserRevokeRoleResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `AuthenticateRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | string |
| password | | string |


##### message `AuthenticateResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| token | token is an authorized token that can be used in succeeding RPCs. | string |

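A hedged sketch of driving the auth RPCs above from Go; the method names follow the `clientv3` package rather than the raw proto messages, and the user, password, and role names are examples:

```go
// Sketch: create a user and role, bind them, then enable authentication.
func setupAuth(cli *clientv3.Client) {
	ctx := context.TODO()
	if _, err := cli.UserAdd(ctx, "alice", "secret"); err != nil { // AuthUserAdd
		log.Fatal(err)
	}
	if _, err := cli.RoleAdd(ctx, "admin"); err != nil { // AuthRoleAdd
		log.Fatal(err)
	}
	if _, err := cli.UserGrantRole(ctx, "alice", "admin"); err != nil { // AuthUserGrantRole
		log.Fatal(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil { // AuthEnable
		log.Fatal(err)
	}
}
```
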
##### message `CompactionRequest` (etcdserver/etcdserverpb/rpc.proto)

CompactionRequest compacts the key-value store up to a given revision. All superseded keys with a revision less than the compaction revision will be removed.

| Field | Description | Type |
| ----- | ----------- | ---- |
| revision | revision is the key-value store revision for the compaction operation. | int64 |
| physical | physical is set so the RPC will wait until the compaction is physically applied to the local database such that compacted entries are totally removed from the backend database. | bool |


##### message `CompactionResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `Compare` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| result | result is the logical comparison operation for this comparison. | CompareResult |
| target | target is the key-value field to inspect for the comparison. | CompareTarget |
| key | key is the subject key for the comparison operation. | bytes |
| target_union | | oneof |
| version | version is the version of the given key. | int64 |
| create_revision | create_revision is the creation revision of the given key. | int64 |
| mod_revision | mod_revision is the last modified revision of the given key. | int64 |
| value | value is the value of the given key, in bytes. | bytes |

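For intuition, the compaction RPC maps onto `clientv3.Client.Compact` in Go; the revision number below is an example, and assumptions are as in the Lease sketch:

```go
// Sketch: compact history up to revision 5, waiting for physical removal.
func compact(cli *clientv3.Client) {
	// clientv3.WithCompactPhysical() corresponds to the physical field above.
	if _, err := cli.Compact(context.TODO(), 5, clientv3.WithCompactPhysical()); err != nil {
		log.Fatal(err)
	}
}
```
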
##### message `DefragmentRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `DefragmentResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `DeleteRangeRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the first key to delete in the range. | bytes |
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |


##### message `DeleteRangeResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| deleted | deleted is the number of keys deleted by the delete range request. | int64 |


##### message `HashRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `HashResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| hash | hash is the hash value computed from the responding member's key-value store. | uint32 |

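As a sketch, a ranged delete from Go `clientv3` (keys are examples; assumptions as in the Lease sketch):

```go
// Sketch: delete the range [foo, foo9) and report how many keys were removed.
func deleteRange(cli *clientv3.Client) {
	resp, err := cli.Delete(context.TODO(), "foo", clientv3.WithRange("foo9"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted keys:", resp.Deleted) // DeleteRangeResponse.deleted
}
```
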
##### message `LeaseGrantRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| TTL | TTL is the advisory time-to-live in seconds. | int64 |
| ID | ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. | int64 |


##### message `LeaseGrantResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| ID | ID is the lease ID for the granted lease. | int64 |
| TTL | TTL is the server chosen lease time-to-live in seconds. | int64 |
| error | | string |


##### message `LeaseKeepAliveRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the lease ID for the lease to keep alive. | int64 |


##### message `LeaseKeepAliveResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| ID | ID is the lease ID from the keep alive request. | int64 |
| TTL | TTL is the new time-to-live for the lease. | int64 |


##### message `LeaseRevokeRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. | int64 |


##### message `LeaseRevokeResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |

##### message `Member` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the member ID for this member. | uint64 |
| name | name is the human-readable name of the member. If the member is not started, the name will be an empty string. | string |
| peerURLs | peerURLs is the list of URLs the member exposes to the cluster for communication. | (slice of) string |
| clientURLs | clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. | (slice of) string |


##### message `MemberAddRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| peerURLs | peerURLs is the list of URLs the added member will use to communicate with the cluster. | (slice of) string |


##### message `MemberAddResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| member | member is the member information for the added member. | Member |


##### message `MemberListRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `MemberListResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| members | members is a list of all members associated with the cluster. | (slice of) Member |


##### message `MemberRemoveRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the member ID of the member to remove. | uint64 |


##### message `MemberRemoveResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |


##### message `MemberUpdateRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | ID is the member ID of the member to update. | uint64 |
| peerURLs | peerURLs is the new list of URLs the member will use to communicate with the cluster. | (slice of) string |


##### message `MemberUpdateResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |

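A brief, non-authoritative sketch of reading the `Member` fields above through the Go `clientv3` Cluster API (assumptions as in the Lease sketch):

```go
// Sketch: list cluster members and print the fields of each Member message.
func listMembers(cli *clientv3.Client) {
	resp, err := cli.MemberList(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Printf("ID=%x name=%s peerURLs=%v clientURLs=%v\n",
			m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}
}
```
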
##### message `PutRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key, in bytes, to put into the key-value store. | bytes |
| value | value is the value, in bytes, to associate with the key in the key-value store. | bytes |
| lease | lease is the lease ID to associate with the key in the key-value store. A lease value of 0 indicates no lease. | int64 |


##### message `PutResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |

##### message `RangeRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the first key for the range. If range_end is not given, the request only looks up key. | bytes |
| range_end | range_end is the upper bound on the requested range [key, range_end). If range_end is '\0', the range is all keys >= key. If range_end is one bit larger than the given key, then the range request gets all keys prefixed with key. If both key and range_end are '\0', then the range request returns all keys. | bytes |
| limit | limit is a limit on the number of keys returned for the request. | int64 |
| revision | revision is the point-in-time of the key-value store to use for the range. If revision is less or equal to zero, the range is over the newest key-value store. If the revision has been compacted, ErrCompacted is returned as a response. | int64 |
| sort_order | sort_order is the order for returned sorted results. | SortOrder |
| sort_target | sort_target is the key-value field to use for sorting. | SortTarget |
| serializable | serializable sets the range request to use serializable member-local reads. Range requests are linearizable by default; linearizable requests have higher latency and lower throughput than serializable requests but reflect the current consensus of the cluster. For better performance, in exchange for possible stale reads, a serializable range request is served locally without needing to reach consensus with other nodes in the cluster. | bool |
| keys_only | keys_only when set returns only the keys and not the values. | bool |
| count_only | count_only when set returns only the count of the keys in the range. | bool |

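As a non-authoritative sketch, most of these fields surface as `clientv3` range options in Go (option names follow the client package, not the raw proto; assumptions as in the Lease sketch):

```go
// Sketch: range over [foo, foo9) at revision 4, sorted by key, as a
// serializable (member-local) read.
func rangeExample(cli *clientv3.Client) {
	resp, err := cli.Get(context.TODO(), "foo",
		clientv3.WithRange("foo9"), // range_end
		clientv3.WithRev(4),        // revision
		clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend), // sort_target, sort_order
		clientv3.WithSerializable(), // serializable
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```
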
##### message `RangeResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| kvs | kvs is the list of key-value pairs matched by the range request. kvs is empty when count is requested. | (slice of) mvccpb.KeyValue |
| more | more indicates if there are more keys to return in the requested range. | bool |
| count | count is set to the number of keys within the range when requested. | int64 |


##### message `RequestOp` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| request | request is a union of request types accepted by a transaction. | oneof |
| request_range | | RangeRequest |
| request_put | | PutRequest |
| request_delete_range | | DeleteRangeRequest |


##### message `ResponseHeader` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| cluster_id | cluster_id is the ID of the cluster which sent the response. | uint64 |
| member_id | member_id is the ID of the member which sent the response. | uint64 |
| revision | revision is the key-value store revision when the request was applied. | int64 |
| raft_term | raft_term is the raft term when the request was applied. | uint64 |


##### message `ResponseOp` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| response | response is a union of response types returned by a transaction. | oneof |
| response_range | | RangeResponse |
| response_put | | PutResponse |
| response_delete_range | | DeleteRangeResponse |


##### message `SnapshotRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `SnapshotResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | header has the current key-value store information. The first header in the snapshot stream indicates the point in time of the snapshot. | ResponseHeader |
| remaining_bytes | remaining_bytes is the number of blob bytes to be sent after this message. | uint64 |
| blob | blob contains the next chunk of the snapshot in the snapshot stream. | bytes |


##### message `StatusRequest` (etcdserver/etcdserverpb/rpc.proto)

Empty field.


##### message `StatusResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| version | version is the cluster protocol version used by the responding member. | string |
| dbSize | dbSize is the size of the backend database, in bytes, of the responding member. | int64 |
| leader | leader is the member ID which the responding member believes is the current leader. | uint64 |
| raftIndex | raftIndex is the current raft index of the responding member. | uint64 |
| raftTerm | raftTerm is the current raft term of the responding member. | uint64 |

##### message `TxnRequest` (etcdserver/etcdserverpb/rpc.proto)

From the Google paxosdb paper: Our implementation hinges around a powerful primitive which we call MultiOp. All other database operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically and consists of three components: 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check for the absence or presence of a value, or compare with a given value. Two different tests in the guard may apply to the same or different entries in the database. All tests in the guard are applied and MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise it executes f op (see item 3 below). 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or lookup operation, and applies to a single database entry. Two different operations in the list may apply to the same or different entries in the database. These operations are executed if guard evaluates to true. 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.

| Field | Description | Type |
| ----- | ----------- | ---- |
| compare | compare is a list of predicates representing a conjunction of terms. If the comparisons succeed, then the success requests will be processed in order, and the response will contain their respective responses in order. If the comparisons fail, then the failure requests will be processed in order, and the response will contain their respective responses in order. | (slice of) Compare |
| success | success is a list of requests which will be applied when compare evaluates to true. | (slice of) RequestOp |
| failure | failure is a list of requests which will be applied when compare evaluates to false. | (slice of) RequestOp |


##### message `TxnResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| succeeded | succeeded is set to true if the compare evaluated to true, and false otherwise. | bool |
| responses | responses is a list of responses corresponding to the results from applying success if succeeded is true or failure if succeeded is false. | (slice of) ResponseOp |

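For intuition, a minimal Go `clientv3` sketch of this guard/success/failure shape (keys and values are examples; assumptions as in the Lease sketch):

```go
// Sketch: guard on the current value of "foo"; put on success, read on failure.
func txnExample(cli *clientv3.Client) {
	resp, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Value("foo"), "=", "bar")). // compare
		Then(clientv3.OpPut("foo", "baz")).                      // success ops
		Else(clientv3.OpGet("foo")).                             // failure ops
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("succeeded:", resp.Succeeded)
}
```
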
##### message `WatchCancelRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| watch_id | watch_id is the watcher id to cancel so that no more events are transmitted. | int64 |


##### message `WatchCreateRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key to register for watching. | bytes |
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. | bytes |
| start_revision | start_revision is an optional revision to watch from (inclusive). If no start_revision is given, "now" is assumed. | int64 |
| progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. The etcd server may decide how often it will send notifications based on current load. | bool |


##### message `WatchRequest` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| request_union | request_union is a request to either create a new watcher or cancel an existing watcher. | oneof |
| create_request | | WatchCreateRequest |
| cancel_request | | WatchCancelRequest |


##### message `WatchResponse` (etcdserver/etcdserverpb/rpc.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| header | | ResponseHeader |
| watch_id | watch_id is the ID of the watcher that corresponds to the response. | int64 |
| created | created is set to true if the response is for a create watch request. The client should record the watch_id and expect to receive events for the created watcher from the same stream. All events sent to the created watcher will attach with the same watch_id. | bool |
| canceled | canceled is set to true if the response is for a cancel watch request. No further events will be sent to the canceled watcher. | bool |
| compact_revision | compact_revision is set to the minimum index if a watcher tries to watch at a compacted index. This happens when creating a watcher at a compacted revision or the watcher cannot catch up with the progress of the key-value store. The client should treat the watcher as canceled and should not try to create any watcher with the same start_revision again. | int64 |
| events | | (slice of) mvccpb.Event |

##### message `Event` (mvcc/mvccpb/kv.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| type | type is the kind of event. If type is a PUT, it indicates new data has been stored to the key. If type is a DELETE, it indicates the key was deleted. | EventType |
| kv | kv holds the KeyValue for the event. A PUT event contains the current kv pair. A PUT event with kv.Version=1 indicates the creation of a key. A DELETE/EXPIRE event contains the deleted key with its modification revision set to the revision of deletion. | KeyValue |


##### message `KeyValue` (mvcc/mvccpb/kv.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key in bytes. An empty key is not allowed. | bytes |
| create_revision | create_revision is the revision of the last creation on this key. | int64 |
| mod_revision | mod_revision is the revision of the last modification on this key. | int64 |
| version | version is the version of the key. A deletion resets the version to zero and any modification of the key increases its version. | int64 |
| value | value is the value held by the key, in bytes. | bytes |
| lease | lease is the ID of the lease attached to the key. When the attached lease expires, the key will be deleted. If lease is 0, then no lease is attached to the key. | int64 |

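A hedged sketch of how these fields surface in Go through a `clientv3` range response (assumptions as in the Lease sketch):

```go
// Sketch: print the MVCC metadata carried on each KeyValue in a range response.
func printKVs(cli *clientv3.Client) {
	resp, err := cli.Get(context.TODO(), "foo", clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("key=%s value=%s create=%d mod=%d version=%d lease=%d\n",
			kv.Key, kv.Value, kv.CreateRevision, kv.ModRevision, kv.Version, kv.Lease)
	}
}
```
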
##### message `Lease` (lease/leasepb/lease.proto)

| Field | Description | Type |
| ----- | ----------- | ---- |
| ID | | int64 |
| TTL | | int64 |


##### message `Permission` (auth/authpb/auth.proto)

Permission is a single entity.

| Field | Description | Type |
| ----- | ----------- | ---- |
| permType | | Type |
| key | | bytes |
| range_end | | bytes |


##### message `Role` (auth/authpb/auth.proto)

Role is a single entry in the bucket authRoles.

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | bytes |
| keyPermission | | (slice of) Permission |


##### message `User` (auth/authpb/auth.proto)

User is a single entry in the bucket authUsers.

| Field | Description | Type |
| ----- | ----------- | ---- |
| name | | bytes |
| password | | bytes |
| roles | | (slice of) string |

2102	Documentation/dev-guide/apispec/swagger/rpc.swagger.json	(new file; diff suppressed because it is too large)

8	Documentation/dev-guide/experimental_apis.md	(new file)

@@ -0,0 +1,8 @@
# Experimental APIs and features

For the most part, the etcd project is stable, but we are still moving fast! We believe in the release fast philosophy. We want to get early feedback on features still in development and stabilizing. Thus, there are, and will be more, experimental features and APIs. We plan to improve these features based on early feedback from the community, or abandon them if there is little interest, in the next few releases. If you are running a production system, please do not rely on any experimental features or APIs.

## The current experimental APIs/features are:

- v3 auth API: expected to be stable in the 3.1 release
- etcd gateway: expected to be stable in the 3.1 release

243	Documentation/dev-guide/interacting_v3.md	(new file)

@@ -0,0 +1,243 @@
# Interacting with etcd

Users mostly interact with etcd by putting or getting the value of a key. This section describes how to do that by using etcdctl, a command line tool for interacting with the etcd server. The concepts described here should apply to the gRPC APIs or client library APIs.

By default, etcdctl talks to the etcd server with the v2 API for backward compatibility. For etcdctl to speak to etcd using the v3 API, the API version must be set to version 3 via the `ETCDCTL_API` environment variable.

``` bash
export ETCDCTL_API=3
```

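The same operations are available from Go through the `clientv3` package; here is a minimal, non-authoritative sketch (the endpoint and timeout values are assumptions):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Connect to a local etcd member; the endpoint is an assumption.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Put and get a key, mirroring `etcdctl put foo bar` / `etcdctl get foo`.
	if _, err := cli.Put(context.TODO(), "foo", "bar"); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Get(context.TODO(), "foo")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s: %s\n", kv.Key, kv.Value)
	}
}
```
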
## Write a key

Applications store keys into the etcd cluster by writing to keys. Every stored key is replicated to all etcd cluster members through the Raft protocol to achieve consistency and reliability.

Here is the command to set the value of key `foo` to `bar`:

``` bash
$ etcdctl put foo bar
OK
```

## Read keys

Applications can read values of keys from an etcd cluster. Queries may read a single key, or a range of keys.

Suppose the etcd cluster has stored the following keys:

```
foo = bar
foo1 = bar1
foo3 = bar3
```

Here is the command to read the value of key `foo`:

```bash
$ etcdctl get foo
foo
bar
```

Here is the command to range over the keys from `foo` to `foo9`:

```bash
$ etcdctl get foo foo9
foo
bar
foo1
bar1
foo3
bar3
```

## Read past version of keys

Applications may want to read superseded versions of a key. For example, an application may wish to roll back to an old configuration by accessing an earlier version of a key. Alternatively, an application may want a consistent view over multiple keys through multiple requests by accessing key history.
Since every modification to the etcd cluster key-value store increments the global revision of an etcd cluster, an application can read superseded keys by providing an older etcd revision.

Suppose an etcd cluster already has the following keys:

``` bash
$ etcdctl put foo bar         # revision = 2
$ etcdctl put foo1 bar1       # revision = 3
$ etcdctl put foo bar_new     # revision = 4
$ etcdctl put foo1 bar1_new   # revision = 5
```

Here is an example of accessing past versions of keys:

```bash
$ etcdctl get foo foo9 # access the most recent versions of keys
foo
bar_new
foo1
bar1_new

$ etcdctl get --rev=4 foo foo9 # access the versions of keys at revision 4
foo
bar_new
foo1
bar1

$ etcdctl get --rev=3 foo foo9 # access the versions of keys at revision 3
foo
bar
foo1
bar1

$ etcdctl get --rev=2 foo foo9 # access the versions of keys at revision 2
foo
bar

$ etcdctl get --rev=1 foo foo9 # access the versions of keys at revision 1
```

## Delete keys

Applications can delete a key or a range of keys from an etcd cluster.

Here is the command to delete key `foo`:

```bash
$ etcdctl del foo
1 # one key is deleted
```

Here is the command to delete keys ranging from `foo` to `foo9`:

```bash
$ etcdctl del foo foo9
2 # two keys are deleted
```

## Watch key changes

Applications can watch on a key or a range of keys to monitor for any updates.

Here is the command to watch on key `foo`:

```bash
$ etcdctl watch foo
# in another terminal: etcdctl put foo bar
foo
bar
```

Here is the command to watch on a range of keys from `foo` to `foo9`:

```bash
$ etcdctl watch foo foo9
# in another terminal: etcdctl put foo bar
foo
bar
# in another terminal: etcdctl put foo1 bar1
foo1
bar1
```

## Watch historical changes of keys

Applications may want to watch for historical changes of keys in etcd. For example, an application may wish to receive all the modifications of a key; if the application stays connected to etcd, then `watch` is good enough. However, if the application or etcd fails, a change may happen during the failure, and the application will not receive the update in real time. To guarantee the update is delivered, the application must be able to watch for historical changes to keys. To do this, an application can specify a historical revision on a watch, just like reading past versions of keys.

Suppose we finished the following sequence of operations:

``` bash
etcdctl put foo bar         # revision = 2
etcdctl put foo1 bar1       # revision = 3
etcdctl put foo bar_new     # revision = 4
etcdctl put foo1 bar1_new   # revision = 5
```

Here is an example to watch the historical changes:

```bash
# watch for changes on key `foo` since revision 2
$ etcdctl watch --rev=2 foo
PUT
foo
bar
PUT
foo
bar_new

# watch for changes on key `foo` since revision 3
$ etcdctl watch --rev=3 foo
PUT
foo
bar_new
```

## Compacted revisions

As we mentioned, etcd keeps revisions so that applications can read past versions of keys. However, to avoid accumulating an unbounded amount of history, it is important to compact past revisions. After compacting, etcd removes historical revisions, releasing resources for future use. All superseded data with revisions before the compacted revision will be unavailable.

Here is the command to compact the revisions:

```bash
$ etcdctl compact 5
compacted revision 5

# any revisions before the compacted one are not accessible
$ etcdctl get --rev=4 foo
Error: rpc error: code = 11 desc = etcdserver: mvcc: required revision has been compacted
```

## Grant leases

Applications can grant leases for keys from an etcd cluster. When a key is attached to a lease, its lifetime is bound to the lease's lifetime, which in turn is governed by a time-to-live (TTL). Each lease has a minimum time-to-live (TTL) value specified by the application at grant time. The lease's actual TTL value is at least the minimum TTL and is chosen by the etcd cluster. Once a lease's TTL elapses, the lease expires and all attached keys are deleted.

Here is the command to grant a lease:

```
# grant a lease with 10 second TTL
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)

# attach key foo to lease 32695410dcc0ca06
$ etcdctl put --lease=32695410dcc0ca06 foo bar
OK
```

## Revoke leases

Applications revoke leases by lease ID. Revoking a lease deletes all of its attached keys.

Suppose we finished the following sequence of operations:

```
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
$ etcdctl put --lease=32695410dcc0ca06 foo bar
OK
```

Here is the command to revoke the same lease:

```
$ etcdctl lease revoke 32695410dcc0ca06
lease 32695410dcc0ca06 revoked

$ etcdctl get foo
# empty response since foo is deleted due to lease revocation
```

## Keep leases alive

Applications can keep a lease alive by refreshing its TTL so it does not expire.

Suppose we finished the following sequence of operations:

```
$ etcdctl lease grant 10
lease 32695410dcc0ca06 granted with TTL(10s)
```

Here is the command to keep the same lease alive:

```
$ etcdctl lease keep-alive 32695410dcc0ca06
lease 32695410dcc0ca06 keepalived with TTL(10)
lease 32695410dcc0ca06 keepalived with TTL(10)
lease 32695410dcc0ca06 keepalived with TTL(10)
...
```

90	Documentation/dev-guide/local_cluster.md	(new file)

@@ -0,0 +1,90 @@
# Setup a local cluster

For testing and development deployments, the quickest and easiest way is to set up a local cluster. For a production deployment, refer to the [clustering][clustering] section.

## Local standalone cluster

Deploying an etcd cluster as a standalone cluster is straightforward. Start it with just one command:

```
$ ./etcd
...
```

The started etcd member listens on `localhost:2379` for client requests.

To interact with the started cluster by using etcdctl:

```
# use API version 3
$ export ETCDCTL_API=3

$ ./etcdctl put foo bar
OK

$ ./etcdctl get foo
bar
```

## Local multi-member cluster

A Procfile is provided to easily set up a local multi-member cluster. Start a multi-member cluster with a few commands:

```
# install the goreman program to control Procfile-based applications
$ go get github.com/mattn/goreman
$ goreman -f Procfile start
...
```

The started members listen on `localhost:12379`, `localhost:22379`, and `localhost:32379` for client requests respectively.

To interact with the started cluster by using etcdctl:

```
# use API version 3
$ export ETCDCTL_API=3

$ etcdctl --write-out=table --endpoints=localhost:12379 member list
+------------------+---------+--------+------------------------+------------------------+
|        ID        | STATUS  |  NAME  |       PEER ADDRS       |      CLIENT ADDRS      |
+------------------+---------+--------+------------------------+------------------------+
| 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:12380 | http://127.0.0.1:12379 |
| 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 |
| fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 |
+------------------+---------+--------+------------------------+------------------------+

$ etcdctl --endpoints=localhost:12379 put foo bar
OK
```

To exercise etcd's fault tolerance, kill a member:

```
# kill etcd2
$ goreman run stop etcd2

$ etcdctl --endpoints=localhost:12379 put key hello
OK

$ etcdctl --endpoints=localhost:12379 get key
hello

# try to get the key from the killed member
$ etcdctl --endpoints=localhost:22379 get key
2016/04/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379"
Error: grpc: timed out trying to connect

# restart the killed member
$ goreman run restart etcd2

# get the key from the restarted member
$ etcdctl --endpoints=localhost:22379 get key
hello
```

To learn more about interacting with etcd, read the [interacting with etcd section][interacting].

[interacting]: ./interacting_v3.md
[clustering]: ../op-guide/clustering.md

113	Documentation/dev-internal/discovery_protocol.md	(new file)

@@ -0,0 +1,113 @@
# Discovery service protocol

The discovery service protocol helps a new etcd member discover all the other members during the cluster bootstrap phase, using a shared discovery URL.

The discovery service protocol is _only_ used during the cluster bootstrap phase; it cannot be used for runtime reconfiguration or cluster monitoring.

The protocol uses a new discovery token to bootstrap one _unique_ etcd cluster. Remember that one discovery token can represent only one etcd cluster. Once the discovery protocol has started on a token, even if it fails halfway, the token must not be used to bootstrap another etcd cluster.

The rest of this article will walk through the discovery process with examples that correspond to a self-hosted discovery cluster. The public discovery service, discovery.etcd.io, functions the same way, but with a layer of polish to abstract away ugly URLs, generate UUIDs automatically, and provide some protections against excessive requests. At its core, the public discovery service still uses an etcd cluster as the data store as described in this document.

## Protocol workflow

The idea of the discovery protocol is to use an internal etcd cluster to coordinate the bootstrap of a new cluster. First, all new members interact with the discovery service and help to generate the expected member list. Then each new member bootstraps its server using this list, which performs the same functionality as the -initial-cluster flag.

In the following example workflow, we will list each step of the protocol in curl format for ease of understanding.

By convention the etcd discovery protocol uses the key prefix `_etcd/registry`. If `http://example.com` hosts an etcd cluster for the discovery service, the full URL to the discovery keyspace will be `http://example.com/v2/keys/_etcd/registry`. We will use this as the URL prefix in the example.

### Creating a new discovery token

Generate a unique token that will identify the new cluster. This will be used as a unique prefix in the discovery keyspace in the following steps. An easy way to do this is to use `uuidgen`:

```
UUID=$(uuidgen)
```

### Specifying the expected cluster size

The discovery token expects a cluster size that must be specified. The size is used by the discovery service to know when it has found all the members that will initially form the cluster.

```
curl -X PUT http://example.com/v2/keys/_etcd/registry/${UUID}/_config/size -d value=${cluster_size}
```

Usually the cluster size is 3, 5 or 7. Check [optimal cluster size][cluster-size] for more details.

### Bringing up etcd processes

Given the discovery URL, use it as the `-discovery` flag and bring up the etcd processes. Every etcd process will follow these next few steps internally if given a `-discovery` flag.

### Registering itself

The first thing each etcd process does is register itself into the discovery URL as a member. This is done by creating the member ID as a key in the discovery URL.

```
curl -X PUT http://example.com/v2/keys/_etcd/registry/${UUID}/${member_id}?prevExist=false -d value="${member_name}=${member_peer_url_1}&${member_name}=${member_peer_url_2}"
```

### Checking the status

It checks the expected cluster size and the registration status in the discovery URL, and decides what the next action is.

```
curl -X GET http://example.com/v2/keys/_etcd/registry/${UUID}/_config/size
curl -X GET http://example.com/v2/keys/_etcd/registry/${UUID}
```

If the number of registered members is still not enough, it waits for the remaining members to appear.

If the number of registered members is bigger than the expected size N, it treats the first N registered members as the member list for the cluster. If the member itself is in the member list, the discovery procedure succeeds and it fetches all its peers through the member list. If it is not in the member list, the discovery procedure fails because the cluster is already full.

In the etcd implementation, the member may check the cluster status even before registering itself, so it can fail quickly if the cluster is already full.

### Waiting for all members

The wait process is described in detail in the [etcd API documentation][api].

```
curl -X GET http://example.com/v2/keys/_etcd/registry/${UUID}?wait=true&waitIndex=${current_etcd_index}
```

It keeps waiting until all members are found.

## Public discovery service

CoreOS Inc. hosts a public discovery service at https://discovery.etcd.io/ , which provides some nice features for ease of use.

### Mask key prefix

The public discovery service redirects `https://discovery.etcd.io/${UUID}` to the backing etcd cluster at the key prefix `/v2/keys/_etcd/registry`. Masking the registry key prefix yields a short and readable discovery URL.

### Get new token

```
GET /new

Sent query:
	size=${cluster_size}
Possible status codes:
	200 OK
	400 Bad Request
200 Body:
	generated discovery url
```

The generation process in the service follows the steps from [Creating a New Discovery Token][new-discovery-token] to [Specifying the Expected Cluster Size][expected-cluster-size].

### Check discovery status

```
GET /${UUID}
```

The status for this discovery token, including the machines that have been registered, can be checked by requesting the value of the UUID.

### Open-source repository

The repository is located at https://github.com/coreos/discovery.etcd.io. It could be used to build a custom discovery service.

[api]: ../v2/api.md#waiting-for-a-change
[cluster-size]: ../v2/admin_guide.md#optimal-cluster-size
[expected-cluster-size]: #specifying-the-expected-cluster-size
[new-discovery-token]: #creating-a-new-discovery-token

29	Documentation/dev-internal/logging.md	(new file)

@@ -0,0 +1,29 @@
# Logging conventions

etcd uses the [capnslog][capnslog] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions:

* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
  * Examples:
    * A failure to allocate disk space for WAL

* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
  * Examples:
    * Failure to send a raft message to a remote peer
    * Failure to receive a heartbeat message within the configured election timeout

* Notice: Normal, but important (uncommon) log information.
  * Examples:
    * Add a new node into the cluster
    * Add a new user into the auth subsystem

* Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
  * Examples:
    * Startup configuration
    * Start to do snapshot

* Debug: Everything is still fine, but even common operations may be logged, producing a greater quantity of less helpful notices.
  * Examples:
    * Send a normal message to a remote peer
    * Write a log entry to disk

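For orientation only, here is a small Go sketch of how a package might emit logs at these levels with capnslog; the repository and package names passed to the logger are illustrative assumptions:

```go
package main

import (
	"github.com/coreos/pkg/capnslog"
)

// Package-scoped logger; the repo and package names here are illustrative.
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "example")

func main() {
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Errorf("failed to allocate disk space for WAL: %v", "out of space") // Error
	plog.Warningf("failed to send raft message to %s", "peer1")              // Warning
	plog.Noticef("added node %s to the cluster", "infra2")                   // Notice
	plog.Infof("starting server with data dir %q", "/var/lib/etcd")          // Info
	plog.Debugf("sent message to remote peer %s", "peer1")                   // Debug
}
```
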
[capnslog]: https://github.com/coreos/pkg/tree/master/capnslog

109	Documentation/dev-internal/release.md	(new file)

@@ -0,0 +1,109 @@
# etcd release guide

This guide talks about how to release a new version of etcd.

The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date if making changes to the release process.

## Prepare release

Set the desired version as an environment variable for the following steps. Here is an example to release 2.3.0:

```
export VERSION=v2.3.0
export PREV_VERSION=v2.2.5
```

All release version numbers follow the format of [semantic versioning 2.0.0](http://semver.org/).

### Major, minor version release, or its pre-release

- Ensure the relevant milestone on GitHub is complete. All referenced issues should be closed, or moved elsewhere.
- Remove this release from the [roadmap](https://github.com/coreos/etcd/blob/master/ROADMAP.md), if necessary.
- Ensure the latest upgrade documentation is available.
- Bump the [hardcoded MinClusterVersion in the repository](https://github.com/coreos/etcd/blob/master/version/version.go#L29), if necessary.
- Add feature capability maps for the new version, if necessary.

### Patch version release

- Discuss the commits that are backported to the patch release. The commits should not include merge commits.
- Cherry-pick these commits, starting from the oldest one, into the stable branch.

## Write release note

- Write an introduction for the new release. For example, what major bugs we fixed, what new features we introduced, or what performance improvements we made.
- Write a changelog for the last release. The changelog should be straightforward and easy to understand for the end-user.
- Put `[GH XXXX]` at the head of the change line to reference the Pull Request that introduces the change. Moreover, add a link on it to jump to the Pull Request.

## Tag version

- Bump the [hardcoded Version in the repository](https://github.com/coreos/etcd/blob/master/version/version.go#L30) to the latest version `${VERSION}`.
- Ensure all tests on the CI system pass.
- Manually check that etcd is buildable on Linux, Darwin and Windows.
- Manually check that upgrading an etcd cluster from the previous minor version works well.
- Manually check that new features work well.
- Add a signed tag through `git tag -s ${VERSION}`.
- Sanity check tag correctness through `git show tags/$VERSION`.
- Push the tag to GitHub through `git push origin tags/$VERSION`. This assumes `origin` corresponds to "https://github.com/coreos/etcd".

## Build release binaries and images

- Ensure `actool` is available, or install it through `go get github.com/appc/spec/actool`.
- Ensure `docker` is available.

Run the release script in the root directory:

```
./scripts/release.sh ${VERSION}
```

It generates all release binaries and images under the directory ./release.

## Sign binaries and images

The etcd project key must be used to sign the generated binaries and images. `$SUBKEYID` is the key ID of the etcd project Yubikey. Connect the key and run `gpg2 --card-status` to get the ID.

The following commands are used for public release signing:

```
cd release
for i in etcd-*{.zip,.tar.gz}; do gpg2 --default-key $SUBKEYID --armor --output ${i}.asc --detach-sign ${i}; done
for i in etcd-*{.zip,.tar.gz}; do gpg2 --verify ${i}.asc ${i}; done
```

The public key for GPG signing can be found at [CoreOS Application Signing Key](https://coreos.com/security/app-signing-key).

## Publish release page in GitHub

- Set the release title as the version name.
- Follow the format of previous release pages.
- Attach the generated binaries, ACI image, and signatures.
- Select whether it is a pre-release.
- Publish the release!

## Publish docker image in Quay.io

- Push the docker image:

```
docker login quay.io
docker push quay.io/coreos/etcd:${VERSION}
```

- Add the `latest` tag to the new image on [quay.io](https://quay.io/repository/coreos/etcd?tag=latest&tab=tags) if this is a stable release.

## Announce to the etcd-dev Googlegroup

- Follow the format of [previous release emails](https://groups.google.com/forum/#!forum/etcd-dev).
- Make sure to include a list of authors that contributed since the previous release - something like the following might be handy:

```
git log ...${PREV_VERSION} --pretty=format:"%an" | sort | uniq | tr '\n' ',' | sed -e 's#,#, #g' -e 's#, $##'
```

- Send email to etcd-dev@googlegroups.com.

## Post release

- Create a new stable branch through `git push origin ${VERSION_MAJOR}.${VERSION_MINOR}` if this is a major stable release. This assumes `origin` corresponds to "https://github.com/coreos/etcd".
- Bump the [hardcoded Version in the repository](https://github.com/coreos/etcd/blob/master/version/version.go#L30) to the version `${VERSION}+git`.

56	Documentation/dl_build.md	(new file)

@@ -0,0 +1,56 @@
# Download and build

## System requirements

The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than applications with large v3 data stores, since v2 data is kept in anonymous memory instead of memory mapped from a file. For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
## Download the pre-built binary

The easiest way to get etcd is to use one of the pre-built release binaries, which are available for OSX, Linux, Windows, appc, and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].

## Build the latest version

For those wanting to try the very latest version, build etcd from the `master` branch. [Go](https://golang.org/) version 1.6+ (with HTTP2 support) is required to build the latest version of etcd.

Here are the commands to build an etcd binary from the `master` branch:
```
# go is required
$ go version
go version go1.6 darwin/amd64

# GOPATH should be set correctly
$ echo $GOPATH
/Users/example/go

$ mkdir -p $GOPATH/src/github.com/coreos
$ cd $GOPATH/src/github.com/coreos
$ git clone https://github.com/coreos/etcd.git
$ cd etcd
$ ./build
$ ./bin/etcd
...
```
## Test the installation

Check that the etcd binary is built correctly by starting etcd and setting a key.

Start etcd:

```
$ ./bin/etcd
```

Set a key:

```
$ ETCDCTL_API=3 ./bin/etcdctl put foo bar
OK
```

If OK is printed, then etcd is working!
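Read the key back to confirm the round trip:

```
$ ETCDCTL_API=3 ./bin/etcdctl get foo
foo
bar
```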
[github-release]: https://github.com/coreos/etcd/releases/
[go]: https://golang.org/doc/install
Documentation/docs.md
Normal file
@@ -0,0 +1,73 @@
# Documentation

etcd is a distributed key-value store designed to reliably and quickly preserve and provide access to critical data. It enables reliable distributed coordination through distributed locking, leader elections, and write barriers. An etcd cluster is intended for high availability and permanent data storage and retrieval.

## Getting started

New etcd users and developers should get started by [downloading and building][download_build] etcd. After getting etcd, follow this [quick demo][demo] to see the basics of creating and working with an etcd cluster.

## Developing with etcd

The easiest way to get started using etcd as a distributed key-value store is to [set up a local cluster][local_cluster].

- [Setting up local clusters][local_cluster]
- [Interacting with etcd][interacting]
- [API references][api_ref]
- [gRPC gateway][api_grpc_gateway]
- [Experimental features and APIs][experimental]

## Operating etcd clusters

Administrators who need to create reliable and scalable key-value stores for the developers they support should begin with a [cluster on multiple machines][clustering].

- [Setting up clusters][clustering]
- [Run etcd clusters inside containers][container]
- [Configuration][conf]
- [Security][security]
- Monitoring
- [Maintenance][maintenance]
- [Understand failures][failures]
- [Disaster recovery][recovery]
- [Performance][performance]
- [Versioning][versioning]
- [Supported platform][supported_platform]

## Learning

To learn more about the concepts and internals behind etcd, read the following pages:

- Why etcd (TODO)
- [Understand data model][data_model]
- [Understand APIs][understand_apis]
- [Glossary][glossary]
- Internals (TODO)

## Upgrading and compatibility

- [Migrate applications from using API v2 to API v3][v2_migration]
- [Updating v2.3 to v3.0][v3_upgrade]

## Troubleshooting

[api_ref]: dev-guide/api_reference_v3.md
[api_grpc_gateway]: dev-guide/api_grpc_gateway.md
[clustering]: op-guide/clustering.md
[conf]: op-guide/configuration.md
[data_model]: learning/data_model.md
[demo]: demo.md
[download_build]: dl_build.md
[failures]: op-guide/failures.md
[glossary]: learning/glossary.md
[interacting]: dev-guide/interacting_v3.md
[local_cluster]: dev-guide/local_cluster.md
[performance]: op-guide/performance.md
[recovery]: op-guide/recovery.md
[maintenance]: op-guide/maintenance.md
[security]: op-guide/security.md
[v2_migration]: op-guide/v2-migration.md
[container]: op-guide/container.md
[understand_apis]: learning/api.md
[versioning]: op-guide/versioning.md
[supported_platform]: op-guide/supported-platform.md
[experimental]: dev-guide/experimental_apis.md
[v3_upgrade]: upgrades/upgrade_3_0.md
Documentation/learning/api.md
Normal file
@@ -0,0 +1,57 @@
# etcd3 API

NOTE: this doc is not finished!

## Response header

All responses from the etcd API have a [response header][response_header] attached. The response header includes the metadata of the response.

```proto
message ResponseHeader {
  uint64 cluster_id = 1;
  uint64 member_id = 2;
  int64 revision = 3;
  uint64 raft_term = 4;
}
```

* Cluster_ID - the ID of the cluster that generates the response
* Member_ID - the ID of the member that generates the response
* Revision - the revision of the key-value store when the response is generated
* Raft_Term - the Raft term of the member when the response is generated

An application may read the Cluster_ID (Member_ID) field to ensure it is communicating with the intended cluster (member).

Applications can use `Revision` to know the latest revision of the key-value store. This is especially useful when applications specify a historical revision to make a "time travel query" and wish to know the latest revision at the time of the request.

Applications can use `Raft_Term` to detect when the cluster completes a new leader election.
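The header can also be inspected from the command line; for instance, `etcdctl` can emit responses as JSON (a sketch; the IDs, revision, and term below are illustrative values):

```
$ ETCDCTL_API=3 etcdctl get foo --write-out=json
{"header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":2,"raft_term":2},...}
```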
## Key-Value API

The Key-Value API is used to manipulate key-value pairs stored inside etcd. The Key-Value API is defined as a [gRPC service][kv-service]. The key-value pair is defined as structured data in [protobuf format][kv-proto].

### Key-Value pair

A key-value pair is the smallest unit that the key-value API can manipulate. Each key-value pair has a number of fields:

```protobuf
message KeyValue {
  bytes key = 1;
  int64 create_revision = 2;
  int64 mod_revision = 3;
  int64 version = 4;
  bytes value = 5;
  int64 lease = 6;
}
```

* Key - key in bytes. An empty key is not allowed.
* Value - value in bytes.
* Version - version is the version of the key. A deletion resets the version to zero and any modification of the key increases its version.
* Create_Revision - revision of the last creation on the key.
* Mod_Revision - revision of the last modification on the key.
* Lease - the ID of the lease attached to the key. If lease is 0, then no lease is attached to the key.
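These fields can be observed with `etcdctl`. In the following sketch (illustrative output; `"Zm9v"` and `"YmF6"` are the base64 encodings of `foo` and `baz`), writing the same key twice leaves `create_revision` fixed while `mod_revision` and `version` advance:

```
$ ETCDCTL_API=3 etcdctl put foo bar
$ ETCDCTL_API=3 etcdctl put foo baz
$ ETCDCTL_API=3 etcdctl get foo --write-out=json
{"header":{...},"kvs":[{"key":"Zm9v","create_revision":2,"mod_revision":3,"version":2,"value":"YmF6"}],"count":1}
```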
[kv-proto]: https://github.com/coreos/etcd/blob/master/mvcc/mvccpb/kv.proto
[kv-service]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
[response_header]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
Documentation/learning/api_guarantees.md
Normal file
@@ -0,0 +1,63 @@
# KV API guarantees

etcd is a consistent and durable key value store with mini-transaction (TODO: link to txn doc when we have it) support. The key value store is exposed through the KV APIs. etcd tries to ensure the strongest consistency and durability guarantees for a distributed system. This specification enumerates the KV API guarantees made by etcd.

### APIs to consider

* Read APIs
  * range
  * watch
* Write APIs
  * put
  * delete
* Combination (read-modify-write) APIs
  * txn

### etcd specific definitions

#### Operation completed

An etcd operation is considered complete when it is committed through consensus, and therefore “executed” -- permanently stored -- by the etcd storage engine. The client knows an operation is completed when it receives a response from the etcd server. Note that the client may be uncertain about the status of an operation if it times out, or there is a network disruption between the client and the etcd member. etcd may also abort operations when there is a leader election. etcd does not send `abort` responses to clients’ outstanding requests in this event.

#### Revision

An etcd operation that modifies the key value store is assigned a single increasing revision. A transaction operation might modify the key value store multiple times, but only one revision is assigned. The revision attribute of a key value pair that is modified by the operation has the same value as the revision of the operation. The revision can be used as a logical clock for the key value store. A key value pair that has a larger revision is modified after a key value pair with a smaller revision. Two key value pairs that have the same revision are modified by an operation "concurrently".

### Guarantees provided

#### Atomicity

All API requests are atomic; an operation either completes entirely or not at all. For watch requests, all events generated by one operation will be in one watch response. Watch never observes partial events for a single operation.

#### Consistency

All API calls ensure [sequential consistency][seq_consistency], the strongest consistency guarantee available from distributed systems. No matter which etcd member server a client makes requests to, a client reads the same events in the same order. If two members complete the same number of operations, the state of the two members is consistent.

For watch operations, etcd guarantees to return the same value for the same key across all members for the same revision. For range operations, etcd has a similar guarantee for [linearized][Linearizability] access; serialized access may be behind the quorum state, so that the later revision is not yet available.

As with all distributed systems, it is impossible for etcd to ensure [strict consistency][strict_consistency]. etcd does not guarantee that it will return to a read the “most recent” value (as measured by a wall clock when a request is completed) available on any cluster member.

#### Isolation

etcd ensures [serializable isolation][serializable_isolation], which is the highest isolation level available in distributed systems. Read operations will never observe any intermediate data.

#### Durability

Any completed operations are durable. All accessible data is also durable data. A read will never return data that has not been made durable.

#### Linearizability

Linearizability (also known as Atomic Consistency or External Consistency) is a consistency level between strict consistency and sequential consistency.

For linearizability, suppose each operation receives a timestamp from a loosely synchronized global clock. Operations are linearized if and only if they always complete as though they were executed in a sequential order and each operation appears to complete in the order specified by the program. Likewise, if an operation’s timestamp precedes another, that operation must also precede the other operation in the sequence.

For example, consider a client completing a write at time point 1 (*t1*). A client issuing a read at *t2* (for *t2* > *t1*) should receive a value at least as recent as the previous write, completed at *t1*. However, the read might actually complete only by *t3*, and the returned value, current at *t2* when the read began, might be "stale" by *t3*.

etcd does not ensure linearizability for watch operations. Users are expected to verify the revision of watch responses to ensure correct ordering.

etcd ensures linearizability for all other operations by default. Linearizability comes with a cost, however, because linearized requests must go through the Raft consensus process. To obtain lower latencies and higher throughput for read requests, clients can configure a request’s consistency mode to `serializable`, which may access stale data with respect to quorum, but removes the performance penalty of linearized accesses' reliance on live consensus.
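From the command line, this trade-off can be exercised through `etcdctl`'s consistency option (a sketch; `l`, linearizable, is the default and `s` selects serializable reads):

```
$ ETCDCTL_API=3 etcdctl get foo --consistency=l
$ ETCDCTL_API=3 etcdctl get foo --consistency=s
```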
[seq_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency
[strict_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency
[serializable_isolation]: https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable
[Linearizability]: #Linearizability
Documentation/learning/data_model.md
Normal file
@@ -0,0 +1,25 @@
# Data model

etcd is designed to reliably store infrequently updated data and provide reliable watch queries. etcd exposes previous versions of key-value pairs to support inexpensive snapshots and watch history events (“time travel queries”). A persistent, multi-version, concurrency-control data model is a good fit for these use cases.

etcd stores data in a multiversion [persistent][persistent-ds] key-value store. The persistent key-value store preserves the previous version of a key-value pair when its value is superseded with new data. The key-value store is effectively immutable; its operations do not update the structure in-place, but instead always generate a new updated structure. All past versions of keys are still accessible and watchable after modification. To prevent the data store from growing indefinitely over time from maintaining old versions, the store may be compacted to shed the oldest versions of superseded data.

### Logical view

The store’s logical view is a flat binary key space. The key space has a lexically sorted index on byte string keys, so range queries are inexpensive.

The key space maintains multiple revisions. Each atomic mutative operation (e.g., a transaction operation may contain multiple operations) creates a new revision on the key space. All data held by previous revisions remains unchanged. Old versions of a key can still be accessed through previous revisions. Likewise, revisions are indexed as well; ranging over revisions with watchers is efficient. If the store is compacted to recover space, revisions before the compaction revision will be removed.

A key’s lifetime spans a generation. Each key may have one or multiple generations. Creating a key increments the generation of that key, starting at 1 if the key never existed. Deleting a key generates a key tombstone, concluding the key’s current generation. Each modification of a key creates a new version of the key. Once a compaction happens, any generation ended before the compaction revision will be removed, and values set before the compaction revision, except the latest one, will be removed.
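As a quick sketch of this behavior with `etcdctl` (the revision numbers are illustrative; actual values depend on prior writes):

```
$ ETCDCTL_API=3 etcdctl put foo bar    # suppose this creates revision 2
$ ETCDCTL_API=3 etcdctl put foo baz    # revision 3
$ ETCDCTL_API=3 etcdctl get foo --rev=2
foo
bar
```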
### Physical view

etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store’s state only contains the delta from its previous revision to be efficient. A single revision may correspond to multiple keys in the tree.

The key of each key-value pair is a 3-tuple (major, sub, type). Major is the store revision holding the key. Sub differentiates among keys within the same revision. Type is an optional suffix for special values (e.g., `t` if the value contains a tombstone). The value of the key-value pair contains the modification from the previous revision, thus one delta from the previous revision. The b+tree is ordered by key in lexical byte-order. Ranged lookups over revision deltas are fast; this enables quickly finding modifications from one specific revision to another. Compaction removes out-of-date key-value pairs.

etcd also keeps a secondary in-memory [btree][btree] index to speed up range queries over keys. The keys in the btree index are the keys of the store exposed to the user. The value is a pointer to the modification of the persistent b+tree. Compaction removes dead pointers.

[persistent-ds]: https://en.wikipedia.org/wiki/Persistent_data_structure
[btree]: https://en.wikipedia.org/wiki/B-tree
[b+tree]: https://en.wikipedia.org/wiki/B%2B_tree
@@ -1,4 +1,4 @@

# Libraries and tools

**Tools**

@@ -17,7 +17,8 @@

**Go libraries**

- [etcd/clientv3](https://github.com/coreos/etcd/blob/master/clientv3) - the officially maintained Go client for v3
- [etcd/client](https://github.com/coreos/etcd/blob/master/client) - the officially maintained Go client for v2
- [go-etcd](https://github.com/coreos/go-etcd) - the deprecated official client. May be useful for older (<2.0.0) versions of etcd.

**Java libraries**

@@ -27,6 +28,11 @@

- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
- [jurmous/etcd4j](https://github.com/jurmous/etcd4j) - Supports v2, Async/Sync, waits and SSL
- [AdoHe/etcd4j](http://github.com/AdoHe/etcd4j) - Supports v2 (enhanced for real production clusters)
- [cdancy/etcd-rest](https://github.com/cdancy/etcd-rest) - Uses jclouds to provide a complete implementation of v2 API.

**Scala libraries**

- [maciej/etcd-client](https://github.com/maciej/etcd-client) - Supports v2. Akka HTTP-based fully async client

**Python libraries**

@@ -87,6 +93,10 @@

- [efrecon/etcd-tcl](https://github.com/efrecon/etcd-tcl) - Supports v2, except wait.

**Gradle Plugins**

- [gradle-etcd-rest-plugin](https://github.com/cdancy/gradle-etcd-rest-plugin) - Supports v2

**Chef Integration**

- [coderanger/etcd-chef](https://github.com/coderanger/etcd-chef)

@@ -122,3 +132,4 @@

- [spf13/viper](https://github.com/spf13/viper) - Go configuration library, reads values from ENV, pflags, files, and etcd with optional encryption
- [lytics/metafora](https://github.com/lytics/metafora) - Go distributed task library
- [ryandoyle/nss-etcd](https://github.com/ryandoyle/nss-etcd) - A GNU libc NSS module for resolving names from etcd.
- [Gru](https://github.com/dnaeon/gru) - Orchestration made easy with Go
@@ -1,134 +1,136 @@

# Metrics

etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset.

The simplest way to see the available metrics is to cURL the metrics endpoint `/metrics`. The format is described [here](http://prometheus.io/docs/instrumenting/exposition_formats/).
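For example, with a local member serving client traffic on the default `http://localhost:2379`:

```
$ curl -L http://localhost:2379/metrics
```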
Follow the [Prometheus getting started doc][prometheus-getting-started] to spin up a Prometheus server to collect etcd metrics.

The naming of metrics follows the suggested [Prometheus best practices][prometheus-naming]. A metric name has an `etcd` or `etcd_debugging` prefix as its namespace and a subsystem prefix (for example `wal` and `etcdserver`).

## etcd namespace metrics

The metrics under the `etcd` prefix are for monitoring and alerting. They are stable, high-level metrics. If there is any change to these metrics, it will be included in the release notes.

Metrics that are etcd2 related are documented in the [v2 metrics guide][v2-http-metrics].

### Server

These metrics describe the status of the etcd server. In order to detect outages or problems for troubleshooting, the server metrics of every production etcd cluster should be closely monitored.

All these metrics are prefixed with `etcd_server_`.

| Name                      | Description                                               | Type    |
|---------------------------|-----------------------------------------------------------|---------|
| has_leader                | Whether or not a leader exists. 1 is existence, 0 is not. | Gauge   |
| leader_changes_seen_total | The number of leader changes seen.                        | Counter |
| proposals_committed_total | The total number of consensus proposals committed.        | Gauge   |
| proposals_applied_total   | The total number of consensus proposals applied.          | Gauge   |
| proposals_pending         | The current number of pending proposals.                  | Gauge   |
| proposals_failed_total    | The total number of failed proposals seen.                | Counter |

`has_leader` indicates whether the member has a leader. If a member does not have a leader, it is totally unavailable. If all the members in the cluster do not have any leader, the entire cluster is totally unavailable.

`leader_changes_seen_total` counts the number of leader changes the member has seen since its start. Rapid leadership changes impact the performance of etcd significantly. It also signals that the leader is unstable, perhaps due to network connectivity issues or excessive load hitting the etcd cluster.

`proposals_committed_total` records the total number of consensus proposals committed. This gauge should increase over time if the cluster is healthy. Several healthy members of an etcd cluster may have different total committed proposals at once. This discrepancy may be due to recovering from peers after starting, lagging behind the leader, or being the leader and therefore having the most commits. It is important to monitor this metric across all the members in the cluster; a consistently large lag between a single member and its leader indicates that member is slow or unhealthy.

`proposals_applied_total` records the total number of consensus proposals applied. The etcd server applies every committed proposal asynchronously. The difference between `proposals_committed_total` and `proposals_applied_total` should usually be small (within a few thousands, even under high load). If the difference between them continues to rise, it indicates that the etcd server is overloaded. This might happen when applying expensive queries like heavy range queries or large txn operations.

`proposals_pending` indicates how many proposals are queued to commit. Rising pending proposals suggests there is a high client load or the member cannot commit proposals.

`proposals_failed_total` are normally related to two issues: temporary failures related to a leader election or longer downtime caused by a loss of quorum in the cluster.
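As a sketch, reusing the `job="etcd"` labeling from the example queries later in this document, the rate of failed proposals across all members can be tracked with:

```
sum(rate(etcd_server_proposals_failed_total{job="etcd"}[1m]))
```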
### Disk

These metrics describe the status of the disk operations.

All these metrics are prefixed with `etcd_disk_`.

| Name                             | Description                                             | Type      |
|----------------------------------|---------------------------------------------------------|-----------|
| wal_fsync_duration_seconds       | The latency distributions of fsync called by wal        | Histogram |
| backend_commit_duration_seconds  | The latency distributions of commit called by backend.  | Histogram |

A `wal_fsync` is called when etcd persists its log entries to disk before applying them.

A `backend_commit` is called when etcd commits an incremental snapshot of its most recent changes to disk.

High disk operation latencies (`wal_fsync_duration_seconds` or `backend_commit_duration_seconds`) often indicate disk issues. They may cause high request latency or make the cluster unstable.
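As a sketch in the style of the example queries later in this document (the `_bucket` suffix is standard Prometheus histogram naming; exact labels depend on the Prometheus setup), the 0.99-tile fsync latency across all members over a `5m` window is:

```
histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{job="etcd"}[5m])) by (le))
```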
### Network

These metrics describe the status of the network.

All these metrics are prefixed with `etcd_network_`.

| Name                             | Description                                                       | Type          |
|----------------------------------|-------------------------------------------------------------------|---------------|
| peer_sent_bytes_total            | The total number of bytes sent to the peer with ID `To`.          | Counter(To)   |
| peer_received_bytes_total        | The total number of bytes received from the peer with ID `From`.  | Counter(From) |
| peer_round_trip_time_seconds     | Round-Trip-Time histogram between peers.                          | Histogram(To) |
| client_grpc_sent_bytes_total     | The total number of bytes sent to grpc clients.                   | Counter       |
| client_grpc_received_bytes_total | The total number of bytes received from grpc clients.             | Counter       |

`peer_sent_bytes_total` counts the total number of bytes sent to a specific peer. Usually the leader member sends more data than other members since it is responsible for transmitting replicated data.

`peer_received_bytes_total` counts the total number of bytes received from a specific peer. Usually follower members receive data only from the leader member.

### gRPC requests

These metrics describe the requests served by a specific etcd member: total received requests, total failed requests, and processing latency. They are useful for tracking user-generated traffic hitting the etcd cluster.

All these metrics are prefixed with `etcd_grpc_`.

| Name                             | Description                                  | Type                   |
|----------------------------------|----------------------------------------------|------------------------|
| requests_total                   | Total number of received requests.           | Counter(method)        |
| requests_failed_total            | Total number of failed requests.             | Counter(method,error)  |
| unary_requests_duration_seconds  | Bucketed handling duration of the requests.  | Histogram(method)      |

Example Prometheus queries that may be useful from these metrics (across all etcd members):

* `sum(rate(etcd_grpc_requests_failed_total{job="etcd"}[1m])) by (grpc_method) / sum(rate(etcd_grpc_requests_total{job="etcd"}[1m])) by (grpc_method)`

  Shows the fraction of requests that failed by gRPC method across all members, across a time window of `1m`.

* `sum(rate(etcd_grpc_requests_total{job="etcd",grpc_method="PUT"}[1m])) by (grpc_method)`

  Shows the rate of PUT requests across all members, across a time window of `1m`.

* `histogram_quantile(0.9, sum(rate(etcd_grpc_unary_requests_duration_seconds_bucket{job="etcd",grpc_method="PUT"}[5m])) by (le))`

  Show the 0.90-tile latency (in seconds) of PUT request handling across all members, with a window of `5m`.

## etcd_debugging namespace metrics

The metrics under the `etcd_debugging` prefix are for debugging. They are very implementation dependent and volatile. They might be changed or removed without any warning in new etcd releases. Some of the metrics might be moved to the `etcd` prefix when they become more stable.

### Snapshot

| Name                                  | Description                                                  | Type      |
|---------------------------------------|--------------------------------------------------------------|-----------|
| snapshot_save_total_duration_seconds  | The total latency distributions of save called by snapshot   | Histogram |

Abnormally high snapshot duration (`snapshot_save_total_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.

## Prometheus supplied metrics

The Prometheus client library provides a number of metrics under the `go` and `process` namespaces. There are a few that are particularly interesting.

| Name              | Description                               | Type  |
|-------------------|-------------------------------------------|-------|
| process_open_fds  | Number of open file descriptors.          | Gauge |
| process_max_fds   | Maximum number of open file descriptors.  | Gauge |

Heavy file descriptor (`process_open_fds`) usage (i.e., near the process's file descriptor limit, `process_max_fds`) indicates a potential file descriptor exhaustion issue. If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.

## proxy

etcd members operating in proxy mode do not perform store operations. They forward all requests to cluster instances.

Tracking the rate of requests coming from a proxy allows one to pin down which machine is performing most reads/writes.

All these metrics are prefixed with `etcd_proxy_`.

| Name                       | Description                                                                         | Type                   |
|----------------------------|-------------------------------------------------------------------------------------|------------------------|
| requests_total             | Total number of requests by this proxy instance.                                    | Counter(method)        |
| handled_total              | Total number of fully handled requests, with responses from etcd members.           | Counter(method)        |
| dropped_total              | Total number of dropped requests due to forwarding errors to etcd members.          | Counter(method,error)  |
| handling_duration_seconds  | Bucketed handling times by HTTP method, including round trip to member instances.   | Histogram(method)      |

Example Prometheus queries that may be useful from these metrics (across all etcd servers):

* `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)`

  Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`.

* `histogram_quantile(0.9, sum(increase(etcd_proxy_handling_duration_seconds_bucket{job="etcd",method="GET"}[5m])) by (le))`

  `histogram_quantile(0.9, sum(increase(etcd_proxy_handling_duration_seconds_bucket{job="etcd",method!="GET"}[5m])) by (le))`

  Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`.

* `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)`

  Number of failed requests on the proxy. This should be 0; spikes here indicate connectivity issues to the etcd cluster.

[glossary-proposal]: learning/glossary.md#proposal
[prometheus]: http://prometheus.io/
[prometheus-getting-started]: http://prometheus.io/docs/introduction/getting_started/
[prometheus-naming]: http://prometheus.io/docs/practices/naming/
[v2-http-metrics]: v2/metrics.md#http-requests
Documentation/op-guide/clustering.md
Normal file
@@ -0,0 +1,474 @@
# Clustering Guide

## Overview

Starting an etcd cluster statically requires that each member knows another in the cluster. In a number of cases, the IPs of the cluster members may be unknown ahead of time. In these cases, the etcd cluster can be bootstrapped with the help of a discovery service.

Once an etcd cluster is up and running, adding or removing members is done via [runtime reconfiguration][runtime-conf]. To better understand the design behind runtime reconfiguration, we suggest reading [the runtime configuration design document][runtime-reconf-design].

This guide will cover the following mechanisms for bootstrapping an etcd cluster:

* [Static](#static)
* [etcd Discovery](#etcd-discovery)
* [DNS Discovery](#dns-discovery)

Each of the bootstrapping mechanisms will be used to create a three machine etcd cluster with the following details:

|Name|Address|Hostname|
|------|---------|------------------|
|infra0|10.0.1.10|infra0.example.com|
|infra1|10.0.1.11|infra1.example.com|
|infra2|10.0.1.12|infra2.example.com|

## Static

As we know the cluster members, their addresses, and the size of the cluster before starting, we can use an offline bootstrap configuration by setting the `initial-cluster` flag. Each machine will get either the following environment variables or command line flags:

```
ETCD_INITIAL_CLUSTER="infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380"
ETCD_INITIAL_CLUSTER_STATE=new
```

```
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
--initial-cluster-state new
```

Note that the URLs specified in `initial-cluster` are the _advertised peer URLs_, i.e. they should match the value of `initial-advertise-peer-urls` on the respective nodes.

If spinning up multiple clusters (or creating and destroying a single cluster) with the same configuration for testing purposes, it is highly recommended that each cluster is given a unique `initial-cluster-token`. By doing this, etcd can generate unique cluster IDs and member IDs for the clusters even if they otherwise have the exact same configuration. This can protect etcd from cross-cluster interaction, which might corrupt the clusters.

etcd listens on [`listen-client-urls`][conf-listen-client] to accept client traffic. An etcd member advertises the URLs specified in [`advertise-client-urls`][conf-adv-client] to other members, proxies, and clients. Please make sure the `advertise-client-urls` are reachable from the intended clients. A common mistake is setting `advertise-client-urls` to localhost, or leaving it as the default, when remote clients should reach etcd.

On each machine, start etcd with these flags:

```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
  --initial-cluster-state new
```
```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
  --listen-peer-urls http://10.0.1.11:2380 \
  --listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.11:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
  --initial-cluster-state new
```
```
$ etcd --name infra2 --initial-advertise-peer-urls http://10.0.1.12:2380 \
  --listen-peer-urls http://10.0.1.12:2380 \
  --listen-client-urls http://10.0.1.12:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.12:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
  --initial-cluster-state new
```

The command line parameters starting with `--initial-cluster` will be ignored on subsequent runs of etcd. Feel free to remove the environment variables or command line flags after the initial bootstrap process. If the configuration needs changes later (for example, adding or removing members to/from the cluster), see the [runtime configuration][runtime-conf] guide.
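Once all three members are up, a quick sanity check is to list the cluster membership; a sketch, assuming `etcdctl` is available and using infra0's client URL as the endpoint:

```
$ ETCDCTL_API=3 etcdctl --endpoints=http://10.0.1.10:2379 member list
```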
### TLS

etcd supports encrypted communication through the TLS protocol. TLS channels can be used for encrypted internal cluster communication between peers as well as encrypted client traffic. This section provides examples for setting up a cluster with peer and client TLS. Additional information detailing etcd's TLS support can be found in the [security guide][security-guide].

#### Self-signed certificates

A cluster using self-signed certificates both encrypts traffic and authenticates its connections. To start a cluster with self-signed certificates, each cluster member should have a unique key pair (`member.crt`, `member.key`) signed by a shared cluster CA certificate (`ca.crt`) for both peer connections and client connections. Certificates may be generated by following the etcd [TLS setup][tls-setup] example.

On each machine, etcd would be started with these flags:

```
$ etcd --name infra0 --initial-advertise-peer-urls https://10.0.1.10:2380 \
  --listen-peer-urls https://10.0.1.10:2380 \
  --listen-client-urls https://10.0.1.10:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://10.0.1.10:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=https://10.0.1.10:2380,infra1=https://10.0.1.11:2380,infra2=https://10.0.1.12:2380 \
  --initial-cluster-state new \
  --client-cert-auth --trusted-ca-file=/path/to/ca-client.crt \
  --cert-file=/path/to/infra0-client.crt --key-file=/path/to/infra0-client.key \
  --peer-client-cert-auth --peer-trusted-ca-file=ca-peer.crt \
  --peer-cert-file=/path/to/infra0-peer.crt --peer-key-file=/path/to/infra0-peer.key
```
```
$ etcd --name infra1 --initial-advertise-peer-urls https://10.0.1.11:2380 \
  --listen-peer-urls https://10.0.1.11:2380 \
  --listen-client-urls https://10.0.1.11:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://10.0.1.11:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=https://10.0.1.10:2380,infra1=https://10.0.1.11:2380,infra2=https://10.0.1.12:2380 \
  --initial-cluster-state new \
  --client-cert-auth --trusted-ca-file=/path/to/ca-client.crt \
  --cert-file=/path/to/infra1-client.crt --key-file=/path/to/infra1-client.key \
  --peer-client-cert-auth --peer-trusted-ca-file=ca-peer.crt \
  --peer-cert-file=/path/to/infra1-peer.crt --peer-key-file=/path/to/infra1-peer.key
```
```
$ etcd --name infra2 --initial-advertise-peer-urls https://10.0.1.12:2380 \
  --listen-peer-urls https://10.0.1.12:2380 \
  --listen-client-urls https://10.0.1.12:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://10.0.1.12:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=https://10.0.1.10:2380,infra1=https://10.0.1.11:2380,infra2=https://10.0.1.12:2380 \
  --initial-cluster-state new \
  --client-cert-auth --trusted-ca-file=/path/to/ca-client.crt \
  --cert-file=/path/to/infra2-client.crt --key-file=/path/to/infra2-client.key \
  --peer-client-cert-auth --peer-trusted-ca-file=ca-peer.crt \
  --peer-cert-file=/path/to/infra2-peer.crt --peer-key-file=/path/to/infra2-peer.key
```

#### Automatic certificates

If the cluster needs encrypted communication but does not require authenticated connections, etcd can be configured to automatically generate its keys. On initialization, each member creates its own set of keys based on its advertised IP addresses and hosts.

On each machine, etcd would be started with these flags:

```
$ etcd --name infra0 --initial-advertise-peer-urls https://10.0.1.10:2380 \
  --listen-peer-urls https://10.0.1.10:2380 \
  --listen-client-urls https://10.0.1.10:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://10.0.1.10:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=https://10.0.1.10:2380,infra1=https://10.0.1.11:2380,infra2=https://10.0.1.12:2380 \
  --initial-cluster-state new \
  --auto-tls \
  --peer-auto-tls
```
```
$ etcd --name infra1 --initial-advertise-peer-urls https://10.0.1.11:2380 \
  --listen-peer-urls https://10.0.1.11:2380 \
  --listen-client-urls https://10.0.1.11:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://10.0.1.11:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=https://10.0.1.10:2380,infra1=https://10.0.1.11:2380,infra2=https://10.0.1.12:2380 \
  --initial-cluster-state new \
  --auto-tls \
  --peer-auto-tls
```
```
$ etcd --name infra2 --initial-advertise-peer-urls https://10.0.1.12:2380 \
  --listen-peer-urls https://10.0.1.12:2380 \
  --listen-client-urls https://10.0.1.12:2379,https://127.0.0.1:2379 \
  --advertise-client-urls https://10.0.1.12:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster infra0=https://10.0.1.10:2380,infra1=https://10.0.1.11:2380,infra2=https://10.0.1.12:2380 \
  --initial-cluster-state new \
  --auto-tls \
  --peer-auto-tls
```

### Error cases

In the following example, we have not included our new host in the list of enumerated nodes. If this is a new cluster, the node _must_ be added to the list of initial cluster members.

```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
  --listen-peer-urls http://10.0.1.11:2380 \
  --listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.11:2379 \
  --initial-cluster infra0=http://10.0.1.10:2380 \
  --initial-cluster-state new
etcd: infra1 not listed in the initial cluster config
exit 1
```

In this example, we are attempting to map a node (infra0) on a different address (127.0.0.1:2380) than its enumerated address in the cluster list (10.0.1.10:2380). If this node is to listen on multiple addresses, all addresses _must_ be reflected in the "initial-cluster" configuration directive.

```
$ etcd --name infra0 --initial-advertise-peer-urls http://127.0.0.1:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
  --initial-cluster-state=new
etcd: error setting up initial cluster: infra0 has different advertised URLs in the cluster and advertised peer URLs list
exit 1
```

If a peer is configured with a different set of configuration arguments and attempts to join this cluster, etcd will report a cluster ID mismatch and will exit.

```
$ etcd --name infra3 --initial-advertise-peer-urls http://10.0.1.13:2380 \
  --listen-peer-urls http://10.0.1.13:2380 \
  --listen-client-urls http://10.0.1.13:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.13:2379 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra3=http://10.0.1.13:2380 \
  --initial-cluster-state=new
etcd: conflicting cluster ID to the target cluster (c6ab534d07e8fcc4 != bc25ea2a74fb18b0). Exiting.
exit 1
```
## Discovery

In a number of cases, the IPs of the cluster peers may not be known ahead of time. This is common when utilizing cloud providers or when the network uses DHCP. In these cases, rather than specifying a static configuration, use an existing etcd cluster to bootstrap a new one. We call this process "discovery".

There are two methods that can be used for discovery:

* etcd discovery service
* DNS SRV records

### etcd discovery

To better understand the design of the discovery service protocol, we suggest reading the discovery service protocol [documentation][discovery-proto].

#### Lifetime of a discovery URL

A discovery URL identifies a unique etcd cluster. Instead of reusing a discovery URL, always create discovery URLs for new clusters.

Moreover, discovery URLs should ONLY be used for the initial bootstrapping of a cluster. To change cluster membership after the cluster is already running, see the [runtime reconfiguration][runtime-conf] guide.

#### Custom etcd discovery service

Discovery uses an existing cluster to bootstrap itself. If using a private etcd cluster, create a URL like so:

```
$ curl -X PUT https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83/_config/size -d value=3
```

Setting the size key on the URL creates a discovery URL with an expected cluster size of 3.

The URL to use in this case will be `https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83` and the etcd members will use the `https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83` directory for registration as they start.

**Each member must have a different name flag specified. `Hostname` or `machine-id` can be a good choice. Otherwise discovery will fail due to duplicated names.**

Now we start etcd with those relevant flags for each member:

```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --discovery https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83
```
```
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
  --listen-peer-urls http://10.0.1.11:2380 \
  --listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.11:2379 \
  --discovery https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83
```
```
$ etcd --name infra2 --initial-advertise-peer-urls http://10.0.1.12:2380 \
  --listen-peer-urls http://10.0.1.12:2380 \
  --listen-client-urls http://10.0.1.12:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.12:2379 \
  --discovery https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83
```

This will cause each member to register itself with the custom etcd discovery service and begin the cluster once all machines have been registered.

#### Public etcd discovery service

If no existing cluster is available, use the public discovery service hosted at `discovery.etcd.io`. To create a private discovery URL using the "new" endpoint, use the command:

```
$ curl https://discovery.etcd.io/new?size=3
https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```

This will create the cluster with an initial expected size of 3 members. If no size is specified, a default of 3 is used.
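To request a different expected size, pass it to the same endpoint; for example, for a five-member cluster:

```
$ curl https://discovery.etcd.io/new?size=5
```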
|
||||
```
|
||||
ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
|
||||
```
|
||||
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
|
||||
**Each member must have a different name flag specified. `Hostname` or `machine-id` can be a good choice. Or discovery will fail due to duplicated name.**
|
||||
|
||||
Now we start etcd with those relevant flags for each member:
|
||||
|
||||
```
|
||||
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
|
||||
--listen-peer-urls http://10.0.1.10:2380 \
|
||||
--listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
|
||||
--advertise-client-urls http://10.0.1.10:2379 \
|
||||
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
```
|
||||
$ etcd --name infra1 --initial-advertise-peer-urls http://10.0.1.11:2380 \
|
||||
--listen-peer-urls http://10.0.1.11:2380 \
|
||||
--listen-client-urls http://10.0.1.11:2379,http://127.0.0.1:2379 \
|
||||
--advertise-client-urls http://10.0.1.11:2379 \
|
||||
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
```
|
||||
$ etcd --name infra2 --initial-advertise-peer-urls http://10.0.1.12:2380 \
|
||||
--listen-peer-urls http://10.0.1.12:2380 \
|
||||
--listen-client-urls http://10.0.1.12:2379,http://127.0.0.1:2379 \
|
||||
--advertise-client-urls http://10.0.1.12:2379 \
|
||||
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
|
||||
This will cause each member to register itself with the discovery service and begin the cluster once all members have been registered.
|
||||
|
||||
Use the environment variable `ETCD_DISCOVERY_PROXY` to cause etcd to use an HTTP proxy to connect to the discovery service.

#### Error and warning cases

##### Discovery server errors

```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
etcd: error: the cluster doesn’t have a size configuration value in https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de/_config
exit 1
```

##### Warnings

This is a harmless warning indicating the discovery URL will be ignored on this machine.

```
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at /var/lib/etcd
```

### DNS discovery

DNS [SRV records][rfc-srv] can be used as a discovery mechanism.
The `-discovery-srv` flag can be used to set the DNS domain name where the discovery SRV records can be found.
The following DNS SRV records are looked up in the listed order:

* _etcd-server-ssl._tcp.example.com
* _etcd-server._tcp.example.com

If `_etcd-server-ssl._tcp.example.com` is found then etcd will attempt the bootstrapping process over TLS.

To help clients discover the etcd cluster, the following DNS SRV records are looked up in the listed order:

* _etcd-client._tcp.example.com
* _etcd-client-ssl._tcp.example.com

If `_etcd-client-ssl._tcp.example.com` is found, clients will attempt to communicate with the etcd cluster over SSL/TLS.

If etcd is using TLS without a custom certificate authority, the discovery domain (e.g., example.com) must match the SRV record domain (e.g., infra1.example.com). This is to mitigate attacks that forge SRV records to point to a different domain; the domain would have a valid certificate under PKI but be controlled by an unknown third party.

#### Create DNS SRV records

```
$ dig +noall +answer SRV _etcd-server._tcp.example.com
_etcd-server._tcp.example.com. 300 IN SRV 0 0 2380 infra0.example.com.
_etcd-server._tcp.example.com. 300 IN SRV 0 0 2380 infra1.example.com.
_etcd-server._tcp.example.com. 300 IN SRV 0 0 2380 infra2.example.com.
```

```
$ dig +noall +answer SRV _etcd-client._tcp.example.com
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra0.example.com.
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra1.example.com.
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra2.example.com.
```

```
$ dig +noall +answer infra0.example.com infra1.example.com infra2.example.com
infra0.example.com. 300 IN A 10.0.1.10
infra1.example.com. 300 IN A 10.0.1.11
infra2.example.com. 300 IN A 10.0.1.12
```
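
For reference, the records answered above could be defined in a zone file fragment roughly like the following (a sketch in BIND-style syntax, assuming an `$ORIGIN` of `example.com.`; adapt to the DNS server actually in use):

```
_etcd-server._tcp  300  IN  SRV  0 0 2380  infra0.example.com.
_etcd-server._tcp  300  IN  SRV  0 0 2380  infra1.example.com.
_etcd-server._tcp  300  IN  SRV  0 0 2380  infra2.example.com.
_etcd-client._tcp  300  IN  SRV  0 0 2379  infra0.example.com.
_etcd-client._tcp  300  IN  SRV  0 0 2379  infra1.example.com.
_etcd-client._tcp  300  IN  SRV  0 0 2379  infra2.example.com.
infra0             300  IN  A    10.0.1.10
infra1             300  IN  A    10.0.1.11
infra2             300  IN  A    10.0.1.12
```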

#### Bootstrap the etcd cluster using DNS

etcd cluster members can listen on domain names or IP addresses; the bootstrap process will resolve DNS A records.

The resolved address in `--initial-advertise-peer-urls` *must match* one of the resolved addresses in the SRV targets. The etcd member reads the resolved address to find out if it belongs to the cluster defined in the SRV records.

```
$ etcd --name infra0 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://infra0.example.com:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://infra0.example.com:2379 \
  --listen-client-urls http://infra0.example.com:2379 \
  --listen-peer-urls http://infra0.example.com:2380
```

```
$ etcd --name infra1 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://infra1.example.com:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://infra1.example.com:2379 \
  --listen-client-urls http://infra1.example.com:2379 \
  --listen-peer-urls http://infra1.example.com:2380
```

```
$ etcd --name infra2 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://infra2.example.com:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://infra2.example.com:2379 \
  --listen-client-urls http://infra2.example.com:2379 \
  --listen-peer-urls http://infra2.example.com:2380
```

The cluster can also bootstrap using IP addresses instead of domain names:

```
$ etcd --name infra0 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://10.0.1.10:2379 \
  --listen-client-urls http://10.0.1.10:2379 \
  --listen-peer-urls http://10.0.1.10:2380
```

```
$ etcd --name infra1 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://10.0.1.11:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://10.0.1.11:2379 \
  --listen-client-urls http://10.0.1.11:2379 \
  --listen-peer-urls http://10.0.1.11:2380
```

```
$ etcd --name infra2 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://10.0.1.12:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://10.0.1.12:2379 \
  --listen-client-urls http://10.0.1.12:2379 \
  --listen-peer-urls http://10.0.1.12:2380
```

### Proxy

When the `--proxy` flag is set, etcd runs in [proxy mode][proxy]. This proxy mode only supports the etcd v2 API; there are no plans to support the v3 API. Instead, for v3 API support, there will be a new proxy with enhanced features following the etcd 3.0 release.

To set up an etcd cluster with proxies of the v2 API, please read the [clustering doc in the etcd 2.3 release][clustering_etcd2].

[conf-adv-client]: configuration.md#--advertise-client-urls
[conf-listen-client]: configuration.md#--listen-client-urls
[discovery-proto]: ../dev-internal/discovery_protocol.md
[rfc-srv]: http://www.ietf.org/rfc/rfc2052.txt
[runtime-conf]: runtime-configuration.md
[runtime-reconf-design]: runtime-reconf-design.md
[proxy]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/proxy.md
[clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md
[security-guide]: security.md
[tls-setup]: /hack/tls-setup

Documentation/op-guide/configuration.md (new file)

# Configuration flags

etcd is configurable through command-line flags and environment variables. Options set on the command line take precedence over those from the environment.

The environment variable for flag `--my-flag` is `ETCD_MY_FLAG`; this naming scheme applies to all flags.
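
For example, the following two invocations are equivalent:

```
$ etcd --name infra0
$ ETCD_NAME=infra0 etcd
```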

The [official etcd ports][iana-ports] are 2379 for client requests and 2380 for peer communication. The etcd ports can be set to accept TLS traffic, non-TLS traffic, or both TLS and non-TLS traffic.

To start etcd automatically with custom settings at boot on Linux, using a [systemd][systemd-intro] unit is highly recommended.

## Member flags

### --name
+ Human-readable name for this member.
+ default: "default"
+ env variable: ETCD_NAME
+ This value is referenced as this node's own entry in the `--initial-cluster` flag (e.g., `default=http://localhost:2380`). It needs to match the key used in that flag when using [static bootstrapping][build-cluster]. When using discovery, each member must have a unique name; the machine's hostname or `machine-id` can be a good choice.

### --data-dir
+ Path to the data directory.
+ default: "${name}.etcd"
+ env variable: ETCD_DATA_DIR

### --wal-dir
+ Path to the dedicated WAL directory. If this flag is set, etcd will write its WAL files to this directory rather than the data directory. This allows a dedicated disk to be used, and helps avoid I/O competition between write-ahead logging and other I/O operations.
+ default: ""
+ env variable: ETCD_WAL_DIR

### --snapshot-count
+ Number of committed transactions to trigger a snapshot to disk.
+ default: "10000"
+ env variable: ETCD_SNAPSHOT_COUNT

### --heartbeat-interval
+ Time (in milliseconds) of a heartbeat interval.
+ default: "100"
+ env variable: ETCD_HEARTBEAT_INTERVAL

### --election-timeout
+ Time (in milliseconds) for an election to time out. See [Documentation/tuning.md][tuning] for details.
+ default: "1000"
+ env variable: ETCD_ELECTION_TIMEOUT

### --listen-peer-urls
+ List of URLs to listen on for peer traffic. This flag tells etcd to accept incoming requests from its peers on the specified scheme://IP:port combinations. The scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd listens on the given port on all interfaces. If an IP address is given as well as a port, etcd will listen on the given port and interface. Multiple URLs may be used to specify a number of addresses and ports to listen on; etcd will respond to requests from any of the listed addresses and ports.
+ default: "http://localhost:2380"
+ env variable: ETCD_LISTEN_PEER_URLS
+ example: "http://10.0.0.1:2380"
+ invalid example: "http://example.com:2380" (domain name is invalid for binding)

### --listen-client-urls
+ List of URLs to listen on for client traffic. This flag tells etcd to accept incoming requests from clients on the specified scheme://IP:port combinations. The scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd listens on the given port on all interfaces. If an IP address is given as well as a port, etcd will listen on the given port and interface. Multiple URLs may be used to specify a number of addresses and ports to listen on; etcd will respond to requests from any of the listed addresses and ports.
+ default: "http://localhost:2379"
+ env variable: ETCD_LISTEN_CLIENT_URLS
+ example: "http://10.0.0.1:2379"
+ invalid example: "http://example.com:2379" (domain name is invalid for binding)

### --max-snapshots
+ Maximum number of snapshot files to retain (0 is unlimited).
+ default: 5
+ env variable: ETCD_MAX_SNAPSHOTS
+ The default for users on Windows is unlimited, and manual purging down to 5 (or whatever the preference for safety) is recommended.

### --max-wals
+ Maximum number of WAL files to retain (0 is unlimited).
+ default: 5
+ env variable: ETCD_MAX_WALS
+ The default for users on Windows is unlimited, and manual purging down to 5 (or whatever the preference for safety) is recommended.

### --cors
+ Comma-separated whitelist of origins for CORS (cross-origin resource sharing).
+ default: none
+ env variable: ETCD_CORS

## Clustering flags

`--initial` prefix flags are used when bootstrapping a new member ([static bootstrap][build-cluster], [discovery-service bootstrap][discovery] or [runtime reconfiguration][reconfig]), and are ignored when restarting an existing member.

`--discovery` prefix flags need to be set when using the [discovery service][discovery].

### --initial-advertise-peer-urls

+ List of this member's peer URLs to advertise to the rest of the cluster. These addresses are used for communicating etcd data around the cluster. At least one must be routable to all cluster members. These URLs can contain domain names.
+ default: "http://localhost:2380"
+ env variable: ETCD_INITIAL_ADVERTISE_PEER_URLS
+ example: "http://example.com:2380, http://10.0.0.1:2380"

### --initial-cluster
+ Initial cluster configuration for bootstrapping.
+ default: "default=http://localhost:2380"
+ env variable: ETCD_INITIAL_CLUSTER
+ The key is the value of the `--name` flag for each node provided. The default uses `default` for the key because this is the default for the `--name` flag.

### --initial-cluster-state
+ Initial cluster state ("new" or "existing"). Set to `new` for all members present during initial static or DNS bootstrapping. If this option is set to `existing`, etcd will attempt to join the existing cluster. If the wrong value is set, etcd will attempt to start but fail safely.
+ default: "new"
+ env variable: ETCD_INITIAL_CLUSTER_STATE

### --initial-cluster-token
+ Initial cluster token for the etcd cluster during bootstrap.
+ default: "etcd-cluster"
+ env variable: ETCD_INITIAL_CLUSTER_TOKEN

### --advertise-client-urls
+ List of this member's client URLs to advertise to the rest of the cluster. These URLs can contain domain names.
+ default: "http://localhost:2379"
+ env variable: ETCD_ADVERTISE_CLIENT_URLS
+ example: "http://example.com:2379, http://10.0.0.1:2379"
+ Be careful when advertising URLs such as http://localhost:2379 from a cluster member while using the proxy feature of etcd. This will cause loops, because the proxy will be forwarding requests to itself until its resources (memory, file descriptors) are eventually depleted.

### --discovery
+ Discovery URL used to bootstrap the cluster.
+ default: none
+ env variable: ETCD_DISCOVERY

### --discovery-srv
+ DNS SRV domain used to bootstrap the cluster.
+ default: none
+ env variable: ETCD_DISCOVERY_SRV

### --discovery-fallback
+ Expected behavior ("exit" or "proxy") when the discovery service fails. "proxy" supports the v2 API only.
+ default: "proxy"
+ env variable: ETCD_DISCOVERY_FALLBACK

### --discovery-proxy
+ HTTP proxy to use for traffic to the discovery service.
+ default: none
+ env variable: ETCD_DISCOVERY_PROXY

### --strict-reconfig-check
+ Reject reconfiguration requests that would cause quorum loss.
+ default: false
+ env variable: ETCD_STRICT_RECONFIG_CHECK

### --auto-compaction-retention
+ Auto compaction retention for the mvcc key-value store, in hours. 0 means auto compaction is disabled.
+ default: 0
+ env variable: ETCD_AUTO_COMPACTION_RETENTION

## Proxy flags

`--proxy` prefix flags configure etcd to run in [proxy mode][proxy]. "proxy" supports the v2 API only.

### --proxy
+ Proxy mode setting ("off", "readonly" or "on").
+ default: "off"
+ env variable: ETCD_PROXY

### --proxy-failure-wait
+ Time (in milliseconds) an endpoint will be held in a failed state before being reconsidered for proxied requests.
+ default: 5000
+ env variable: ETCD_PROXY_FAILURE_WAIT

### --proxy-refresh-interval
+ Time (in milliseconds) of the endpoints refresh interval.
+ default: 30000
+ env variable: ETCD_PROXY_REFRESH_INTERVAL

### --proxy-dial-timeout
+ Time (in milliseconds) for a dial to time out, or 0 to disable the timeout.
+ default: 1000
+ env variable: ETCD_PROXY_DIAL_TIMEOUT

### --proxy-write-timeout
+ Time (in milliseconds) for a write to time out, or 0 to disable the timeout.
+ default: 5000
+ env variable: ETCD_PROXY_WRITE_TIMEOUT

### --proxy-read-timeout
+ Time (in milliseconds) for a read to time out, or 0 to disable the timeout.
+ Don't change this value when using watches, because watches use long-polling requests.
+ default: 0
+ env variable: ETCD_PROXY_READ_TIMEOUT

## Security flags

The security flags help to [build a secure etcd cluster][security].
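
As an illustration of how these flags combine, a member serving clients over TLS with client certificate authentication might be started as follows (the certificate paths are placeholders):

```
$ etcd --name infra0 \
  --cert-file=/etc/etcd/server.crt --key-file=/etc/etcd/server.key \
  --trusted-ca-file=/etc/etcd/ca.crt --client-cert-auth \
  --advertise-client-urls https://10.0.1.10:2379 \
  --listen-client-urls https://10.0.1.10:2379
```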

### --ca-file [DEPRECATED]
+ Path to the client server TLS CA file. `--ca-file ca.crt` can be replaced by `--trusted-ca-file ca.crt --client-cert-auth` and etcd will perform the same.
+ default: none
+ env variable: ETCD_CA_FILE

### --cert-file
+ Path to the client server TLS cert file.
+ default: none
+ env variable: ETCD_CERT_FILE

### --key-file
+ Path to the client server TLS key file.
+ default: none
+ env variable: ETCD_KEY_FILE

### --client-cert-auth
+ Enable client cert authentication.
+ default: false
+ env variable: ETCD_CLIENT_CERT_AUTH

### --trusted-ca-file
+ Path to the client server TLS trusted CA key file.
+ default: none
+ env variable: ETCD_TRUSTED_CA_FILE

### --auto-tls
+ Client TLS using generated certificates.
+ default: false
+ env variable: ETCD_AUTO_TLS

### --peer-ca-file [DEPRECATED]
+ Path to the peer server TLS CA file. `--peer-ca-file ca.crt` can be replaced by `--peer-trusted-ca-file ca.crt --peer-client-cert-auth` and etcd will perform the same.
+ default: none
+ env variable: ETCD_PEER_CA_FILE

### --peer-cert-file
+ Path to the peer server TLS cert file.
+ default: none
+ env variable: ETCD_PEER_CERT_FILE

### --peer-key-file
+ Path to the peer server TLS key file.
+ default: none
+ env variable: ETCD_PEER_KEY_FILE

### --peer-client-cert-auth
+ Enable peer client cert authentication.
+ default: false
+ env variable: ETCD_PEER_CLIENT_CERT_AUTH

### --peer-trusted-ca-file
+ Path to the peer server TLS trusted CA file.
+ default: none
+ env variable: ETCD_PEER_TRUSTED_CA_FILE

### --peer-auto-tls
+ Peer TLS using generated certificates.
+ default: false
+ env variable: ETCD_PEER_AUTO_TLS

## Logging flags

### --debug
+ Drop the default log level to DEBUG for all subpackages.
+ default: false (INFO for all packages)
+ env variable: ETCD_DEBUG

### --log-package-levels
+ Set individual etcd subpackages to specific log levels. An example is `etcdserver=WARNING,security=DEBUG`.
+ default: none (INFO for all packages)
+ env variable: ETCD_LOG_PACKAGE_LEVELS

## Unsafe flags

Please be CAUTIOUS when using unsafe flags because they will break the guarantees given by the consensus protocol.
For example, etcd may panic if other members in the cluster are still alive.
Follow the instructions carefully when using these flags.

### --force-new-cluster
+ Force the creation of a new one-member cluster. It commits configuration changes that forcibly remove all existing members in the cluster and add itself. It needs to be set to [restore a backup][restore].
+ default: false
+ env variable: ETCD_FORCE_NEW_CLUSTER

## Miscellaneous flags

### --version
+ Print the version and exit.
+ default: false

### --config-file
+ Load server configuration from a file.
+ default: none

## Profiling flags

### --enable-pprof
+ Enable runtime profiling data via an HTTP server. The address is at the client URL + "/debug/pprof".
+ default: false

[build-cluster]: clustering.md#static
[reconfig]: runtime-configuration.md
[discovery]: clustering.md#discovery
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
[proxy]: ../v2/proxy.md
[restore]: ../v2/admin_guide.md#restoring-a-backup
[security]: security.md
[systemd-intro]: http://freedesktop.org/wiki/Software/systemd/
[tuning]: ../tuning.md#time-parameters

Documentation/op-guide/container.md (new file)

# Run etcd clusters inside containers

The following guide shows how to run etcd with rkt and Docker using the [static bootstrap process](clustering.md#static).

## Docker

In order to expose the etcd API to clients outside of the Docker host, use the host IP address of the container. Please see [`docker inspect`](https://docs.docker.com/engine/reference/commandline/inspect) for more detail on how to get the IP address. Alternatively, specify the `--net=host` flag to the `docker run` command to skip placing the container inside of a separate network stack.

```
# For each machine
ETCD_VERSION=v3.0.0
TOKEN=my-etcd-token
CLUSTER_STATE=new
NAME_1=etcd-node-0
NAME_2=etcd-node-1
NAME_3=etcd-node-2
HOST_1=10.20.30.1
HOST_2=10.20.30.2
HOST_3=10.20.30.3
CLUSTER=${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_3}=http://${HOST_3}:2380

# For node 1
THIS_NAME=${NAME_1}
THIS_IP=${HOST_1}
sudo docker run --net=host --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
  /usr/local/bin/etcd \
  --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

# For node 2
THIS_NAME=${NAME_2}
THIS_IP=${HOST_2}
sudo docker run --net=host --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
  /usr/local/bin/etcd \
  --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}

# For node 3
THIS_NAME=${NAME_3}
THIS_IP=${HOST_3}
sudo docker run --net=host --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \
  /usr/local/bin/etcd \
  --data-dir=data.etcd --name ${THIS_NAME} \
  --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \
  --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \
  --initial-cluster ${CLUSTER} \
  --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN}
```

To run `etcdctl` using API version 3:

```
docker exec etcd /bin/sh -c "export ETCDCTL_API=3 && /usr/local/bin/etcdctl put foo bar"
```
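
The key can then be read back with the same pattern:

```
docker exec etcd /bin/sh -c "export ETCDCTL_API=3 && /usr/local/bin/etcdctl get foo"
```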

Documentation/op-guide/failures.md (new file)

# Understand failures

Failures are common in a large deployment of machines. A machine fails when its hardware or software malfunctions. Multiple machines fail together when there are power failures or network issues. Multiple kinds of failures can also happen at once; it is almost impossible to enumerate all possible failure cases.

In this section, we catalog kinds of failures and discuss how etcd is designed to tolerate these failures. Most users, if not all, can map a particular failure into one of these kinds. To prepare for rare or [unrecoverable failures][unrecoverable], always [back up][backup] the etcd cluster.

## Minor followers failure

When fewer than half of the followers fail, the etcd cluster can still accept requests and make progress without any major disruption. For example, two follower failures will not affect a five member etcd cluster’s operation. However, clients will lose connectivity to the failed members. Client libraries should hide these interruptions from users for read requests by automatically reconnecting to other members. Operators should expect the system load on the other members to increase due to the reconnections.

## Leader failure

When a leader fails, the etcd cluster automatically elects a new leader. The election does not happen instantly once the leader fails. It takes about an election timeout to elect a new leader since the failure detection model is timeout based.

During the leader election the cluster cannot process any writes. Write requests sent during the election are queued for processing until a new leader is elected.

Writes already sent to the old leader but not yet committed may be lost. The new leader has the power to rewrite any uncommitted entries from the previous leader. From the user perspective, some write requests might time out after a new leader election. However, no committed writes are ever lost.

The new leader extends timeouts automatically for all leases. This mechanism ensures a lease will not expire before the granted TTL even if it was granted by the old leader.

## Majority failure

When the majority of the cluster members fail, the etcd cluster fails and cannot accept more writes.

The etcd cluster can only recover from a majority failure once a majority of its members become available. If a majority of members cannot come back online, then the operator must start [disaster recovery][unrecoverable] to recover the cluster.

Once a majority of members works, the etcd cluster elects a new leader automatically and returns to a healthy state. The new leader extends timeouts automatically for all leases. This mechanism ensures no lease expires due to server side unavailability.

## Network partition

A network partition is similar to a minor followers failure or a leader failure. A network partition divides the etcd cluster into two parts; one with a member majority and the other with a member minority. The majority side becomes the available cluster and the minority side is unavailable; there is no “split-brain” in etcd.

If the leader is on the majority side, then from the majority point of view the failure is a minority follower failure. If the leader is on the minority side, then it is a leader failure. The leader on the minority side steps down and the majority side elects a new leader.

Once the network partition clears, the minority side automatically recognizes the leader from the majority side and recovers its state.

## Failure during bootstrapping

A cluster bootstrap is only successful if all required members successfully start. If any failure happens during bootstrapping, remove the data directories on all members and re-bootstrap the cluster with a new cluster token or new discovery token.
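
As a sketch, a re-bootstrap of a statically configured member might look like the following (this assumes the default data directory of `${name}.etcd` and the example `infra0` cluster used in the clustering guide; repeat on every member with its own name and addresses):

```sh
# on infra0: remove the failed bootstrap state, then start over with a new token
$ rm -rf infra0.etcd
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://10.0.1.10:2379 \
  --initial-cluster-token etcd-cluster-2 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
  --initial-cluster-state new
```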

Of course, it is possible to recover a failed bootstrapped cluster in the same way as recovering a running cluster. However, it almost always takes more time and resources to recover that cluster than to bootstrap a new one, since there is no data to recover anyway.

[backup]: maintenance.md#snapshot-backup
[unrecoverable]: recovery.md#disaster-recovery

Documentation/op-guide/maintenance.md (new file)

# Maintenance

## Overview

An etcd cluster needs periodic maintenance to remain reliable. Depending on an etcd application's needs, this maintenance can usually be automated and performed without downtime or significantly degraded performance.

All etcd maintenance manages storage resources consumed by the etcd keyspace. Storage space quotas guard against failure to adequately control the keyspace size: if an etcd member runs low on space, a quota will trigger cluster-wide alarms which will put the system into a limited-operation maintenance mode. To avoid running out of space for writes to the keyspace, the etcd keyspace history must be compacted. Storage space itself may be reclaimed by defragmenting etcd members. Finally, periodic snapshot backups of etcd member state make it possible to recover any unintended logical data loss or corruption caused by operational error.

## History compaction

Since etcd keeps an exact history of its keyspace, this history should be periodically compacted to avoid performance degradation and eventual storage space exhaustion. Compacting the keyspace history drops all information about keys superseded prior to a given keyspace revision. The space used by these keys then becomes available for additional writes to the keyspace.

The keyspace can be compacted automatically with `etcd`'s time windowed history retention policy, or manually with `etcdctl`. The `etcdctl` method provides fine-grained control over the compacting process whereas automatic compacting fits applications that only need key history for some length of time.

`etcd` can be set to automatically compact the keyspace with the `--auto-compaction-retention` option, with a period in hours:

```sh
# keep one hour of history
$ etcd --auto-compaction-retention=1
```

An `etcdctl` initiated compaction works as follows:

```sh
# compact up to revision 3
$ etcdctl compact 3
```

Revisions prior to the compaction revision become inaccessible:

```sh
$ etcdctl get --rev=2 somekey
Error: rpc error: code = 11 desc = etcdserver: mvcc: required revision has been compacted
```

## Defragmentation

After compacting the keyspace, the backend database may exhibit internal fragmentation. Any internal fragmentation is space that is free to use by the backend but still consumes storage space. The process of defragmentation releases this storage space back to the file system. Defragmentation is issued on a per-member basis so that cluster-wide latency spikes may be avoided.

Compacting old revisions internally fragments `etcd` by leaving gaps in the backend database. Fragmented space is available for use by `etcd` but unavailable to the host filesystem.

To defragment an etcd member, use the `etcdctl defrag` command:

```sh
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
```

## Space quota

The space quota in `etcd` ensures the cluster operates in a reliable fashion. Without a space quota, `etcd` may suffer from poor performance if the keyspace grows excessively large, or it may simply run out of storage space, leading to unpredictable cluster behavior. If the keyspace's backend database for any member exceeds the space quota, `etcd` raises a cluster-wide alarm that puts the cluster into a maintenance mode which only accepts key reads and deletes. After freeing enough space in the keyspace, the alarm can be disarmed and the cluster will resume normal operation.

By default, `etcd` sets a conservative space quota suitable for most applications, but it may be configured on the command line, in bytes:

```sh
# set a very small 16MB quota
$ etcd --quota-backend-bytes=16777216
```

The space quota can be triggered with a loop:

```sh
# fill keyspace
$ while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break; done
...
Error: rpc error: code = 8 desc = etcdserver: mvcc: database space exceeded
# confirm quota space is exceeded
$ etcdctl --write-out=table endpoint status
+----------------+------------------+-----------+---------+-----------+-----------+------------+
|    ENDPOINT    |        ID        |  VERSION  | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
| 127.0.0.1:2379 | bf9071f4639c75cc | 2.3.0+git | 18 MB   | true      |         2 |       3332 |
+----------------+------------------+-----------+---------+-----------+-----------+------------+
# confirm alarm is raised
$ etcdctl alarm list
memberID:13803658152347727308 alarm:NOSPACE
```

Removing excessive keyspace data will put the cluster back within the quota limits so the alarm can be disarmed:

```sh
# get current revision
$ etcdctl --endpoints=:2379 endpoint status
[{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":8925027824743593106,"member_id":13803658152347727308,"revision":1516,"raft_term":2},"version":"2.3.0+git","dbSize":17973248,"leader":13803658152347727308,"raftIndex":6359,"raftTerm":2}}]
# compact away all old revisions
$ etcdctl compact 1516
compacted revision 1516
# defragment away excessive space
$ etcdctl defrag
Finished defragmenting etcd member[127.0.0.1:2379]
# disarm alarm
$ etcdctl alarm disarm
memberID:13803658152347727308 alarm:NOSPACE
# test puts are allowed again
$ etcdctl put newkey 123
OK
```

## Snapshot backup

Snapshotting the `etcd` cluster on a regular basis serves as a durable backup for an etcd keyspace. By taking periodic snapshots of an etcd member's backend database, an `etcd` cluster can be recovered to a point in time with a known good state.

A snapshot is taken with `etcdctl`:

```sh
$ etcdctl snapshot save backup.db
$ etcdctl --write-out=table snapshot status backup.db
+----------+----------+------------+------------+
|   HASH   | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
| fe01cf57 |       10 |          7 | 2.1 MB     |
+----------+----------+------------+------------+
```

Documentation/op-guide/performance.md (new file)

# Performance

## Understanding performance

etcd provides stable, sustained high performance. Two factors define performance: latency and throughput. Latency is the time taken to complete an operation. Throughput is the total operations completed within some time period. Usually average latency increases as the overall throughput increases when etcd accepts concurrent client requests. In common cloud environments, like a standard `n-4` on Google Compute Engine (GCE) or a comparable machine type on AWS, a three member etcd cluster finishes a request in less than one millisecond under light load, and can complete more than 30,000 requests per second under heavy load.

etcd uses the Raft consensus algorithm to replicate requests among members and reach agreement. Consensus performance, especially commit latency, is limited by two physical constraints: network IO latency and disk IO latency. The minimum time to finish an etcd request is the network Round Trip Time (RTT) between members, plus the time `fdatasync` requires to commit the data to permanent storage. The RTT within a datacenter may be as long as several hundred microseconds. A typical RTT within the United States is around 50ms, and can be as slow as 400ms between continents. The typical fdatasync latency for a spinning disk is about 10ms. For SSDs, the latency is often lower than 1ms. To increase throughput, etcd batches multiple requests together and submits them to Raft. This batching policy lets etcd attain high throughput despite heavy load.
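
As a rough back-of-the-envelope illustration using the typical figures above (illustrative arithmetic, not measurements):

```
minimum commit latency ≈ peer RTT + fdatasync time
  intra-datacenter, SSD:        ~0.5 ms + ~1 ms  ≈ 1.5 ms
  cross-US WAN, spinning disk:  ~50 ms  + ~10 ms ≈ 60 ms
```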

There are other sub-systems which impact the overall performance of etcd. Each serialized etcd request must run through etcd’s boltdb-backed MVCC storage engine, which usually takes tens of microseconds to finish. Periodically etcd incrementally snapshots its recently applied requests, merging them back with the previous on-disk snapshot. This process may lead to a latency spike. Although this is usually not a problem on SSDs, it may double the observed latency on HDD. Likewise, inflight compactions can impact etcd’s performance. Fortunately, the impact is often insignificant since the compaction is staggered so it does not compete for resources with regular requests. The RPC system, gRPC, gives etcd a well-defined, extensible API, but it also introduces additional latency, especially for local reads.

## Benchmarks

Benchmarking etcd performance can be done with the [benchmark](https://github.com/coreos/etcd/tree/master/tools/benchmark) CLI tool included with etcd.

For some baseline performance numbers, we consider a three member etcd cluster with the following hardware configuration:

- Google Cloud Compute Engine
- 3 machines of 8 vCPUs + 16GB Memory + 50GB SSD
- 1 machine (client) of 16 vCPUs + 30GB Memory + 50GB SSD
- Ubuntu 15.10
- etcd v3 master branch (commit SHA d8f325d), Go 1.6.2

With this configuration, etcd can approximately write:

| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Memory |
|----------------|-------------------|---------------------|-----------------------|-------------------|--------------------|-------------------|-----------------------------|--------|
| 10,000 | 8 | 256 | 1 | 1 | leader only | 525 | 2ms | 35 MB |
| 100,000 | 8 | 256 | 100 | 1000 | leader only | 25,000 | 30ms | 35 MB |
| 100,000 | 8 | 256 | 100 | 1000 | all members | 33,000 | 25ms | 35 MB |

Sample commands are:

```
# assuming IP_1 is leader, write requests to the leader
benchmark --endpoints={IP_1} --conns=1 --clients=1 \
    put --key-size=8 --sequential-keys --total=10000 --val-size=256
benchmark --endpoints={IP_1} --conns=100 --clients=1000 \
    put --key-size=8 --sequential-keys --total=100000 --val-size=256

# write to all members
benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \
    put --key-size=8 --sequential-keys --total=100000 --val-size=256
```

Linearizable read requests go through a quorum of cluster members for consensus to fetch the most recent data. Serializable read requests are cheaper than linearizable reads since they are served by any single etcd member, instead of a quorum of members, in exchange for possibly serving stale data. etcd can read:

| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average latency per request | Average read QPS |
|--------------------|-------------------|---------------------|-----------------------|-------------------|-------------|-----------------------------|------------------|
| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 2ms | 560 |
| 10,000 | 8 | 256 | 1 | 1 | Serializable | 0.4ms | 7,500 |
| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 15ms | 43,000 |
| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 9ms | 93,000 |

Sample commands are:

```
# Linearizable read requests
benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=1 --clients=1 \
    range YOUR_KEY --consistency=l --total=10000
benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \
    range YOUR_KEY --consistency=l --total=100000

# Serializable read requests for each member and sum up the numbers
for endpoint in {IP_1} {IP_2} {IP_3}; do
    benchmark --endpoints=$endpoint --conns=1 --clients=1 \
        range YOUR_KEY --consistency=s --total=10000
done
for endpoint in {IP_1} {IP_2} {IP_3}; do
    benchmark --endpoints=$endpoint --conns=100 --clients=1000 \
        range YOUR_KEY --consistency=s --total=100000
done
```

We encourage running the benchmark test when setting up an etcd cluster for the first time in a new environment to ensure the cluster achieves adequate performance; cluster latency and throughput can be sensitive to minor environment differences.

Documentation/op-guide/recovery.md (new file)

## Disaster recovery

etcd is designed to withstand machine failures. An etcd cluster automatically recovers from temporary failures (e.g., machine reboots) and tolerates up to *(N-1)/2* permanent failures for a cluster of N members. When a member permanently fails, whether due to hardware failure or disk corruption, it loses access to the cluster. If the cluster permanently loses more than *(N-1)/2* members then it disastrously fails, irrevocably losing quorum. Once quorum is lost, the cluster cannot reach consensus and therefore cannot continue accepting updates.

To recover from disastrous failure, etcd v3 provides snapshot and restore facilities to recreate the cluster without v3 key data loss. To recover v2 keys, refer to the [v2 admin guide][v2_recover].

[v2_recover]: ../v2/admin_guide.md#disaster-recovery

### Snapshotting the keyspace

Recovering a cluster first needs a snapshot of the keyspace from an etcd member. A snapshot may either be taken from a live member with the `etcdctl snapshot save` command or by copying the `member/snap/db` file from an etcd data directory. For example, the following command snapshots the keyspace served by `$ENDPOINT` to the file `snapshot.db`:

```sh
$ etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
```

### Restoring a cluster

To restore a cluster, all that is needed is a single snapshot "db" file. A cluster restore with `etcdctl snapshot restore` creates new etcd data directories; all members should restore using the same snapshot. Restoring overwrites some snapshot metadata (specifically, the member ID and cluster ID); the member loses its former identity. This metadata overwrite prevents the new member from inadvertently joining an existing cluster. Therefore in order to start a cluster from a snapshot, the restore must start a new logical cluster.

Snapshot integrity may be optionally verified at restore time. If the snapshot is taken with `etcdctl snapshot save`, it will have an integrity hash that is checked by `etcdctl snapshot restore`. If the snapshot is copied from the data directory, there is no integrity hash and it will only restore by using `--skip-hash-check`.

A restore initializes a new member of a new cluster, with a fresh cluster configuration using `etcd`'s cluster configuration flags, but preserves the contents of the etcd keyspace. Continuing from the previous example, the following creates new etcd data directories (`m1.etcd`, `m2.etcd`, `m3.etcd`) for a three member cluster:

```sh
$ etcdctl snapshot restore snapshot.db \
  --name m1 \
  --initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-advertise-peer-urls http://host1:2380
$ etcdctl snapshot restore snapshot.db \
  --name m2 \
  --initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-advertise-peer-urls http://host2:2380
$ etcdctl snapshot restore snapshot.db \
  --name m3 \
  --initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-advertise-peer-urls http://host3:2380
```

Next, start `etcd` with the new data directories:

```sh
$ etcd \
  --name m1 \
  --listen-client-urls http://host1:2379 \
  --advertise-client-urls http://host1:2379 \
  --listen-peer-urls http://host1:2380 &
$ etcd \
  --name m2 \
  --listen-client-urls http://host2:2379 \
  --advertise-client-urls http://host2:2379 \
  --listen-peer-urls http://host2:2380 &
$ etcd \
  --name m3 \
  --listen-client-urls http://host3:2379 \
  --advertise-client-urls http://host3:2379 \
  --listen-peer-urls http://host3:2380 &
```

Now the restored etcd cluster should be available and serving the keyspace given by the snapshot.

Documentation/op-guide/runtime-configuration.md (new file)

# Runtime reconfiguration

etcd comes with support for incremental runtime reconfiguration, which allows users to update the membership of the cluster at run time.

Reconfiguration requests can only be processed when a majority of the cluster members are functioning. It is **highly recommended** to always have a cluster size greater than two in production. It is unsafe to remove a member from a two member cluster: the majority of a two member cluster is also two, so if there is a failure during the removal process, the cluster might not be able to make progress and would need to [restart from majority failure][majority failure].

To better understand the design behind runtime reconfiguration, we suggest reading [the runtime reconfiguration document][runtime-reconf].

## Reconfiguration use cases

Let's walk through some common reasons for reconfiguring a cluster. Most of these just involve combinations of adding or removing a member, which are explained below under [Cluster Reconfiguration Operations][cluster-reconf].

### Cycle or upgrade multiple machines

If multiple cluster members need to move due to planned maintenance (hardware upgrades, network downtime, etc.), it is recommended to modify members one at a time.

It is safe to remove the leader, however there is a brief period of downtime while the election process takes place. If the cluster holds more than 50MB, it is recommended to [migrate the member's data directory][member migration].

### Change the cluster size

Increasing the cluster size can enhance [failure tolerance][fault tolerance table] and provide better read performance. Since clients can read from any member, increasing the number of members increases the overall read throughput.

Decreasing the cluster size can improve the write performance of a cluster, with a trade-off of decreased resilience. Writes into the cluster are replicated to a majority of members of the cluster before being considered committed. Decreasing the cluster size lowers the majority, and each write is committed more quickly.

### Replace a failed machine

If a machine fails due to hardware failure, data directory corruption, or some other fatal situation, it should be replaced as soon as possible. Machines that have failed but haven't been removed adversely affect the quorum and reduce the tolerance for an additional failure.

To replace the machine, follow the instructions for [removing the member][remove member] from the cluster, and then [add a new member][add member] in its place. If the cluster holds more than 50MB, it is recommended to [migrate the failed member's data directory][member migration] if it is still accessible.

### Restart cluster from majority failure

If the majority of the cluster is lost or all of the nodes have changed IP addresses, then manual action is necessary to recover safely.
The basic steps in the recovery process include [creating a new cluster using the old data][disaster recovery], forcing a single member to act as the leader, and finally using runtime configuration to [add new members][add member] to this new cluster one at a time.

## Cluster reconfiguration operations

Now that we have the use cases in mind, let us lay out the operations involved in each.

Before making any change, a simple majority (quorum) of etcd members must be available.
This is essentially the same requirement as for any other write to etcd.

All changes to the cluster are done one at a time:

* To update a single member's peerURLs, make an update operation
* To replace a single member, make an add then a remove operation
* To increase from 3 to 5 members, make two add operations
* To decrease from 5 to 3, make two remove operations

All of these examples will use the `etcdctl` command line tool that ships with etcd.
To change membership without `etcdctl`, use the [v2 HTTP members API][member-api] or the [v3 gRPC members API][member-api-grpc].

### Update a member

#### Update advertise client URLs

To update the advertise client URLs of a member, simply restart that member with an updated client URLs flag (`--advertise-client-urls`) or environment variable (`ETCD_ADVERTISE_CLIENT_URLS`). The restarted member will self-publish the updated URLs.
A wrongly updated client URL will not affect the health of the etcd cluster.
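
As a sketch, assuming the `infra0` member from the clustering examples should now advertise a domain name instead of its bare IP (the domain is illustrative):

```sh
# restart infra0 with the new advertise URL; the other flags stay as before
$ etcd --name infra0 \
  --listen-client-urls http://10.0.1.10:2379,http://127.0.0.1:2379 \
  --advertise-client-urls http://infra0.example.com:2379 \
  --listen-peer-urls http://10.0.1.10:2380 \
  --initial-advertise-peer-urls http://10.0.1.10:2380
```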
|
||||
|
||||
#### Update advertise peer URLs
|
||||
|
||||
To update the advertise peer URLs of a member, first update
|
||||
it explicitly via member command and then restart the member. The additional action is required
|
||||
since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster.
|
||||
|
||||
To update the peer URLs, first, we need to find the target member's ID. To list all members with `etcdctl`:
|
||||
|
||||
```sh
|
||||
$ etcdctl member list
|
||||
6e3bd23ae5f1eae0: name=node2 peerURLs=http://localhost:23802 clientURLs=http://127.0.0.1:23792
|
||||
924e2e83e93f2560: name=node3 peerURLs=http://localhost:23803 clientURLs=http://127.0.0.1:23793
|
||||
a8266ecf031671f3: name=node1 peerURLs=http://localhost:23801 clientURLs=http://127.0.0.1:23791
|
||||
```
|
||||
|
||||
In this example let's `update` a8266ecf031671f3 member ID and change its peerURLs value to http://10.0.1.10:2380
|
||||
|
||||
```sh
|
||||
$ etcdctl member update a8266ecf031671f3 http://10.0.1.10:2380
|
||||
Updated member with ID a8266ecf031671f3 in cluster
|
||||
```
|
||||
|
||||
### Remove a member
|
||||
|
||||
Let us say the member ID we want to remove is a8266ecf031671f3.
|
||||
We then use the `remove` command to perform the removal:
|
||||
|
||||
```sh
|
||||
$ etcdctl member remove a8266ecf031671f3
|
||||
Removed member a8266ecf031671f3 from cluster
|
||||
```
|
||||
|
||||
The target member will stop itself at this point and print out the removal in the log:
|
||||
|
||||
```
|
||||
etcd: this member has been permanently removed from the cluster. Exiting.
|
||||
```
|
||||
|
||||
It is safe to remove the leader, however the cluster will be inactive while a new leader is elected. This duration is normally the period of election timeout plus the voting process.
|
||||
|
||||
### Add a new member
|
||||
|
||||
Adding a member is a two step process:
|
||||
|
||||
* Add the new member to the cluster via the [HTTP members API][member-api], the [gRPC members API][member-api-grpc], or the `etcdctl member add` command.
|
||||
* Start the new member with the new cluster configuration, including a list of the updated members (existing members + the new member).
|
||||
|
||||
Using `etcdctl` let's add the new member to the cluster by specifying its [name][conf-name] and [advertised peer URLs][conf-adv-peer]:
|
||||
|
||||
```sh
|
||||
$ etcdctl member add infra3 http://10.0.1.13:2380
|
||||
added member 9bf1b35fc7761a23 to cluster
|
||||
|
||||
ETCD_NAME="infra3"
|
||||
ETCD_INITIAL_CLUSTER="infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra3=http://10.0.1.13:2380"
|
||||
ETCD_INITIAL_CLUSTER_STATE=existing
|
||||
```
|
||||
|
||||
`etcdctl` has informed the cluster about the new member and printed out the environment variables needed to successfully start it.
|
||||
Now start the new etcd process with the relevant flags for the new member:
|
||||
|
||||
```sh
|
||||
$ export ETCD_NAME="infra3"
|
||||
$ export ETCD_INITIAL_CLUSTER="infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra3=http://10.0.1.13:2380"
|
||||
$ export ETCD_INITIAL_CLUSTER_STATE=existing
|
||||
$ etcd --listen-client-urls http://10.0.1.13:2379 --advertise-client-urls http://10.0.1.13:2379 --listen-peer-urls http://10.0.1.13:2380 --initial-advertise-peer-urls http://10.0.1.13:2380 --data-dir %data_dir%
|
||||
```
|
||||
|
||||
The new member will run as a part of the cluster and immediately begin catching up with the rest of the cluster.
|
||||
|
||||
If adding multiple members the best practice is to configure a single member at a time and verify it starts correctly before adding more new members.
|
||||
If adding a new member to a 1-node cluster, the cluster cannot make progress before the new member starts because it needs two members as majority to agree on the consensus. This behavior only happens between the time `etcdctl member add` informs the cluster about the new member and the new member successfully establishing a connection to the existing one.
|
||||
|
||||
#### Error cases when adding members
|
||||
|
||||
In the following case we have not included our new host in the list of enumerated nodes.
|
||||
If this is a new cluster, the node must be added to the list of initial cluster members.
|
||||
|
||||
```sh
|
||||
$ etcd --name infra3 \
|
||||
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
|
||||
--initial-cluster-state existing
|
||||
etcdserver: assign ids error: the member count is unequal
|
||||
exit 1
|
||||
```
|
||||
|
||||
In this case we give a different address (10.0.1.14:2380) to the one that we used to join the cluster (10.0.1.13:2380).
|
||||
|
||||
```sh
|
||||
$ etcd --name infra4 \
|
||||
--initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra4=http://10.0.1.14:2380 \
|
||||
--initial-cluster-state existing
|
||||
etcdserver: assign ids error: unmatched member while checking PeerURLs
|
||||
exit 1
|
||||
```
|
||||
|
||||
When we start etcd using the data directory of a removed member, etcd will exit automatically if it connects to any active member in the cluster:
|
||||
|
||||
```sh
|
||||
$ etcd
|
||||
etcd: this member has been permanently removed from the cluster. Exiting.
|
||||
exit 1
|
||||
```
|
||||
|
||||
### Strict reconfiguration check mode (`-strict-reconfig-check`)
|
||||
|
||||
As described in the above, the best practice of adding new members is to configure a single member at a time and verify it starts correctly before adding more new members. This step by step approach is very important because if newly added members is not configured correctly (for example the peer URLs are incorrect), the cluster can lose quorum. The quorum loss happens since the newly added member are counted in the quorum even if that member is not reachable from other existing members. Also quorum loss might happen if there is a connectivity issue or there are operational issues.
|
||||
|
||||
For avoiding this problem, etcd provides an option `-strict-reconfig-check`. If this option is passed to etcd, etcd rejects reconfiguration requests if the number of started members will be less than a quorum of the reconfigured cluster.
|
||||
|
||||
It is recommended to enable this option. However, it is disabled by default because of keeping compatibility.
|
||||
|
||||
[add member]: #add-a-new-member
|
||||
[cluster-reconf]: #cluster-reconfiguration-operations
|
||||
[conf-adv-peer]: configuration.md#-initial-advertise-peer-urls
|
||||
[conf-name]: configuration.md#-name
|
||||
[disaster recovery]: recovery.md
|
||||
[fault tolerance table]: ../v2/admin_guide.md#fault-tolerance-table
|
||||
[majority failure]: #restart-cluster-from-majority-failure
|
||||
[member-api]: ../v2/members_api.md
|
||||
[member-api-grpc]: ../dev-guide/api_reference_v3.md#service-cluster-etcdserveretcdserverpbrpcproto
|
||||
[member migration]: ../v2/admin_guide.md#member-migration
|
||||
[remove member]: #remove-a-member
|
||||
[runtime-reconf]: runtime-reconf-design.md
|
50
Documentation/op-guide/runtime-reconf-design.md
Normal file
@ -0,0 +1,50 @@
|
||||
# Design of runtime reconfiguration
|
||||
|
||||
Runtime reconfiguration is one of the hardest and most error-prone features in a distributed system, especially in a consensus-based system like etcd.
|
||||
|
||||
Read on to learn about the design of etcd's runtime reconfiguration commands and how we tackled these problems.
|
||||
|
||||
## Two phase config changes keep the cluster safe
|
||||
|
||||
In etcd, every runtime reconfiguration has to go through [two phases][add-member] for safety reasons. For example, to add a member, first inform the cluster of the new configuration and then start the new member.
|
||||
|
||||
Phase 1 - Inform the cluster of the new configuration
|
||||
|
||||
To add a member into an etcd cluster, make an API call requesting that a new member be added to the cluster. This is the only way to add a new member into an existing cluster. The API call returns when the cluster agrees on the configuration change.
|
||||
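For example, using `etcdctl` (the name and peer URL are illustrative):

```sh
# Phase 1: ask the cluster to agree on the new member's configuration.
$ etcdctl member add infra3 http://10.0.1.13:2380
```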
|
||||
Phase 2 - Start the new member
|
||||
|
||||
To join the etcd member into the existing cluster, specify the correct `initial-cluster` and set `initial-cluster-state` to `existing`. When the member starts, it will contact the existing cluster first and verify the current cluster configuration matches the expected one specified in `initial-cluster`. When the new member successfully starts, the cluster has reached the expected configuration.
|
||||
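Continuing the sketch from phase 1 (URLs illustrative):

```sh
# Phase 2: start the new member with the configuration agreed on in phase 1.
$ etcd --name infra3 \
  --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380,infra3=http://10.0.1.13:2380 \
  --initial-cluster-state existing
```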
|
||||
By splitting the process into two discrete phases, users are forced to be explicit regarding cluster membership changes. This actually gives users more flexibility and makes things easier to reason about. For example, if there is an attempt to add a new member with the same ID as an existing member in an etcd cluster, the action will fail immediately during phase one without impacting the running cluster. Similar protection is provided to prevent adding new members by mistake. If a new etcd member attempts to join the cluster before the cluster has accepted the configuration change, it will not be accepted by the cluster.
|
||||
|
||||
Without the explicit workflow around cluster membership, etcd would be vulnerable to unexpected cluster membership changes. For example, if etcd is running under an init system such as systemd, etcd would be restarted after being removed via the membership API and would attempt to rejoin the cluster on startup. This cycle would repeat every time a member is removed via the API while systemd is set to restart etcd after it fails, which is unexpected.
|
||||
|
||||
We expect runtime reconfiguration to be an infrequent operation. We decided to keep it explicit and user-driven to ensure configuration safety and keep the cluster always running smoothly under explicit control.
|
||||
|
||||
## Permanent loss of quorum requires new cluster
|
||||
|
||||
If a cluster permanently loses a majority of its members, a new cluster will need to be started from an old data directory to recover the previous state.
|
||||
|
||||
It is entirely possible to force-remove the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead, or is force-removed through different members in the same cluster, etcd ends up with a diverged cluster with the same cluster ID. This is very dangerous and hard to debug/fix afterwards.
|
||||
|
||||
With a correct deployment, the possibility of permanent majority loss is very low. But it is a severe enough problem to be worth special care. We strongly suggest reading the [disaster recovery documentation][disaster-recovery] and preparing for permanent majority loss before putting etcd into production.
|
||||
|
||||
## Do not use public discovery service for runtime reconfiguration
|
||||
|
||||
The public discovery service should only be used for bootstrapping a cluster. To join a member into an existing cluster, use the runtime reconfiguration API.
|
||||
|
||||
The discovery service is designed for bootstrapping an etcd cluster in a cloud environment, where the IP addresses of all the members are not known beforehand. After successfully bootstrapping a cluster, the IP addresses of all the members are known. Technically, the discovery service should no longer be needed.
|
||||
|
||||
Using the public discovery service may seem like a convenient way to do runtime reconfiguration; after all, the discovery service already has all the cluster configuration information. However, relying on the public discovery service brings trouble:
|
||||
|
||||
1. It introduces external dependencies for the entire life-cycle of the cluster, not just at bootstrap time. If there is a network issue between the cluster and the public discovery service, the cluster will suffer from it.
|
||||
|
||||
2. The public discovery service must reflect the correct runtime configuration of the cluster over its whole life-cycle. It has to provide security mechanisms to prevent bad actions, and that is hard.
|
||||
|
||||
3. The public discovery service would have to keep tens of thousands of cluster configurations. Our public discovery service backend is not ready for that workload.
|
||||
|
||||
To have a discovery service that supports runtime reconfiguration, the best choice is to build a private one.
|
||||
|
||||
[add-member]: runtime-configuration.md#add-a-new-member
|
||||
[disaster-recovery]: recovery.md
|
224
Documentation/op-guide/security.md
Normal file
@ -0,0 +1,224 @@
|
||||
# Security model
|
||||
|
||||
etcd supports automatic TLS as well as authentication through client certificates, both for client-to-server and for peer (server-to-server / cluster) communication.
|
||||
|
||||
To get up and running, first have a CA certificate and a signed key pair for one member. It is recommended to create and sign a new key pair for every member in a cluster.
|
||||
|
||||
For convenience, the [cfssl] tool provides an easy interface to certificate generation, and we provide an example using the tool [here][tls-setup]. Alternatively, try this [guide to generating self-signed key pairs][tls-guide].
|
||||
|
||||
## Basic setup
|
||||
|
||||
etcd takes several certificate related configuration options, either through command-line flags or environment variables:
|
||||
|
||||
**Client-to-server communication:**
|
||||
|
||||
`--cert-file=<path>`: Certificate used for SSL/TLS connections **to** etcd. When this option is set, advertise-client-urls can use the HTTPS schema.
|
||||
|
||||
`--key-file=<path>`: Key for the certificate. Must be unencrypted.
|
||||
|
||||
`--client-cert-auth`: When this is set, etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA; requests that don't supply a valid client certificate will fail.
|
||||
|
||||
`--trusted-ca-file=<path>`: Trusted certificate authority.
|
||||
|
||||
`--auto-tls`: Use automatically generated self-signed certificates for TLS connections with clients.
|
||||
|
||||
**Peer (server-to-server / cluster) communication:**
|
||||
|
||||
The peer options work the same way as the client-to-server options:
|
||||
|
||||
`--peer-cert-file=<path>`: Certificate used for SSL/TLS connections between peers. This will be used both for listening on the peer address as well as sending requests to other peers.
|
||||
|
||||
`--peer-key-file=<path>`: Key for the certificate. Must be unencrypted.
|
||||
|
||||
`--peer-client-cert-auth`: When set, etcd will check all incoming peer requests from the cluster for valid client certificates signed by the supplied CA.
|
||||
|
||||
`--peer-trusted-ca-file=<path>`: Trusted certificate authority.
|
||||
|
||||
`--peer-auto-tls`: Use automatically generated self-signed certificates for TLS connections between peers.
|
||||
|
||||
If either a client-to-server or peer certificate is supplied, the key must also be set. All of these configuration options are also available through environment variables: `ETCD_CA_FILE`, `ETCD_PEER_CA_FILE`, and so on.
|
||||
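For example, this flag-based invocation and this environment-based one are equivalent (paths illustrative):

```sh
# Flags:
$ etcd --cert-file=/path/to/server.crt --key-file=/path/to/server.key

# Environment variables:
$ ETCD_CERT_FILE=/path/to/server.crt ETCD_KEY_FILE=/path/to/server.key etcd
```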
|
||||
## Example 1: Client-to-server transport security with HTTPS
|
||||
|
||||
For this, have a CA certificate (`ca.crt`) and signed key pair (`server.crt`, `server.key`) ready.
|
||||
|
||||
Let us configure etcd to provide simple HTTPS transport security step by step:
|
||||
|
||||
```sh
|
||||
$ etcd --name infra0 --data-dir infra0 \
|
||||
--cert-file=/path/to/server.crt --key-file=/path/to/server.key \
|
||||
--advertise-client-urls=https://127.0.0.1:2379 --listen-client-urls=https://127.0.0.1:2379
|
||||
```
|
||||
|
||||
This should start up fine and it will be possible to test the configuration by speaking HTTPS to etcd:
|
||||
|
||||
```sh
|
||||
$ curl --cacert /path/to/ca.crt https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
|
||||
```
|
||||
|
||||
The command should show that the handshake succeeded. Since we use self-signed certificates with our own certificate authority, the CA must be passed to curl using the `--cacert` option. Another possibility would be to add the CA certificate to the system's trusted certificates directory (usually `/etc/pki/tls/certs` or `/etc/ssl/certs`).
|
||||
|
||||
**OSX 10.9+ Users**: curl 7.30.0 on OSX 10.9+ doesn't understand certificates passed in on the command line.
|
||||
Instead, import the dummy ca.crt directly into the keychain or add the `-k` flag to curl to ignore errors.
|
||||
To test without the `-k` flag, run `open ./fixtures/ca/ca.crt` and follow the prompts.
|
||||
Please remove this certificate after testing!
|
||||
If there is a workaround, let us know.
|
||||
|
||||
## Example 2: Client-to-server authentication with HTTPS client certificates
|
||||
|
||||
So far, we've given the etcd client the ability to verify the server identity and to provide transport security. However, we can also use client certificates to prevent unauthorized access to etcd.
|
||||
|
||||
The clients will provide their certificates to the server and the server will check whether the cert is signed by the supplied CA and decide whether to serve the request.
|
||||
|
||||
The same files mentioned in the first example are needed for this, as well as a key pair for the client (`client.crt`, `client.key`) signed by the same certificate authority.
|
||||
|
||||
```sh
|
||||
$ etcd --name infra0 --data-dir infra0 \
|
||||
--client-cert-auth --trusted-ca-file=/path/to/ca.crt --cert-file=/path/to/server.crt --key-file=/path/to/server.key \
|
||||
--advertise-client-urls https://127.0.0.1:2379 --listen-client-urls https://127.0.0.1:2379
|
||||
```
|
||||
|
||||
Now try the same request as above to this server:
|
||||
|
||||
```sh
|
||||
$ curl --cacert /path/to/ca.crt https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
|
||||
```
|
||||
|
||||
The request should be rejected by the server:
|
||||
|
||||
```
|
||||
...
|
||||
routines:SSL3_READ_BYTES:sslv3 alert bad certificate
|
||||
...
|
||||
```
|
||||
|
||||
To make it succeed, we need to give the CA-signed client certificate to the server:
|
||||
|
||||
```sh
|
||||
$ curl --cacert /path/to/ca.crt --cert /path/to/client.crt --key /path/to/client.key \
|
||||
-L https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
|
||||
```
|
||||
|
||||
The output should include:
|
||||
|
||||
```
|
||||
...
|
||||
SSLv3, TLS handshake, CERT verify (15):
|
||||
...
|
||||
TLS handshake, Finished (20)
|
||||
```
|
||||
|
||||
And also the response from the server:
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "set",
|
||||
"node": {
|
||||
"createdIndex": 12,
|
||||
"key": "/foo",
|
||||
"modifiedIndex": 12,
|
||||
"value": "bar"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Example 3: Transport security & client certificates in a cluster
|
||||
|
||||
etcd supports the same model as above for **peer communication**, meaning the communication between etcd members in a cluster.
|
||||
|
||||
Assuming we have our `ca.crt` and two members with their own keypairs (`member1.crt` & `member1.key`, `member2.crt` & `member2.key`) signed by this CA, we launch etcd as follows:
|
||||
|
||||
|
||||
```sh
|
||||
DISCOVERY_URL=... # from https://discovery.etcd.io/new
|
||||
|
||||
# member1
|
||||
$ etcd --name infra1 --data-dir infra1 \
|
||||
--peer-client-cert-auth --peer-trusted-ca-file=/path/to/ca.crt --peer-cert-file=/path/to/member1.crt --peer-key-file=/path/to/member1.key \
|
||||
--initial-advertise-peer-urls=https://10.0.1.10:2380 --listen-peer-urls=https://10.0.1.10:2380 \
|
||||
--discovery ${DISCOVERY_URL}
|
||||
|
||||
# member2
|
||||
$ etcd --name infra2 --data-dir infra2 \
|
||||
--peer-client-cert-auth --peer-trusted-ca-file=/path/to/ca.crt --peer-cert-file=/path/to/member2.crt --peer-key-file=/path/to/member2.key \
|
||||
--initial-advertise-peer-urls=https://10.0.1.11:2380 --listen-peer-urls=https://10.0.1.11:2380 \
|
||||
--discovery ${DISCOVERY_URL}
|
||||
```
|
||||
|
||||
The etcd members will form a cluster and all communication between members in the cluster will be encrypted and authenticated using the client certificates. The output of etcd will show that the addresses it connects to use HTTPS.
|
||||
|
||||
## Example 4: Automatic self-signed transport security
|
||||
|
||||
For cases where communication encryption, but not authentication, is needed, etcd supports encrypting its messages with automatically generated self-signed certificates. This simplifies deployment because there is no need for managing certificates and keys outside of etcd.
|
||||
|
||||
Configure etcd to use self-signed certificates for client and peer connections with the flags `--auto-tls` and `--peer-auto-tls`:
|
||||
|
||||
```sh
|
||||
DISCOVERY_URL=... # from https://discovery.etcd.io/new
|
||||
|
||||
# member1
|
||||
$ etcd --name infra1 --data-dir infra1 \
|
||||
--auto-tls --peer-auto-tls \
|
||||
--initial-advertise-peer-urls=https://10.0.1.10:2380 --listen-peer-urls=https://10.0.1.10:2380 \
|
||||
--discovery ${DISCOVERY_URL}
|
||||
|
||||
# member2
|
||||
$ etcd --name infra2 --data-dir infra2 \
|
||||
--auto-tls --peer-auto-tls \
|
||||
--initial-advertise-peer-urls=https://10.0.1.11:2380 --listen-peer-urls=https://10.0.1.11:2380 \
|
||||
--discovery ${DISCOVERY_URL}
|
||||
```
|
||||
|
||||
Self-signed certificates do not authenticate identity, so curl will return an error:
|
||||
|
||||
```sh
|
||||
curl: (60) SSL certificate problem: Invalid certificate chain
|
||||
```
|
||||
|
||||
To disable certificate chain checking, invoke curl with the `-k` flag:
|
||||
|
||||
```sh
|
||||
$ curl -k https://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -v
|
||||
```
|
||||
|
||||
## Notes for etcd proxy
|
||||
|
||||
etcd proxy terminates the TLS from its client if the connection is secure, and uses the proxy's own key/cert specified in `--peer-key-file` and `--peer-cert-file` to communicate with etcd members.
|
||||
|
||||
The proxy communicates with etcd members through both the `--advertise-client-urls` and `--advertise-peer-urls` of a given member. It forwards client requests to etcd members’ advertised client urls, and it syncs the initial cluster configuration through etcd members’ advertised peer urls.
|
||||
|
||||
When client authentication is enabled for an etcd member, the administrator must ensure that the peer certificate specified in the proxy's `--peer-cert-file` option is valid for that authentication. The proxy's peer certificate must also be valid for peer authentication if peer authentication is enabled.
|
||||
|
||||
## Frequently asked questions
|
||||
|
||||
### I'm seeing an SSLv3 alert handshake failure when using TLS client authentication?
|
||||
|
||||
The `crypto/tls` package of `golang` checks the key usage of the certificate public key before using it.
|
||||
To use the certificate public key for client auth, we need to add `clientAuth` to the `Extended Key Usage` when creating the certificate.
|
||||
|
||||
Here is how to do it:
|
||||
|
||||
Add the following section to openssl.cnf:
|
||||
|
||||
```
|
||||
[ ssl_client ]
|
||||
...
|
||||
extendedKeyUsage = clientAuth
|
||||
...
|
||||
```
|
||||
|
||||
When creating the cert, be sure to reference it in the `-extensions` flag:
|
||||
|
||||
```
|
||||
$ openssl ca -config openssl.cnf -policy policy_anything -extensions ssl_client -out certs/machine.crt -infiles machine.csr
|
||||
```
|
||||
|
||||
### With peer certificate authentication I receive "certificate is valid for 127.0.0.1, not $MY_IP"
|
||||
Make sure to sign the certificates with a Subject Name that matches the member's public IP address. The `etcd-ca` tool, for example, provides an `--ip=` option for its `new-cert` command.
|
||||
|
||||
If the certificate needs to be signed for the member's FQDN in its Subject Name, use Subject Alternative Names (IP SANs for short) to add the IP address. The `etcd-ca` tool provides a `--domain=` option for its `new-cert` command, and openssl can do [it][alt-name] too.
|
||||
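As a sketch, both requirements can be combined in one openssl.cnf extensions section (the IP and domain below are illustrative):

```
[ ssl_client ]
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = IP:10.0.1.10, DNS:member1.example.com
```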
|
||||
[cfssl]: https://github.com/cloudflare/cfssl
|
||||
[tls-setup]: /hack/tls-setup
|
||||
[tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
|
||||
[alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
|
14
Documentation/op-guide/supported-platform.md
Normal file
@ -0,0 +1,14 @@
|
||||
## Supported platform
|
||||
|
||||
### 32-bit and other unsupported systems
|
||||
|
||||
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
|
||||
|
||||
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print
|
||||
a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to
|
||||
the target architecture.
|
||||
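For example, to knowingly run etcd on a 32-bit ARM machine anyway:

```sh
$ ETCD_UNSUPPORTED_ARCH=arm etcd
```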
|
||||
Currently only the amd64 architecture is officially supported by `etcd`.
|
||||
|
||||
[358]: https://github.com/coreos/etcd/issues/358
|
||||
|
47
Documentation/op-guide/v2-migration.md
Normal file
@ -0,0 +1,47 @@
|
||||
# Migrate applications from using API v2 to API v3
|
||||
|
||||
The data store v2 is still accessible from the API v2 after upgrading to etcd3. Thus, it will work as before and require no application changes. With etcd 3, applications use the new grpc API v3 to access the mvcc store, which provides more features and improved performance. The mvcc store and the old store v2 are separate and isolated; writes to the store v2 will not affect the mvcc store and, similarly, writes to the mvcc store will not affect the store v2.
|
||||
|
||||
Migrating an application from the API v2 to the API v3 involves two steps: 1) migrate the client library and 2) migrate the data. If the application can rebuild the data, then migrating the data is unnecessary.
|
||||
|
||||
## Migrate client library
|
||||
|
||||
API v3 is different from API v2; thus, application developers need to use a new client library to send requests to etcd API v3. The documentation of the client v3 is available at https://godoc.org/github.com/coreos/etcd/clientv3.
|
||||
|
||||
There are some notable differences between API v2 and API v3:
|
||||
|
||||
- Transaction: In v3, etcd provides multi-key conditional transactions. Applications should use transactions in place of `Compare-And-Swap` operations.
|
||||
|
||||
- Flat key space: There are no directories in API v3, only keys. For example, "/a/b/c/" is a key. Range queries support getting all keys matching a given prefix.
|
||||
|
||||
- Compacted responses: Operations like `Delete` no longer return previous values. To get the deleted value, a transaction can be used to atomically get the key and then delete its value.
|
||||
|
||||
- Leases: A replacement for v2 TTLs; the TTL is bound to a lease and keys attach to the lease. When the TTL expires, the lease is revoked and all attached keys are removed.
|
||||
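As a sketch of the lease workflow with the v3 `etcdctl` (the lease ID is whatever `lease grant` prints):

```sh
# Grant a 60-second lease, then attach a key to it; when the lease
# expires or is revoked, the key is removed.
$ ETCDCTL_API=3 etcdctl lease grant 60
$ ETCDCTL_API=3 etcdctl put --lease=<lease-ID> foo bar
```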
|
||||
## Migrate data
|
||||
|
||||
Application data can be migrated either offline or online. Offline migration is much simpler than online migration and is recommended.
|
||||
|
||||
### Offline migration
|
||||
|
||||
Offline migration is very simple but requires etcd downtime. If an etcd downtime window spanning from seconds to minutes is acceptable, offline migration is a good choice and is easy to automate.
|
||||
|
||||
First, all members in the etcd cluster must converge to the same state. This can be achieved by stopping all applications that write keys to etcd. Alternatively, if the applications must remain running, configure etcd to listen on a different client URL and restart all etcd members. To check that the states have converged, use the `ETCDCTL_API=3 etcdctl endpoint status` command and confirm, within a few seconds, that the `raft index` values of all members match (or differ by at most 1 due to an internal sync raft command).
|
||||
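For example (endpoints illustrative):

```sh
$ ETCDCTL_API=3 etcdctl --endpoints=10.0.1.10:2379,10.0.1.11:2379,10.0.1.12:2379 endpoint status
```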
|
||||
Second, migrate the v2 keys into v3 with the [migrate][migrate_command] (`ETCDCTL_API=3 etcdctl migrate`) command. The migrate command writes keys in the v2 store to a user-provided transformer program and reads back transformed keys. It then writes transformed keys into the mvcc store. This usually takes at most tens of seconds.
|
||||
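A minimal invocation looks like this (the data directory path is illustrative; the transformer is optional):

```sh
# etcd must be stopped while migrating the data directory.
$ ETCDCTL_API=3 etcdctl migrate --data-dir=/var/lib/etcd [--transformer=/path/to/transformer]
```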
|
||||
Restart the etcd members and everything should just work.
|
||||
|
||||
### Online migration
|
||||
|
||||
If the application cannot tolerate any downtime, then it must migrate online. The implementation of online migration will vary from application to application but the overall idea is the same.
|
||||
|
||||
First, write application code using the v3 API. The application must support two modes: a migration mode and a normal mode. The application starts in migration mode. When running in migration mode, the application reads keys using the v3 API first, and, if it cannot find the key, it retries with the API v2. In normal mode, the application only reads keys using the v3 API. The application writes keys over the API v3 in both modes. To acknowledge a switch from migration mode to normal mode, the application watches a switch-mode key. When the switch key's value turns to `true`, the application switches over from migration mode to normal mode.
|
||||
|
||||
Second, start a background job to migrate data from the store v2 to the mvcc store by reading keys from the API v2 and writing keys to the API v3.
|
||||
|
||||
After finishing data migration, the background job writes `true` into the switch mode key to notify the application that it may switch modes.
|
||||
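A sketch of this handoff with the v3 `etcdctl` (the key name is illustrative):

```sh
# Background job, once the data migration is complete:
$ ETCDCTL_API=3 etcdctl put /app/switch-mode true

# Application side: watch the key and leave migration mode when it becomes true.
$ ETCDCTL_API=3 etcdctl watch /app/switch-mode
```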
|
||||
Online migration can be difficult when the application logic depends on store v2 indexes. Applications will need additional logic to convert mvcc store revisions to store v2 indexes.
|
||||
|
||||
[migrate_command]: ../../etcdctl/README.md#migrate-options
|
17
Documentation/op-guide/versioning.md
Normal file
@ -0,0 +1,17 @@
|
||||
## Versioning
|
||||
|
||||
### Service versioning
|
||||
|
||||
etcd uses [semantic versioning](http://semver.org).
|
||||
New minor versions may add additional features to the API.
|
||||
|
||||
Get the running etcd cluster version with `etcdctl`:
|
||||
|
||||
```sh
|
||||
ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 endpoint status
|
||||
```
|
||||
|
||||
### API versioning
|
||||
|
||||
The `v3` API responses should not change after the 3.0.0 release but new features will be added over time.
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Production Users
|
||||
# Production users
|
||||
|
||||
This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check-in on your experience and update this list.
|
||||
This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check-in on how etcd is working in the field and update this list.
|
||||
|
||||
## discovery.etcd.io
|
||||
|
||||
@ -48,4 +48,15 @@ CyCore Systems provides architecture and engineering for computing systems. Thi
|
||||
|
||||
Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these clusters as well as provide some basic environment bootstrapping configuration keys.
|
||||
|
||||
## Vonage
|
||||
|
||||
- *Application*: system configuration for microservices, scheduling, locks (future - service discovery)
|
||||
- *Launched*: August 2015
|
||||
- *Cluster Size*: 2 clusters of 5 members in 2 DCs; n local proxies, 1-to-1 with each microservice (SSL and SRV lookup)
|
||||
- *Order of Data Size*: kilobytes
|
||||
- *Operator*: Vonage [devAdmin][raoofm]
|
||||
- *Environment*: VMWare, AWS
|
||||
- *Backups*: Daily snapshots on VMs. Backups done for upgrades.
|
||||
|
||||
[teamcity]: https://www.jetbrains.com/teamcity/
|
||||
[raoofm]:https://github.com/raoofm
|
||||
|
@ -1,24 +1,24 @@
|
||||
# Reporting Bugs
|
||||
# Reporting bugs
|
||||
|
||||
If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
|
||||
If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
|
||||
|
||||
To make your bug report accurate and easy to understand, please try to create bug reports that are:
|
||||
To make the bug report accurate and easy to understand, please try to create bug reports that are:
|
||||
|
||||
- Specific. Include as much details as possible: which version, what environment, what configuration, etc. You can also attach etcd log (the starting log with etcd configuration is especially important).
|
||||
- Specific. Include as much details as possible: which version, what environment, what configuration, etc. If the bug is related to running the etcd server, please attach the etcd log (the starting log with etcd configuration is especially important).
|
||||
|
||||
- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce, please includes the steps that might lead to the problem. You can also attach the affected etcd data dir and stack strace to the bug report.
|
||||
- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce; please include the steps that might lead to the problem. If possible, please attach the affected etcd data dir and stack trace to the bug report.
|
||||
|
||||
- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on etcd is out of scope, but we are happy to point you in the right direction or help you interact with etcd in the correct manner.
|
||||
- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on etcd is out of scope, but we are happy to provide guidance in the right direction or help with using etcd itself.
|
||||
|
||||
- Unique. Do not duplicate an existing bug report.
|
||||
|
||||
- Scoped. One bug per report. Do not follow up with another bug inside one report.
|
||||
|
||||
You might also want to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report.
|
||||
It may be worthwhile to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report.
|
||||
|
||||
We might ask you for further information to locate a bug. A duplicated bug report will be closed.
|
||||
We might ask for further information to locate a bug. A duplicated bug report will be closed.
|
||||
|
||||
## Frequently Asked Questions
|
||||
## Frequently asked questions
|
||||
|
||||
### How to get a stack trace
|
||||
|
||||
@ -39,7 +39,7 @@ $ sudo systemctl cat etcd2
|
||||
$ sudo journalctl -u etcd2
|
||||
```
|
||||
|
||||
Due to an upstream systemd bug, journald may miss the last few log lines when its process exit. If journalctl tells you that etcd stops without fatal or panic message, you could try `sudo journalctl -f -t etcd2` to get full log.
|
||||
Due to an upstream systemd bug, journald may miss the last few log lines when its processes exit. If journalctl says etcd stopped without fatal or panic message, try `sudo journalctl -f -t etcd2` to get full log.
|
||||
|
||||
[etcd-issue]: https://github.com/coreos/etcd/issues/new
|
||||
[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/
|
||||
|
@ -208,4 +208,4 @@ WatchResponse {
|
||||
```
|
||||
|
||||
[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
|
||||
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto
|
||||
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/mvcc/mvccpb/kv.proto
|
||||
|
@ -1,11 +1,10 @@
|
||||
# Tuning
|
||||
|
||||
The default settings in etcd should work well for installations on a local network where the average network latency is low.
|
||||
However, when using etcd across multiple data centers or over networks with high latency you may need to tweak the heartbeat interval and election timeout settings.
|
||||
The default settings in etcd should work well for installations on a local network where the average network latency is low. However, when using etcd across multiple data centers or over networks with high latency, the heartbeat interval and election timeout settings may need tuning.
|
||||
|
||||
The network isn't the only source of latency. Each request and response may be impacted by slow disks on both the leader and follower. Each of these timeouts represents the total time from request to successful response from the other machine.
|
||||
|
||||
## Time Parameters
|
||||
## Time parameters
|
||||
|
||||
The underlying distributed consensus protocol relies on two separate time parameters to ensure that nodes can hand off leadership if one stalls or goes offline.
|
||||
The first parameter is called the *Heartbeat Interval*.
|
||||
@ -24,24 +23,24 @@ On the other side, a too high heartbeat interval leads to high election timeout.
|
||||
The easiest way to measure round-trip time (RTT) is to use the [PING utility][ping].
|
||||
|
||||
The election timeout should be set based on the heartbeat interval and average round-trip time between members.
|
||||
Election timeouts must be at least 10 times the round-trip time so it can account for variance in your network.
|
||||
For example, if the round-trip time between your members is 10ms then you should have at least a 100ms election timeout.
|
||||
Election timeouts must be at least 10 times the round-trip time so they can account for variance in the network.
|
||||
For example, if the round-trip time between members is 10ms then the election timeout should be at least 100ms.
|
||||
|
||||
You should also set your election timeout to at least 5 to 10 times your heartbeat interval to account for variance in leader replication.
|
||||
For a heartbeat interval of 50ms you should set your election timeout to at least 250ms - 500ms.
|
||||
The election timeout should be set to at least 5 to 10 times the heartbeat interval to account for variance in leader replication.
|
||||
For a heartbeat interval of 50ms, set the election timeout to at least 250ms - 500ms.
|
||||
|
||||
The upper limit of election timeout is 50000ms (50s), which should only be used when deploying a globally-distributed etcd cluster.
|
||||
A reasonable round-trip time for the continental United States is 130ms, and the time between US and Japan is around 350-400ms.
|
||||
If your network has uneven performance or regular packet delays/loss then it is possible that a couple of retries may be necessary to successfully send a packet. So 5s is a safe upper limit of global round-trip time.
|
||||
If the network has uneven performance or regular packet delays/loss then it is possible that a couple of retries may be necessary to successfully send a packet. So 5s is a safe upper limit of global round-trip time.
|
||||
As the election timeout should be an order of magnitude bigger than the broadcast time, a global round-trip time of ~5s makes 50 seconds a reasonable maximum.
|
||||
|
||||
The heartbeat interval and election timeout value should be the same for all members in one cluster. Setting different values for etcd members may disrupt cluster stability.
|
||||
|
||||
You can override the default values on the command line:
|
||||
The default values can be overridden on the command line:
|
||||
|
||||
```sh
|
||||
# Command line arguments:
|
||||
$ etcd -heartbeat-interval=100 -election-timeout=500
|
||||
$ etcd --heartbeat-interval=100 --election-timeout=500
|
||||
|
||||
# Environment variables:
|
||||
$ ETCD_HEARTBEAT_INTERVAL=100 ETCD_ELECTION_TIMEOUT=500 etcd
|
||||
@ -58,15 +57,15 @@ A complete history works well for lightly used clusters but clusters that are he
|
||||
To avoid having a huge log, etcd makes periodic snapshots.
|
||||
These snapshots provide a way for etcd to compact the log by saving the current state of the system and removing old logs.
|
||||
|
||||
### Snapshot Tuning
|
||||
### Snapshot tuning
|
||||
|
||||
Creating snapshots can be expensive so they're only created after a given number of changes to etcd.
|
||||
By default, snapshots will be made after every 10,000 changes.
|
||||
If etcd's memory usage and disk usage are too high, you can lower the snapshot threshold by setting the following on the command line:
|
||||
If etcd's memory usage and disk usage are too high, try lowering the snapshot threshold by setting the following on the command line:
|
||||
|
||||
```sh
|
||||
# Command line arguments:
|
||||
$ etcd -snapshot-count=5000
|
||||
$ etcd --snapshot-count=5000
|
||||
|
||||
# Environment variables:
|
||||
$ ETCD_SNAPSHOT_COUNT=5000 etcd
|
||||
|
119
Documentation/upgrades/upgrade_3_0.md
Normal file
@ -0,0 +1,119 @@
|
||||
## Upgrade etcd from 2.3 to 3.0
|
||||
|
||||
In the general case, upgrading from etcd 2.3 to 3.0 can be a zero-downtime, rolling upgrade:
|
||||
- one by one, stop the etcd v2.3 processes and replace them with etcd v3.0 processes
|
||||
- after running all v3.0 processes, new features in v3.0 are available to the cluster
|
||||
|
||||
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
|
||||
|
||||
### Upgrade Checklists
|
||||
|
||||
#### Upgrade Requirements
|
||||
|
||||
To upgrade an existing etcd deployment to 3.0, the running cluster must be 2.3 or greater. If it's before 2.3, please upgrade to [2.3](https://github.com/coreos/etcd/releases/tag/v2.3.0) before upgrading to 3.0.
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.
|
||||
|
||||
#### Preparation
|
||||
|
||||
Always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||
|
||||
Before beginning, [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to the existing etcd version.
|
||||
|
||||
#### Mixed Versions
|
||||
|
||||
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.0. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
|
||||
|
||||
#### Limitations
|
||||
|
||||
It might take up to 2 minutes for the newly upgraded member to catch up with the existing cluster when the total data size is larger than 50MB. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
|
||||
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we’ll be happy to provide advice on the procedure.
|
||||
|
||||
#### Downgrade
|
||||
|
||||
If all members have been upgraded to v3.0, the cluster will be upgraded to v3.0, and downgrading from this completed state is **not possible**. If any single member is still v2.3, however, the cluster and its operations remain “v2.3”, and it is possible from this mixed cluster state to return to using a v2.3 etcd binary on all members.
|
||||
|
||||
Please [backup the data directory](../v2/admin_guide.md#backing-up-the-datastore) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||
|
||||
### Upgrade Procedure
|
||||
|
||||
This example details the upgrade of a three-member v2.3 etcd cluster running on a local machine.
|
||||
|
||||
#### 1. Check upgrade requirements.
|
||||
|
||||
Is the cluster healthy and running v2.3.x?
|
||||
|
||||
```
|
||||
$ etcdctl cluster-health
|
||||
member 6e3bd23ae5f1eae0 is healthy: got healthy result from http://localhost:22379
|
||||
member 924e2e83e93f2560 is healthy: got healthy result from http://localhost:32379
|
||||
member 8211f1d0f64f3269 is healthy: got healthy result from http://localhost:12379
|
||||
cluster is healthy
|
||||
|
||||
$ curl http://localhost:2379/version
|
||||
{"etcdserver":"2.3.x","etcdcluster":"2.3.0"}
|
||||
```
|
||||
|
||||
#### 2. Stop the existing etcd process
|
||||
|
||||
When each etcd process is stopped, expected errors will be logged by other cluster members. This is normal since a cluster member connection has been (temporarily) broken:
|
||||
|
||||
```
|
||||
2016-06-27 15:21:48.624124 E | rafthttp: failed to dial 8211f1d0f64f3269 on stream Message (dial tcp 127.0.0.1:12380: getsockopt: connection refused)
|
||||
2016-06-27 15:21:48.624175 I | rafthttp: the connection with 8211f1d0f64f3269 became inactive
|
||||
```
|
||||
|
||||
It’s a good idea at this point to [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
|
||||
|
||||
```
|
||||
$ etcdctl backup \
|
||||
--data-dir /var/lib/etcd \
|
||||
--backup-dir /tmp/etcd_backup
|
||||
```
|
||||
|
||||
#### 3. Drop-in etcd v3.0 binary and start the new etcd process
|
||||
|
||||
The new v3.0 etcd will publish its information to the cluster:
|
||||
|
||||
```
|
||||
09:58:25.938673 I | etcdserver: published {Name:infra1 ClientURLs:[http://localhost:12379]} to cluster 524400597fb1d5f6
|
||||
```
|
||||
|
||||
Verify that each member, and then the entire cluster, becomes healthy with the new v3.0 etcd binary:
|
||||
|
||||
```
|
||||
$ etcdctl cluster-health
|
||||
member 6e3bd23ae5f1eae0 is healthy: got healthy result from http://localhost:22379
|
||||
member 924e2e83e93f2560 is healthy: got healthy result from http://localhost:32379
|
||||
member 8211f1d0f64f3269 is healthy: got healthy result from http://localhost:12379
|
||||
cluster is healthy
|
||||
```
|
||||
|
||||
|
||||
Upgraded members will log warnings like the following until the entire cluster is upgraded. This is expected and will cease after all etcd cluster members are upgraded to v3.0:
|
||||
|
||||
```
|
||||
2016-06-27 15:22:05.679644 W | etcdserver: the local etcd version 2.3.7 is not up-to-date
|
||||
2016-06-27 15:22:05.679660 W | etcdserver: member 8211f1d0f64f3269 has a higher version 3.0.0
|
||||
```
|
||||
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
When all members are upgraded, the cluster will report a successful upgrade to 3.0:
|
||||
|
||||
```
|
||||
2016-06-27 15:22:19.873751 N | membership: updated the cluster version from 2.3 to 3.0
|
||||
2016-06-27 15:22:19.914574 I | api: enabled capabilities for version 3.0.0
|
||||
```
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health
|
||||
127.0.0.1:12379 is healthy: successfully committed proposal: took = 18.440155ms
|
||||
127.0.0.1:32379 is healthy: successfully committed proposal: took = 13.651368ms
|
||||
127.0.0.1:22379 is healthy: successfully committed proposal: took = 18.513301ms
|
||||
```
|
||||
|
||||
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev
|
165
Documentation/v2/README.md
Normal file
@ -0,0 +1,165 @@
|
||||
# etcd2
|
||||
|
||||
[](https://goreportcard.com/report/github.com/coreos/etcd)
|
||||
[](https://travis-ci.org/coreos/etcd)
|
||||
[](https://semaphoreci.com/coreos/etcd)
|
||||
[](https://quay.io/repository/coreos/etcd-git)
|
||||
|
||||
**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
|
||||
|
||||

|
||||
|
||||
etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being:
|
||||
|
||||
* *Simple*: curl'able user-facing API (HTTP+JSON)
|
||||
* *Secure*: optional SSL client cert authentication
|
||||
* *Fast*: benchmarked 1000s of writes/s per instance
|
||||
* *Reliable*: properly distributed using Raft
|
||||
|
||||
etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log.
|
||||
|
||||
etcd is used [in production by many companies](./production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], and many others.
|
||||
|
||||
See [etcdctl][etcdctl] for a simple command line client.
|
||||
Or feel free to just use `curl`, as in the examples below.
|
||||
|
||||
[raft]: https://raft.github.io/
|
||||
[k8s]: http://kubernetes.io/
|
||||
[fleet]: https://github.com/coreos/fleet
|
||||
[locksmith]: https://github.com/coreos/locksmith
|
||||
[vulcand]: https://github.com/vulcand/vulcand
|
||||
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Getting etcd
|
||||
|
||||
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
|
||||
|
||||
For those wanting to try the very latest version, build etcd from the `master` branch.
|
||||
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.5+ is required).
|
||||
All development occurs on `master`, including new features and bug fixes.
|
||||
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
|
||||
|
||||
[github-release]: https://github.com/coreos/etcd/releases/
|
||||
[branch-management]: branch_management.md
|
||||
|
||||
### Running etcd
|
||||
|
||||
First start a single-member cluster of etcd:
|
||||
|
||||
```sh
|
||||
./bin/etcd
|
||||
```
|
||||
|
||||
This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
|
||||
|
||||
Next, let's set a single key, and then retrieve it:
|
||||
|
||||
```
|
||||
curl -L http://127.0.0.1:2379/v2/keys/mykey -XPUT -d value="this is awesome"
|
||||
curl -L http://127.0.0.1:2379/v2/keys/mykey
|
||||
```
|
||||
|
||||
You have successfully started an etcd and written a key to the store.
|
||||
|
||||
### etcd TCP ports
|
||||
|
||||
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. To maintain compatibility, some etcd configuration and documentation continues to refer to the legacy ports 4001 and 7001, but all new etcd use and discussion should adopt the IANA-assigned ports. The legacy ports 4001 and 7001 will be fully deprecated, and support for their use removed, in future etcd releases.
|
||||
|
||||
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
|
||||
|
||||
### Running local etcd cluster
|
||||
|
||||
First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
|
||||
|
||||
Our [Procfile script](./Procfile) will set up a local example cluster. You can start it with:
|
||||
|
||||
```sh
|
||||
goreman start
|
||||
```
|
||||
|
||||
This will bring up 3 etcd members, `infra1`, `infra2` and `infra3`, plus an etcd proxy `proxy`, all running locally and composing a cluster.
|
||||
|
||||
You can write a key to the cluster and retrieve the value back from any member or proxy.
|
||||
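For example (the proxy's client port comes from the local Procfile setup and is an assumption here):

```sh
# Write through one member...
curl -L http://127.0.0.1:2379/v2/keys/hello -XPUT -d value="world"

# ...then read it back through the proxy.
curl -L http://127.0.0.1:8080/v2/keys/hello
```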
|
||||
### Next Steps
|
||||
|
||||
Now it's time to dig into the full etcd API and other guides.
|
||||
|
||||
- Explore the full [API][api].
|
||||
- Set up a [multi-machine cluster][clustering].
|
||||
- Learn the [config format, env variables and flags][configuration].
|
||||
- Find [language bindings and tools][libraries-and-tools].
|
||||
- Use TLS to [secure an etcd cluster][security].
|
||||
- [Tune etcd][tuning].
|
||||
- [Upgrade from 0.4.9+ to 2.2.0][upgrade].
|
||||
|
||||
[api]: ./api.md
|
||||
[clustering]: ./clustering.md
|
||||
[configuration]: ./configuration.md
|
||||
[libraries-and-tools]: ./libraries-and-tools.md
|
||||
[security]: ./security.md
|
||||
[tuning]: ./tuning.md
|
||||
[upgrade]: ./04_to_2_snapshot_migration.md
|
||||
|
||||
## Contact
|
||||
|
||||
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
|
||||
- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
|
||||
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](../../ROADMAP.md)
|
||||
- Bugs: [issues](https://github.com/coreos/etcd/issues)
|
||||
|
||||
## Contributing
|
||||
|
||||
See [CONTRIBUTING](../../CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
|
||||
|
||||
## Reporting bugs
|
||||
|
||||
See [reporting bugs](reporting_bugs.md) for details about reporting any issue you may encounter.
|
||||
|
||||
## Known bugs
|
||||
|
||||
[GH518](https://github.com/coreos/etcd/issues/518) is a known bug. The issue is that:
|
||||
|
||||
```
|
||||
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar
|
||||
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d dir=true -d prevExist=true
|
||||
```
|
||||
|
||||
If the previous node is a key and a client tries to overwrite it with `dir=true`, it does not give a warning such as `Not a directory`. Instead, the key is set to an empty value.
|
||||
|
||||
## Project Details
|
||||
|
||||
### Versioning
|
||||
|
||||
#### Service Versioning
|
||||
|
||||
etcd uses [semantic versioning](http://semver.org)
|
||||
New minor versions may add additional features to the API.
|
||||
|
||||
You can get the version of etcd by issuing a request to /version:
|
||||
|
||||
```sh
|
||||
curl -L http://127.0.0.1:2379/version
|
||||
```
|
||||
|
||||
#### API Versioning
|
||||
|
||||
The `v2` API responses should not change after the 2.0.0 release but new features will be added over time.
|
||||
|
||||
#### 32-bit and other unsupported systems
|
||||
|
||||
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
|
||||
|
||||
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print
|
||||
a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to
|
||||
the target architecture.
|
||||
|
||||
Currently only the amd64 architecture is officially supported by `etcd`.
|
||||
|
||||
[358]: https://github.com/coreos/etcd/issues/358
|
||||
|
||||
### License
|
||||
|
||||
etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
@ -113,7 +113,8 @@ It is recommended to have an odd number of members in a cluster. Having an odd c
|
||||
| Cluster Size | Majority | Failure Tolerance |
|
||||
|--------------|------------|-------------------|
|
||||
| 1 | 1 | 0 |
|
||||
| 3 | 2 | 1 |
|
||||
| 2 | 2 | 0 |
|
||||
| 3 | 2 | **1** |
|
||||
| 4 | 3 | 1 |
|
||||
| 5 | 3 | **2** |
|
||||
| 6 | 4 | 2 |
|
||||
@ -135,7 +136,7 @@ The data directory contains all the data to recover a member to its point-in-tim
|
||||
|
||||
* Stop the member process.
|
||||
* Copy the data directory of the now-idle member to the new machine.
|
||||
* Update the peer URLs for the replaced member to reflect the new machine according to the [runtime reconfiguration instructions][update-member].
|
||||
* Update the peer URLs for the replaced member to reflect the new machine according to the [runtime reconfiguration instructions][update-a-member].
|
||||
* Start etcd on the new machine, using the same configuration and the copy of the data directory.
|
||||
|
||||
This example will walk you through the process of migrating the infra1 member to a new machine:
|
||||
@ -217,12 +218,14 @@ To recover from such scenarios, etcd provides functionality to backup and restor
|
||||
|
||||
**NB:** Windows users must stop etcd before running the backup command.
|
||||
|
||||
The first step of the recovery is to backup the data directory on a functioning etcd node. To do this, use the `etcdctl backup` command, passing in the original data directory used by etcd. For example:
|
||||
The first step of the recovery is to backup the data directory and wal directory, if stored separately, on a functioning etcd node. To do this, use the `etcdctl backup` command, passing in the original data (and wal) directory used by etcd. For example:
|
||||
|
||||
```sh
|
||||
etcdctl backup \
|
||||
--data-dir %data_dir% \
|
||||
[--wal-dir %wal_dir%] \
|
||||
--backup-dir %backup_data_dir%
|
||||
[--backup-wal-dir %backup_wal_dir%]
|
||||
```
|
||||
|
||||
This command will rewrite some of the metadata contained in the backup (specifically, the node ID and cluster ID), which means that the node will lose its former identity. In order to recreate a cluster from the backup, you will need to start a new, single-node cluster. The metadata is rewritten to prevent the new node from inadvertently being joined onto an existing cluster.
|
||||
@ -234,26 +237,30 @@ To restore a backup using the procedure created above, start etcd with the `-for
|
||||
```sh
|
||||
etcd \
|
||||
-data-dir=%backup_data_dir% \
|
||||
[-wal-dir=%backup_wal_dir%] \
|
||||
-force-new-cluster \
|
||||
...
|
||||
```
|
||||
|
||||
Now etcd should be available on this node and serving the original datastore.
|
||||
|
||||
Once you have verified that etcd has started successfully, shut it down and move the data back to the previous location (you may wish to make another copy as well to be safe):
|
||||
Once you have verified that etcd has started successfully, shut it down and move the data and wal, if stored separately, back to the previous location (you may wish to make another copy as well to be safe):
|
||||
|
||||
```sh
|
||||
pkill etcd
|
||||
rm -fr %data_dir%
|
||||
rm -fr %wal_dir%
|
||||
mv %backup_data_dir% %data_dir%
|
||||
mv %backup_wal_dir% %wal_dir%
|
||||
etcd \
|
||||
-data-dir=%data_dir% \
|
||||
[-wal-dir=%wal_dir%] \
|
||||
...
|
||||
```
|
||||
|
||||
#### Restoring the cluster
|
||||
|
||||
Now that the node is running successfully, [change its advertised peer URLs][update-member], as the `--force-new-cluster` option has set the peer URL to the default listening on localhost.
|
||||
Now that the node is running successfully, [change its advertised peer URLs][update-a-member], as the `--force-new-cluster` option has set the peer URL to the default listening on localhost.
|
||||
|
||||
You can then add more nodes to the cluster and restore resiliency. See the [add a new member][add-a-member] guide for more details. **NB:** If you are trying to restore your cluster using old failed etcd nodes, please make sure you have stopped old etcd instances and removed their old data directories specified by the data-dir configuration parameter.
|
||||
|
@ -233,10 +233,11 @@ curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -d ttl= -d prevExist=t
|
||||
|
||||
### Refreshing key TTL
|
||||
|
||||
Keys in etcd can be refreshed without notifying watchers
|
||||
this can be achieved by setting the refresh to true when updating a TTL
|
||||
Keys in etcd can be refreshed without notifying current watchers.
|
||||
|
||||
You cannot update the value of a key when refreshing it
|
||||
This can be achieved by setting the refresh to true when updating a TTL.
|
||||
|
||||
You cannot update the value of a key when refreshing it.
|
||||
|
||||
```sh
|
||||
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar -d ttl=5
|
@ -145,8 +145,8 @@ GET/HEAD /v2/auth/users
|
||||
"role": "root",
|
||||
"permissions": {
|
||||
"kv": {
|
||||
"read": ["*"],
|
||||
"write": ["*"]
|
||||
"read": ["/*"],
|
||||
"write": ["/*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -159,8 +159,8 @@ GET/HEAD /v2/auth/users
|
||||
"role": "guest",
|
||||
"permissions": {
|
||||
"kv": {
|
||||
"read": ["*"],
|
||||
"write": ["*"]
|
||||
"read": ["/*"],
|
||||
"write": ["/*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -198,8 +198,8 @@ GET/HEAD /v2/auth/users/alice
|
||||
"role": "etcd",
|
||||
"permissions" : {
|
||||
"kv" : {
|
||||
"read": [ "*" ],
|
||||
"write": [ "*" ]
|
||||
"read": [ "/*" ],
|
||||
"write": [ "/*" ]
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -311,8 +311,8 @@ GET/HEAD /v2/auth/roles
|
||||
"role": "etcd",
|
||||
"permissions": {
|
||||
"kv": {
|
||||
"read": ["*"],
|
||||
"write": ["*"]
|
||||
"read": ["/*"],
|
||||
"write": ["/*"]
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -320,8 +320,8 @@ GET/HEAD /v2/auth/roles
|
||||
"role": "quay",
|
||||
"permissions": {
|
||||
"kv": {
|
||||
"read": ["*"],
|
||||
"write": ["*"]
|
||||
"read": ["/*"],
|
||||
"write": ["/*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -393,7 +393,7 @@ PUT /v2/auth/roles/guest
|
||||
"revoke" : {
|
||||
"kv" : {
|
||||
"write": [
|
||||
"*"
|
||||
"/*"
|
||||
]
|
||||
}
|
||||
}
|
@ -18,7 +18,7 @@ The major flag changes are mostly related to bootstrapping. The `initial-*` fl
|
||||
- `-peer-election-timeout` is replaced by `-election-timeout`.
|
||||
|
||||
The documentation of new command line flags can be found at
|
||||
https://github.com/coreos/etcd/blob/master/Documentation/configuration.md.
|
||||
https://github.com/coreos/etcd/blob/master/Documentation/v2/configuration.md.
|
||||
|
||||
## Data Directory Naming
|
||||
|
18
Documentation/v2/benchmarks/README.md
Normal file
@ -0,0 +1,18 @@
|
||||
# Benchmarks
|
||||
|
||||
etcd benchmarks will be published regularly and tracked for each release below:
|
||||
|
||||
- [etcd v2.1.0-alpha][2.1]
|
||||
- [etcd v2.2.0-rc][2.2]
|
||||
- [etcd v3 demo][3.0]
|
||||
|
||||
# Memory Usage Benchmarks
|
||||
|
||||
It records expected memory usage in different scenarios.
|
||||
|
||||
- [etcd v2.2.0-rc][2.2-mem]
|
||||
|
||||
[2.1]: etcd-2-1-0-alpha-benchmarks.md
|
||||
[2.2]: etcd-2-2-0-rc-benchmarks.md
|
||||
[2.2-mem]: etcd-2-2-0-rc-memory-benchmarks.md
|
||||
[3.0]: etcd-3-demo-benchmarks.md
|
52
Documentation/v2/benchmarks/etcd-2-1-0-alpha-benchmarks.md
Normal file
52
Documentation/v2/benchmarks/etcd-2-1-0-alpha-benchmarks.md
Normal file
@ -0,0 +1,52 @@
|
||||
## Physical machines
|
||||
|
||||
GCE n1-highcpu-2 machine type
|
||||
|
||||
- 1x dedicated local SSD mounted under /var/lib/etcd
|
||||
- 1x dedicated slow disk for the OS
|
||||
- 1.8 GB memory
|
||||
- 2x CPUs
|
||||
- etcd version 2.1.0 alpha
|
||||
|
||||
## etcd Cluster
|
||||
|
||||
3 etcd members, each runs on a single machine
|
||||
|
||||
## Testing
|
||||
|
||||
Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
|
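||||
As a sketch, one read round and one write round against a single member might look like this (the member address is illustrative; `-n` is the request count and `-c` the client concurrency):
|
||||
```sh
boom -n 100000 -c 64 http://10.0.1.10:2379/v2/keys/foo
boom -n 10000 -c 64 -m PUT -d "value=bar" http://10.0.1.10:2379/v2/keys/foo
```
|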
||||
|
||||
## Performance
|
||||
|
||||
### reading one single key
|
||||
|
||||
| key size in bytes | number of clients | target etcd server | read QPS | 90th Percentile Latency (ms) |
|
||||
|-------------------|-------------------|--------------------|----------|---------------|
|
||||
| 64 | 1 | leader only | 1534 | 0.7 |
|
||||
| 64 | 64 | leader only | 10125 | 9.1 |
|
||||
| 64 | 256 | leader only | 13892 | 27.1 |
|
||||
| 256 | 1 | leader only | 1530 | 0.8 |
|
||||
| 256 | 64 | leader only | 10106 | 10.1 |
|
||||
| 256 | 256 | leader only | 14667 | 27.0 |
|
||||
| 64 | 64 | all servers | 24200 | 3.9 |
|
||||
| 64 | 256 | all servers | 33300 | 11.8 |
|
||||
| 256 | 64 | all servers | 24800 | 3.9 |
|
||||
| 256 | 256 | all servers | 33000 | 11.5 |
|
||||
|
||||
### writing one single key
|
||||
|
||||
| key size in bytes | number of clients | target etcd server | write QPS | 90th Percentile Latency (ms) |
|
||||
|-------------------|-------------------|--------------------|-----------|---------------|
|
||||
| 64 | 1 | leader only | 60 | 21.4 |
|
||||
| 64 | 64 | leader only | 1742 | 46.8 |
|
||||
| 64 | 256 | leader only | 3982 | 90.5 |
|
||||
| 256 | 1 | leader only | 58 | 20.3 |
|
||||
| 256 | 64 | leader only | 1770 | 47.8 |
|
||||
| 256 | 256 | leader only | 4157 | 105.3 |
|
||||
| 64 | 64 | all servers | 1028 | 123.4 |
|
||||
| 64 | 256 | all servers | 3260 | 123.8 |
|
||||
| 256 | 64 | all servers | 1033 | 121.5 |
|
||||
| 256 | 256 | all servers | 3061 | 119.3 |
|
||||
|
||||
[boom]: https://github.com/rakyll/boom
|
||||
[hack-benchmark]: /hack/benchmark/
|
69
Documentation/v2/benchmarks/etcd-2-2-0-benchmarks.md
Normal file
69
Documentation/v2/benchmarks/etcd-2-2-0-benchmarks.md
Normal file
@ -0,0 +1,69 @@
|
||||
# Benchmarking etcd v2.2.0
|
||||
|
||||
## Physical Machines
|
||||
|
||||
GCE n1-highcpu-2 machine type
|
||||
|
||||
- 1x dedicated local SSD mounted as etcd data directory
|
||||
- 1x dedicated slow disk for the OS
|
||||
- 1.8 GB memory
|
||||
- 2x CPUs
|
||||
|
||||
## etcd Cluster
|
||||
|
||||
3 etcd 2.2.0 members, each runs on a single machine.
|
||||
|
||||
Detailed versions:
|
||||
|
||||
```
|
||||
etcd Version: 2.2.0
|
||||
Git SHA: e4561dd
|
||||
Go Version: go1.5
|
||||
Go OS/Arch: linux/amd64
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool](https://github.com/rakyll/boom) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
|
||||
|
||||
The performance is calculated from the results of 100 benchmark rounds.
|
||||
|
||||
## Performance
|
||||
|
||||
### Single Key Read Performance
|
||||
|
||||
| key size in bytes | number of clients | target etcd server | average read QPS | read QPS stddev | average 90th Percentile Latency (ms) | latency stddev |
|
||||
|-------------------|-------------------|--------------------|------------------|-----------------|--------------------------------------|----------------|
|
||||
| 64 | 1 | leader only | 2303 | 200 | 0.49 | 0.06 |
|
||||
| 64 | 64 | leader only | 15048 | 685 | 7.60 | 0.46 |
|
||||
| 64 | 256 | leader only | 14508 | 434 | 29.76 | 1.05 |
|
||||
| 256 | 1 | leader only | 2162 | 214 | 0.52 | 0.06 |
|
||||
| 256 | 64 | leader only | 14789 | 792 | 7.69| 0.48 |
|
||||
| 256 | 256 | leader only | 14424 | 512 | 29.92 | 1.42 |
|
||||
| 64 | 64 | all servers | 45752 | 2048 | 2.47 | 0.14 |
|
||||
| 64 | 256 | all servers | 46592 | 1273 | 10.14 | 0.59 |
|
||||
| 256 | 64 | all servers | 45332 | 1847 | 2.48| 0.12 |
|
||||
| 256 | 256 | all servers | 46485 | 1340 | 10.18 | 0.74 |
|
||||
|
||||
### Single Key Write Performance
|
||||
|
||||
| key size in bytes | number of clients | target etcd server | average write QPS | write QPS stddev | average 90th Percentile Latency (ms) | latency stddev |
|
||||
|-------------------|-------------------|--------------------|------------------|-----------------|--------------------------------------|----------------|
|
||||
| 64 | 1 | leader only | 55 | 4 | 24.51 | 13.26 |
|
||||
| 64 | 64 | leader only | 2139 | 125 | 35.23 | 3.40 |
|
||||
| 64 | 256 | leader only | 4581 | 581 | 70.53 | 10.22 |
|
||||
| 256 | 1 | leader only | 56 | 4 | 22.37| 4.33 |
|
||||
| 256 | 64 | leader only | 2052 | 151 | 36.83 | 4.20 |
|
||||
| 256 | 256 | leader only | 4442 | 560 | 71.59 | 10.03 |
|
||||
| 64 | 64 | all servers | 1625 | 85 | 58.51 | 5.14 |
|
||||
| 64 | 256 | all servers | 4461 | 298 | 89.47 | 36.48 |
|
||||
| 256 | 64 | all servers | 1599 | 94 | 60.11| 6.43 |
|
||||
| 256 | 256 | all servers | 4315 | 193 | 88.98 | 7.01 |
|
||||
|
||||
## Performance Changes
|
||||
|
||||
- Because etcd now records metrics for each API call, read QPS performance seems to see a minor decrease in most scenarios. This minimal performance impact was judged a reasonable investment for the breadth of monitoring and debugging information returned.
|
||||
|
||||
- Write QPS to cluster leaders seems to be increased by a small margin. This is because the main loop and entry apply loops were decoupled in the etcd raft logic, eliminating several blocks between them.
|
||||
|
||||
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.
|
72
Documentation/v2/benchmarks/etcd-2-2-0-rc-benchmarks.md
Normal file
72
Documentation/v2/benchmarks/etcd-2-2-0-rc-benchmarks.md
Normal file
@ -0,0 +1,72 @@
|
||||
## Physical machines
|
||||
|
||||
GCE n1-highcpu-2 machine type
|
||||
|
||||
- 1x dedicated local SSD mounted under /var/lib/etcd
|
||||
- 1x dedicated slow disk for the OS
|
||||
- 1.8 GB memory
|
||||
- 2x CPUs
|
||||
|
||||
## etcd Cluster
|
||||
|
||||
3 etcd 2.2.0-rc members, each runs on a single machine.
|
||||
|
||||
Detailed versions:
|
||||
|
||||
```
|
||||
etcd Version: 2.2.0-alpha.1+git
|
||||
Git SHA: 59a5a7e
|
||||
Go Version: go1.4.2
|
||||
Go OS/Arch: linux/amd64
|
||||
```
|
||||
|
||||
Also, we use 3 etcd 2.1.0 alpha-stage members to form a cluster to get baseline performance. etcd's commit head is at [c7146bd5][c7146bd5], which is the same as the one we used in the [etcd 2.1 benchmark][etcd-2.1-benchmark].
|
||||
|
||||
## Testing
|
||||
|
||||
Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
|
||||
|
||||
## Performance
|
||||
|
||||
### reading one single key
|
||||
|
||||
| key size in bytes | number of clients | target etcd server | read QPS | 90th Percentile Latency (ms) |
|
||||
|-------------------|-------------------|--------------------|----------|---------------|
|
||||
| 64 | 1 | leader only | 2804 (-5%) | 0.4 (+0%) |
|
||||
| 64 | 64 | leader only | 17816 (+0%) | 5.7 (-6%) |
|
||||
| 64 | 256 | leader only | 18667 (-6%) | 20.4 (+2%) |
|
||||
| 256 | 1 | leader only | 2181 (-15%) | 0.5 (+25%) |
|
||||
| 256 | 64 | leader only | 17435 (-7%) | 6.0 (+9%) |
|
||||
| 256 | 256 | leader only | 18180 (-8%) | 21.3 (+3%) |
|
||||
| 64 | 64 | all servers | 46965 (-4%) | 2.1 (+0%) |
|
||||
| 64 | 256 | all servers | 55286 (-6%) | 7.4 (+6%) |
|
||||
| 256 | 64 | all servers | 46603 (-6%) | 2.1 (+5%) |
|
||||
| 256 | 256 | all servers | 55291 (-6%) | 7.3 (+4%) |
|
||||
|
||||
### writing one single key
|
||||
|
||||
| key size in bytes | number of clients | target etcd server | write QPS | 90th Percentile Latency (ms) |
|
||||
|-------------------|-------------------|--------------------|-----------|---------------|
|
||||
| 64 | 1 | leader only | 76 (+22%) | 19.4 (-15%) |
|
||||
| 64 | 64 | leader only | 2461 (+45%) | 31.8 (-32%) |
|
||||
| 64 | 256 | leader only | 4275 (+1%) | 69.6 (-10%) |
|
||||
| 256 | 1 | leader only | 64 (+20%) | 16.7 (-30%) |
|
||||
| 256 | 64 | leader only | 2385 (+30%) | 31.5 (-19%) |
|
||||
| 256 | 256 | leader only | 4353 (-3%) | 74.0 (+9%) |
|
||||
| 64 | 64 | all servers | 2005 (+81%) | 49.8 (-55%) |
|
||||
| 64 | 256 | all servers | 4868 (+35%) | 81.5 (-40%) |
|
||||
| 256 | 64 | all servers | 1925 (+72%) | 47.7 (-59%) |
|
||||
| 256 | 256 | all servers | 4975 (+36%) | 70.3 (-36%) |
|
||||
|
||||
### performance changes explanation
|
||||
|
||||
- read QPS in most scenarios is decreased by 5~8%. The reason is that etcd now records store metrics for each store operation. These metrics are important for monitoring and debugging, so this is acceptable.
|
||||
|
||||
- write QPS to the leader is increased by 20~30%. This is because we decoupled the raft main loop and the entry apply loop, which avoids them blocking each other.
|
||||
|
||||
- write QPS to all servers is increased by 30~80% because followers receive the latest commit index earlier and commit proposals faster.
|
||||
|
||||
[boom]: https://github.com/rakyll/boom
|
||||
[c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
|
||||
[etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
|
||||
[hack-benchmark]: /hack/benchmark/
|
@ -0,0 +1,47 @@
|
||||
## Physical machine
|
||||
|
||||
GCE n1-standard-2 machine type
|
||||
|
||||
- 1x dedicated local SSD mounted under /var/lib/etcd
|
||||
- 1x dedicated slow disk for the OS
|
||||
- 7.5 GB memory
|
||||
- 2x CPUs
|
||||
|
||||
## etcd
|
||||
|
||||
```
|
||||
etcd Version: 2.2.0-rc.0+git
|
||||
Git SHA: 103cb5c
|
||||
Go Version: go1.5
|
||||
Go OS/Arch: linux/amd64
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Start a 3-member etcd cluster, each member using 2 cores.
|
||||
|
||||
The key name length is always 64 bytes, a reasonable average key size.
|
||||
|
||||
## Memory Maximal Usage
|
||||
|
||||
- etcd may hit its maximal memory usage if one follower is dead and the leader keeps sending snapshots.
|
||||
- `max RSS` is the maximal memory usage recorded in 3 runs.
|
||||
|
||||
| value bytes | key number | data size(MB) | max RSS(MB) | max RSS/data rate on leader |
|
||||
|-------------|-------------|---------------|-------------|-----------------------------|
|
||||
| 128 | 50000 | 6 | 433 | 72x |
|
||||
| 128 | 100000 | 12 | 659 | 54x |
|
||||
| 128 | 200000 | 24 | 1466 | 61x |
|
||||
| 1024 | 50000 | 48 | 1253 | 26x |
|
||||
| 1024 | 100000 | 96 | 2344 | 24x |
|
||||
| 1024 | 200000 | 192 | 4361 | 22x |
|
||||
|
||||
## Data Size Threshold
|
||||
|
||||
- When etcd reaches the data size threshold, it may easily trigger leader elections and drop some proposals.
|
||||
- In most cases, the etcd cluster should work smoothly if it doesn't hit the threshold. If it doesn't work well due to insufficient resources, decrease its data size.
|
||||
|
||||
| value bytes | key number limitation | suggested data size threshold(MB) | consumed RSS(MB) |
|
||||
|-------------|-----------------------|-----------------------------------|------------------|
|
||||
| 128 | 400K | 48 | 2400 |
|
||||
| 1024 | 300K | 292 | 6500 |
|
42
Documentation/v2/benchmarks/etcd-3-demo-benchmarks.md
Normal file
42
Documentation/v2/benchmarks/etcd-3-demo-benchmarks.md
Normal file
@ -0,0 +1,42 @@
|
||||
## Physical machines
|
||||
|
||||
GCE n1-highcpu-2 machine type
|
||||
|
||||
- 1x dedicated local SSD mounted under /var/lib/etcd
|
||||
- 1x dedicated slow disk for the OS
|
||||
- 1.8 GB memory
|
||||
- 2x CPUs
|
||||
- etcd version 2.2.0
|
||||
|
||||
## etcd Cluster
|
||||
|
||||
1 etcd member running in v3 demo mode
|
||||
|
||||
## Testing
|
||||
|
||||
Use [etcd v3 benchmark tool][etcd-v3-benchmark].
|
||||
|
||||
## Performance
|
||||
|
||||
### reading one single key
|
||||
|
||||
| key size in bytes | number of clients | read QPS | 90th Percentile Latency (ms) |
|
||||
|-------------------|-------------------|----------|---------------|
|
||||
| 256 | 1 | 2716 | 0.4 |
|
||||
| 256 | 64 | 16623 | 6.1 |
|
||||
| 256 | 256 | 16622 | 21.7 |
|
||||
|
||||
The performance is nearly the same as the one with empty server handler.
|
||||
|
||||
### reading one single key after putting
|
||||
|
||||
| key size in bytes | number of clients | read QPS | 90th Percentile Latency (ms) |
|
||||
|-------------------|-------------------|----------|---------------|
|
||||
| 256 | 1 | 2269 | 0.5 |
|
||||
| 256 | 64 | 13582 | 8.6 |
|
||||
| 256 | 256 | 13262 | 47.5 |
|
||||
|
||||
The performance with an empty server handler is not affected by a single put. So the
|
||||
performance downgrade should be caused by the storage package.
|
||||
|
||||
[etcd-v3-benchmark]: /tools/benchmark/
|
77
Documentation/v2/benchmarks/etcd-3-watch-memory-benchmark.md
Normal file
77
Documentation/v2/benchmarks/etcd-3-watch-memory-benchmark.md
Normal file
@ -0,0 +1,77 @@
|
||||
# Watch Memory Usage Benchmark
|
||||
|
||||
*NOTE*: The watch features are under active development, and their memory usage may change as that development progresses. We do not expect it to significantly increase beyond the figures stated below.
|
||||
|
||||
A primary goal of etcd is supporting a very large number of watchers doing a massive amount of watching. etcd aims to support O(10k) clients, O(100K) watch streams (O(10) streams per client) and O(10M) total watchings (O(100) watchings per stream). The memory consumed by each individual watching accounts for the largest portion of etcd's overall usage, and is therefore the focus of current and future optimizations.
|
||||
|
||||
|
||||
Three related components of etcd watch consume physical memory: each `grpc.Conn`, each watch stream, and each instance of the watching activity. `grpc.Conn` maintains the actual TCP connection and other gRPC connection state. Each `grpc.Conn` consumes O(10kb) of memory, and might have multiple watch streams attached.
|
||||
|
||||
Each watch stream is an independent HTTP2 connection which consumes another O(10kb) of memory.
|
||||
Multiple watchings might share one watch stream.
|
||||
|
||||
Watching is the actual struct that tracks the changes on the key-value store. Each watching should only consume < O(1kb).
|
||||
|
||||
```
|
||||
+-------+
|
||||
| watch |
|
||||
+---------> | foo |
|
||||
| +-------+
|
||||
+------+-----+
|
||||
| stream |
|
||||
+--------------> | |
|
||||
| +------+-----+ +-------+
|
||||
| | | watch |
|
||||
| +---------> | bar |
|
||||
+-----+------+ +-------+
|
||||
| | +------------+
|
||||
| conn +-------> | stream |
|
||||
| | | |
|
||||
+-----+------+ +------------+
|
||||
|
|
||||
|
|
||||
|
|
||||
| +------------+
|
||||
+--------------> | stream |
|
||||
| |
|
||||
+------------+
|
||||
```
|
||||
|
||||
The theoretical memory consumption of watch can be approximated with the formula:
|
||||
`memory = c1 * number_of_conn + c2 * avg_number_of_stream_per_conn + c3 * avg_number_of_watch_stream`
|
||||
|
||||
## Testing Environment
|
||||
|
||||
etcd version
|
||||
- git head https://github.com/coreos/etcd/commit/185097ffaa627b909007e772c175e8fefac17af3
|
||||
|
||||
GCE n1-standard-2 machine type
|
||||
- 7.5 GB memory
|
||||
- 2x CPUs
|
||||
|
||||
## Overall memory usage
|
||||
|
||||
The overall memory usage captures how much [RSS][rss] etcd consumes with the client watchers. While the result may vary by as much as 10%, it is still meaningful, since the goal is to learn about the rough memory usage and the pattern of allocations.
|
||||
|
||||
With the benchmark result, we can calculate roughly that `c1 = 17kb`, `c2 = 18kb` and `c3 = 350bytes`. So each additional client connection consumes 17kb of memory, each additional stream consumes 18kb of memory, and each additional watching only costs 350 bytes. A single etcd server can maintain millions of watchings with a few GB of memory in the normal case.
|
||||
|
||||
|
||||
| clients | streams per client | watchings per stream | total watching | memory usage |
|
||||
|---------|---------|-----------|----------------|--------------|
|
||||
| 1k | 1 | 1 | 1k | 50MB |
|
||||
| 2k | 1 | 1 | 2k | 90MB |
|
||||
| 5k | 1 | 1 | 5k | 200MB |
|
||||
| 1k | 10 | 1 | 10k | 217MB |
|
||||
| 2k | 10 | 1 | 20k | 417MB |
|
||||
| 5k | 10 | 1 | 50k | 980MB |
|
||||
| 1k | 50 | 1 | 50k | 1001MB |
|
||||
| 2k | 50 | 1 | 100k | 1960MB |
|
||||
| 5k | 50 | 1 | 250k | 4700MB |
|
||||
| 1k | 50 | 10 | 500k | 1171MB |
|
||||
| 2k | 50 | 10 | 1M | 2371MB |
|
||||
| 5k | 50 | 10 | 2.5M | 5710MB |
|
||||
| 1k | 50 | 100 | 5M | 2380MB |
|
||||
| 2k | 50 | 100 | 10M | 4672MB |
|
||||
| 5k | 50 | 100 | 50M | *OOM* |
|
||||
|
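||||
As a rough sanity check, the constants reproduce the table above within tens of percent; a minimal sketch for the 2k-client, 10-streams-per-client row (using total stream and watching counts):
|
||||
```sh
clients=2000; streams_per_client=10; watchings_per_stream=1
streams=$((clients * streams_per_client))
watchings=$((streams * watchings_per_stream))
# 17kb per connection + 18kb per stream + 350 bytes per watching
kb=$((17 * clients + 18 * streams + 350 * watchings / 1024))
echo "$((kb / 1024)) MB"   # ~391 MB vs. the measured 417 MB
```
|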
||||
[rss]: https://en.wikipedia.org/wiki/Resident_set_size
|
98
Documentation/v2/benchmarks/etcd-storage-memory-benchmark.md
Normal file
98
Documentation/v2/benchmarks/etcd-storage-memory-benchmark.md
Normal file
@ -0,0 +1,98 @@
|
||||
# Storage Memory Usage Benchmark
|
||||
|
||||
<!---todo: link storage to storage design doc-->
|
||||
Two components of etcd storage consume physical memory. The etcd process allocates an *in-memory index* to speed key lookup. The process's *page cache*, managed by the operating system, stores recently-accessed data from disk for quick re-use.
|
||||
|
||||
The in-memory index holds all the keys in a [B-tree][btree] data structure, along with pointers to the on-disk data (the values). Each key in the B-tree may contain multiple pointers, pointing to different versions of its values. The theoretical memory consumption of the in-memory index can hence be approximated with the formula:
|
||||
|
||||
`N * (c1 + avg_key_size) + N * (avg_versions_of_key) * (c2 + size_of_pointer)`
|
||||
|
||||
where `c1` is the key metadata overhead and `c2` is the version metadata overhead.
|
||||
|
||||
The graph shows the detailed structure of the in-memory index B-tree.
|
||||
|
||||
```
|
||||
|
||||
|
||||
In mem index
|
||||
|
||||
+------------+
|
||||
| key || ... |
|
||||
+--------------+ | || |
|
||||
| | +------------+
|
||||
| | | v1 || ... |
|
||||
| disk <----------------| || | Tree Node
|
||||
| | +------------+
|
||||
| | | v2 || ... |
|
||||
| <----------------+ || |
|
||||
| | +------------+
|
||||
+--------------+ +-----+ | | |
|
||||
| | | | |
|
||||
| +------------+
|
||||
|
|
||||
|
|
||||
^
|
||||
------+
|
||||
| ... |
|
||||
| |
|
||||
+-----+
|
||||
| ... | Tree Node
|
||||
| |
|
||||
+-----+
|
||||
| ... |
|
||||
| |
|
||||
------+
|
||||
```
|
||||
|
||||
[Page cache memory][pagecache] is managed by the operating system and is not covered in detail in this document.
|
||||
|
||||
## Testing Environment
|
||||
|
||||
etcd version
|
||||
- git head https://github.com/coreos/etcd/commit/776e9fb7be7eee5e6b58ab977c8887b4fe4d48db
|
||||
|
||||
GCE n1-standard-2 machine type
|
||||
|
||||
- 7.5 GB memory
|
||||
- 2x CPUs
|
||||
|
||||
## In-memory index memory usage
|
||||
|
||||
In this test, we only benchmark the memory usage of the in-memory index. The goal is to find `c1` and `c2` mentioned above and to understand the hard limit of memory consumption of the storage.
|
||||
|
||||
We measure memory usage via Go's runtime.ReadMemStats, taking the difference in total allocated bytes before and after creating the index. This cannot perfectly reflect the memory usage of the in-memory index itself, but it shows the rough consumption pattern.
|
||||
|
||||
| N | versions | key size | memory usage |
|
||||
|------|----------|----------|--------------|
|
||||
| 100K | 1 | 64bytes | 22MB |
|
||||
| 100K | 5 | 64bytes | 39MB |
|
||||
| 1M | 1 | 64bytes | 218MB |
|
||||
| 1M | 5 | 64bytes | 432MB |
|
||||
| 100K | 1 | 256bytes | 41MB |
|
||||
| 100K | 5 | 256bytes | 65MB |
|
||||
| 1M | 1 | 256bytes | 409MB |
|
||||
| 1M | 5 | 256bytes | 506MB |
|
||||
|
||||
|
||||
Based on the result, we can calculate `c1=120bytes`, `c2=30bytes`. We only need two sets of data to calculate `c1` and `c2`, since they are the only unknown variables in the formula. The `c1=120bytes` and `c2=30bytes` figures are the averages of the 4 sets of `c1` and `c2` we calculated. The key metadata overhead is still relatively nontrivial (50%) for small key-value pairs. However, this is a significant improvement over the old store, which had at least 1000% overhead.
|
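||||
Plugging these constants back into the index formula roughly reproduces the table above; a minimal sketch for the 1M-key, 5-version, 64-byte row (assuming 8-byte pointers):
|
||||
```sh
N=1000000; versions=5; key_size=64
bytes=$((N * (120 + key_size) + N * versions * (30 + 8)))
echo "$((bytes / 1024 / 1024)) MB"   # ~356 MB vs. the measured 432 MB
```
|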
||||
|
||||
## Overall memory usage
|
||||
|
||||
The overall memory usage captures how much RSS etcd consumes with the storage. The value size should have very little impact on the overall memory usage of etcd, since we keep values on disk and only retain hot values in memory, managed by the OS page cache.
|
||||
|
||||
| N | versions | key size | value size | memory usage |
|
||||
|------|----------|----------|------------|--------------|
|
||||
| 100K | 1 | 64bytes | 256bytes | 40MB |
|
||||
| 100K | 5 | 64bytes | 256bytes | 89MB |
|
||||
| 1M | 1 | 64bytes | 256bytes | 470MB |
|
||||
| 1M | 5 | 64bytes | 256bytes | 880MB |
|
||||
| 100K | 1 | 64bytes | 1KB | 102MB |
|
||||
| 100K | 5 | 64bytes | 1KB | 164MB |
|
||||
| 1M | 1 | 64bytes | 1KB | 587MB |
|
||||
| 1M | 5 | 64bytes | 1KB | 836MB |
|
||||
|
||||
Based on the result, we know the value size does not significantly impact the memory consumption. There is some minor increase due to more data held in the OS page cache.
|
||||
|
||||
[btree]: https://en.wikipedia.org/wiki/B-tree
|
||||
[pagecache]: https://en.wikipedia.org/wiki/Page_cache
|
||||
|
26
Documentation/v2/branch_management.md
Normal file
26
Documentation/v2/branch_management.md
Normal file
@ -0,0 +1,26 @@
|
||||
# Branch Management
|
||||
|
||||
## Guide
|
||||
|
||||
* New development occurs on the [master branch][master].
|
||||
* Master branch should always have a green build!
|
||||
* Backwards-compatible bug fixes should target the master branch and subsequently be ported to stable branches.
|
||||
* Once the master branch is ready for release, it will be tagged and become the new stable branch.
|
||||
|
||||
The etcd team has adopted a *rolling release model* and supports one stable version of etcd.
|
||||
|
||||
### Master branch
|
||||
|
||||
The `master` branch is our development branch. All new features land here first.
|
||||
|
||||
If you want to try new features, pull `master` and play with it. Note that `master` may not be stable because new features may introduce bugs.
|
||||
|
||||
Before the release of the next stable version, feature PRs will be frozen. We will focus on testing, bug fixes, and documentation for one to two weeks.
|
||||
|
||||
### Stable branches
|
||||
|
||||
All branches with prefix `release-` are considered _stable_ branches.
|
||||
|
||||
After every minor release (http://semver.org/), we will have a new stable branch for that release. We will keep fixing backwards-compatible bugs for the latest stable release, but not for previous releases. A _patch_ release, incorporating any bug fixes, will be cut roughly every two weeks, given any patches.
|
||||
|
||||
[master]: https://github.com/coreos/etcd/tree/master
|
@ -309,6 +309,7 @@ infra0.example.com. 300 IN A 10.0.1.10
|
||||
infra1.example.com. 300 IN A 10.0.1.11
|
||||
infra2.example.com. 300 IN A 10.0.1.12
|
||||
```
|
||||
|
||||
#### Bootstrap the etcd cluster using DNS
|
||||
|
||||
etcd cluster members can listen on domain names or IP addresses; the bootstrap process will resolve DNS A records.
|
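||||
For illustration, a member could then be bootstrapped as follows (a sketch; it assumes matching `_etcd-server._tcp` SRV records exist alongside the A records above):
|
||||
```sh
etcd --name infra0 \
  --discovery-srv example.com \
  --initial-advertise-peer-urls http://infra0.example.com:2380 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster-state new \
  --advertise-client-urls http://infra0.example.com:2379 \
  --listen-client-urls http://infra0.example.com:2379 \
  --listen-peer-urls http://infra0.example.com:2380
```
|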
@ -39,7 +39,7 @@ To start etcd automatically using custom settings at startup in Linux, using a [
|
||||
+ env variable: ETCD_HEARTBEAT_INTERVAL
|
||||
|
||||
### --election-timeout
|
||||
+ Time (in milliseconds) for an election to timeout. See [Documentation/tuning.md](tuning.md#time-parameters) for details.
|
||||
+ Time (in milliseconds) for an election to timeout. See [tuning.md](tuning.md#time-parameters) for details.
|
||||
+ default: "1000"
|
||||
+ env variable: ETCD_ELECTION_TIMEOUT
|
||||
|
@ -6,11 +6,11 @@ The procedure includes some manual steps for sanity checking but it can probably
|
||||
|
||||
## Prepare Release
|
||||
|
||||
Set desired version as environment variable for following steps. Here is an example to release 2.3.0:
|
||||
Set desired version as environment variable for following steps. Here is an example to release 2.1.3:
|
||||
|
||||
```
|
||||
export VERSION=v2.3.0
|
||||
export PREV_VERSION=v2.2.5
|
||||
export VERSION=v2.1.3
|
||||
export PREV_VERSION=v2.1.2
|
||||
```
|
||||
|
||||
All releases version numbers follow the format of [semantic versioning 2.0.0](http://semver.org/).
|
||||
@ -30,6 +30,7 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
|
||||
|
||||
## Write Release Note
|
||||
|
||||
|
||||
- Write an introduction for the new release. For example, what major bugs we fixed, what new features we introduced, or what performance improvements we made.
|
||||
- Write a changelog for the release. The changelog should be straightforward and easy for the end user to understand.
|
||||
- Put `[GH XXXX]` at the head of each change line to reference the Pull Request that introduced the change, and link it so readers can jump to the Pull Request.
|
||||
@ -60,14 +61,16 @@ It generates all release binaries and images under directory ./release.
|
||||
|
||||
## Sign Binaries and Images
|
||||
|
||||
The etcd project key must be used to sign the generated binaries and images. `$SUBKEYID` is the key ID of the etcd project Yubikey. Connect the key and run `gpg2 --card-status` to get the ID.
|
||||
Choose appropriate private key to sign the generated binaries and images.
|
||||
|
||||
The following commands are used for public release sign:
|
||||
|
||||
```
|
||||
cd release
|
||||
for i in etcd-*{.zip,.tar.gz}; do gpg2 --default-key $SUBKEYID --output ${i}.asc --detach-sign ${i}; done
|
||||
for i in etcd-*{.zip,.tar.gz}; do gpg2 --verify ${i}.asc ${i}; done
|
||||
# personal GPG is okay for now
|
||||
for i in etcd-*{.zip,.tar.gz}; do gpg --sign ${i}; done
|
||||
# use `CoreOS ACI Builder <release@coreos.com>` secret key
|
||||
gpg -u 88182190 -a --output etcd-${VERSION}-linux-amd64.aci.asc --detach-sig etcd-${VERSION}-linux-amd64.aci
|
||||
```
|
||||
|
||||
## Publish Release Page in GitHub
|
@ -1,83 +1,84 @@
|
||||
# FAQ
|
||||
## 1) How come I can read an old version of the data when a majority of the members are down?
|
||||
|
||||
## 1) Why can an etcd client read an old version of data when a majority of the etcd cluster members are down?
|
||||
|
||||
In situations where a client connects to a minority, etcd
|
||||
favors availability over consistency by default. This means that even though
|
||||
data might be “out of date”, it is still better to return something versus
|
||||
nothing.
|
||||
nothing.
|
||||
|
||||
In order to confirm that a read is up to date with a majority of the cluster,
|
||||
the client can use the `quorum=true` parameter on reads of keys. This means
|
||||
that a majority of the cluster is checked on reads before returning the data,
|
||||
otherwise the read will timeout and fail.
|
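||||
A quorum read is just a query parameter on the v2 keys API; for example (local address for illustration):
|
||||
```sh
curl 'http://127.0.0.1:2379/v2/keys/foo?quorum=true'
```
|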
||||
|
||||
## 2) With quorum=false, doesn’t this mean that if my client switched the member it was connected to, that it could experience a logical ordering where the cluster goes backwards in time?
|
||||
## 2) With quorum=false, doesn’t this mean that if my client switched the member it was connected to, that it could experience a logical ordering where the cluster goes backwards in time?
|
||||
|
||||
Yes, but this could be handled in the etcd client implementation by
|
||||
remembering the last seen index. The “index” is the cluster's single
|
||||
irrevocable sequence of the entire modification history. The client could
|
||||
remember the last seen index, and determine via comparing the index returned on
|
||||
the GET whether or not the state of the key-value pair is before or after its
|
||||
last seen state.
|
||||
last seen state.
|
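||||
A minimal sketch of this pattern using the `X-Etcd-Index` response header (it assumes the client persists `$last_seen_index` between calls):
|
||||
```sh
index=$(curl -si http://127.0.0.1:2379/v2/keys/foo \
  | awk -F': ' 'tolower($1)=="x-etcd-index" {print $2}' | tr -d '\r')
if [ "$index" -lt "$last_seen_index" ]; then
  echo "stale read: index $index is older than last seen $last_seen_index"
fi
```
|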
||||
|
||||
## 3) What happens if a watch is registered on a minority member?
|
||||
## 3) What happens if a watch is registered on a minority member?
|
||||
|
||||
The watch will stay untriggered, even as modifications are occurring in the
|
||||
majority quorum. This is an open issue, and is being addressed in v3. There are
|
||||
multiple ways to work around the watch trigger not firing.
|
||||
multiple ways to work around the watch trigger not firing.
|
||||
|
||||
1) build a signaling mechanism independent of etcd. This could be as simple as
|
||||
a “pulse” to the client to reissue a GET with quorum=true for the most recent
|
||||
version of the data.
|
||||
|
||||
2) poll on the `/v2/keys` endpoint and check that the raft-index is increasing every
|
||||
timeout.
|
||||
version of the data.
|
||||
|
||||
## 4) What is a proxy used for?
|
||||
2) poll on the `/v2/keys` endpoint and check that the raft-index is increasing every
|
||||
timeout.
|
||||
|
||||
## 4) What is a proxy used for?
|
||||
|
||||
A proxy is a redirection server to the etcd cluster. The proxy handles the
|
||||
redirection of a client to the current configuration of the etcd cluster. A
|
||||
typical use case is to start a proxy on a machine, and on first boot up of the
|
||||
proxy specify both the `--proxy` flag and the `--initial-cluster` flag.
|
||||
proxy specify both the `--proxy` flag and the `--initial-cluster` flag.
|
||||
|
||||
From there, any etcdctl client that starts up automatically speaks to the local
|
||||
proxy and the proxy redirects operations to the current configuration of the
|
||||
cluster it was originally paired with.
|
||||
cluster it was originally paired with.
|
||||
|
||||
In the v2 spec of etcd, proxies cannot be promoted to members of the cluster.
|
||||
They also cannot be promoted to followers or at any point become part of the
|
||||
replication of the etcd cluster itself.
|
||||
replication of the etcd cluster itself.
|
||||
|
||||
## 5) How is cluster membership and health handled in etcd v2?
|
||||
## 5) How is cluster membership and health handled in etcd v2?
|
||||
|
||||
The design goal of etcd is that reconfiguration is simply an API, and health
|
||||
monitoring and addition/removal of members is up to the individual application
|
||||
and their integration with the reconfiguration API.
|
||||
and their integration with the reconfiguration API.
|
||||
|
||||
Thus, a member that is down, even infinitely, will never be automatically
|
||||
removed from the etcd cluster member list.
|
||||
removed from the etcd cluster member list.
|
||||
|
||||
This makes sense because it's usually an application level / administrative
|
||||
action to determine whether a reconfiguration should happen based on health.
|
||||
action to determine whether a reconfiguration should happen based on health.
|
||||
|
||||
For more information, refer to the [runtime reconfiguration design document][runtime-reconf-design].
|
||||
|
||||
## 6) how does --endpoint work with etcdctl?
|
||||
## 6) how does --endpoint work with etcdctl?
|
||||
|
||||
The `--endpoint` flag can specify any number of etcd cluster members in a comma
|
||||
separated list. This list might be a subset, equal to, or more than the actual
|
||||
etcd cluster member list itself.
|
||||
etcd cluster member list itself.
|
||||
|
||||
If only one peer is specified via the `--endpoint` flag, etcdctl discovers the
|
||||
rest of the cluster via the member list of that one peer, and then it randomly
|
||||
chooses a member to use. Again, the client can use the `quorum=true` flag on
|
||||
reads, which will always fail when using a member in the minority.
|
||||
reads, which will always fail when using a member in the minority.
|
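||||
For example (illustrative member addresses; etcdctl discovers the remaining members from either one):
|
||||
```sh
etcdctl --endpoint http://10.0.1.10:2379,http://10.0.1.11:2379 get /foo
```
|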
||||
|
||||
If peers from multiple clusters are specified via the `--endpoint` flag, etcdctl
|
||||
will randomly choose a peer, and the request will simply get routed to one of
|
||||
the clusters. This is probably not what you want.
|
||||
the clusters. This is probably not what you want.
|
||||
|
||||
Note: --peers flag is now deprecated and --endpoint should be used instead,
|
||||
Note: the --peers flag is now deprecated and --endpoint should be used instead,
|
||||
as the old name might confuse users into giving etcdctl a peer URL.
|
||||
|
||||
[runtime-reconf-design]: runtime-reconf-design.md
|
35
Documentation/v2/glossary.md
Normal file
35
Documentation/v2/glossary.md
Normal file
@ -0,0 +1,35 @@
|
||||
# Glossary
|
||||
|
||||
This document defines the various terms used in etcd documentation, command line and source code.
|
||||
|
||||
## Node
|
||||
|
||||
Node is an instance of raft state machine.
|
||||
|
||||
It has a unique identifier, and records other nodes' progress internally when it is the leader.
|
||||
|
||||
## Member
|
||||
|
||||
Member is an instance of etcd. It hosts a node, and provides service to clients.
|
||||
|
||||
## Cluster
|
||||
|
||||
Cluster consists of several members.
|
||||
|
||||
The node in each member follows the raft consensus protocol to replicate logs. The cluster receives proposals from members, commits them, and applies them to the local store.
|
||||
|
||||
## Peer
|
||||
|
||||
Peer is another member of the same cluster.
|
||||
|
||||
## Proposal
|
||||
|
||||
A proposal is a request (for example a write request, a configuration change request) that needs to go through raft protocol.
|
||||
|
||||
## Client
|
||||
|
||||
Client is a caller of the cluster's HTTP API.
|
||||
|
||||
## Machine (deprecated)
|
||||
|
||||
The term used for Member in etcd before 2.0.
|
124
Documentation/v2/libraries-and-tools.md
Normal file
124
Documentation/v2/libraries-and-tools.md
Normal file
@ -0,0 +1,124 @@
|
||||
# Libraries and Tools
|
||||
|
||||
**Tools**
|
||||
|
||||
- [etcdctl](https://github.com/coreos/etcd/tree/master/etcdctl) - A command line client for etcd
|
||||
- [etcd-backup](https://github.com/fanhattan/etcd-backup) - A powerful command line utility for dumping/restoring etcd - Supports v2
|
||||
- [etcd-dump](https://npmjs.org/package/etcd-dump) - Command line utility for dumping/restoring etcd.
|
||||
- [etcd-fs](https://github.com/xetorthio/etcd-fs) - FUSE filesystem for etcd
|
||||
- [etcddir](https://github.com/rekby/etcddir) - Real-time sync between etcd and a local directory. Works with Windows and Linux.
|
||||
- [etcd-browser](https://github.com/henszey/etcd-browser) - A web-based key/value editor for etcd using AngularJS
|
||||
- [etcd-lock](https://github.com/datawisesystems/etcd-lock) - Master election & distributed r/w lock implementation using etcd - Supports v2
|
||||
- [etcd-console](https://github.com/matishsiao/etcd-console) - A web-based key/value editor for etcd using PHP
|
||||
- [etcd-viewer](https://github.com/nikfoundas/etcd-viewer) - An etcd key-value store editor/viewer written in Java
|
||||
- [etcdtool](https://github.com/mickep76/etcdtool) - Export/Import/Edit etcd directory as JSON/YAML/TOML and Validate directory using JSON schema
|
||||
- [etcd-rest](https://github.com/mickep76/etcd-rest) - Create generic REST API in Go using etcd as a backend with validation using JSON schema
|
||||
- [etcdsh](https://github.com/kamilhark/etcdsh) - A command line client with support of command history and tab completion. Supports v2
|
||||
|
||||
**Go libraries**
|
||||
|
||||
- [etcd/client](https://github.com/coreos/etcd/blob/master/client) - the officially maintained Go client
|
||||
- [go-etcd](https://github.com/coreos/go-etcd) - the deprecated official client. May be useful for older (<2.0.0) versions of etcd.
|
||||
|
||||
**Java libraries**
|
||||
|
||||
- [boonproject/etcd](https://github.com/boonproject/boon/blob/master/etcd/README.md) - Supports v2, Async/Sync and waits
|
||||
- [justinsb/jetcd](https://github.com/justinsb/jetcd)
|
||||
- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
|
||||
- [jurmous/etcd4j](https://github.com/jurmous/etcd4j) - Supports v2, Async/Sync, waits and SSL
|
||||
- [AdoHe/etcd4j](http://github.com/AdoHe/etcd4j) - Supports v2 (enhance for real production cluster)
|
||||
|
||||
**Python libraries**
|
||||
|
||||
- [jplana/python-etcd](https://github.com/jplana/python-etcd) - Supports v2
|
||||
- [russellhaering/txetcd](https://github.com/russellhaering/txetcd) - a Twisted Python library
|
||||
- [cholcombe973/autodock](https://github.com/cholcombe973/autodock) - A docker deployment automation tool
|
||||
- [lisael/aioetcd](https://github.com/lisael/aioetcd) - (Python 3.4+) Asyncio coroutines client (Supports v2)
|
||||
|
||||
**Node libraries**
|
||||
|
||||
- [stianeikeland/node-etcd](https://github.com/stianeikeland/node-etcd) - Supports v2 (w Coffeescript)
|
||||
- [lavagetto/nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) - Supports v2
|
||||
- [deedubs/node-etcd-config](https://github.com/deedubs/node-etcd-config) - Supports v2
|
||||
|
||||
**Ruby libraries**
|
||||
|
||||
- [iconara/etcd-rb](https://github.com/iconara/etcd-rb)
|
||||
- [jpfuentes2/etcd-ruby](https://github.com/jpfuentes2/etcd-ruby)
|
||||
- [ranjib/etcd-ruby](https://github.com/ranjib/etcd-ruby) - Supports v2
|
||||
|
||||
**C libraries**
|
||||
|
||||
- [jdarcy/etcd-api](https://github.com/jdarcy/etcd-api) - Supports v2
|
||||
- [shafreeck/cetcd](https://github.com/shafreeck/cetcd) - Supports v2
|
||||
|
||||
**C++ libraries**
|
||||
- [edwardcapriolo/etcdcpp](https://github.com/edwardcapriolo/etcdcpp) - Supports v2
|
||||
- [suryanathan/etcdcpp](https://github.com/suryanathan/etcdcpp) - Supports v2 (with waits)
|
||||
|
||||
**Clojure libraries**
|
||||
|
||||
- [aterreno/etcd-clojure](https://github.com/aterreno/etcd-clojure)
|
||||
- [dwwoelfel/cetcd](https://github.com/dwwoelfel/cetcd) - Supports v2
|
||||
- [rthomas/clj-etcd](https://github.com/rthomas/clj-etcd) - Supports v2
|
||||
|
||||
**Erlang libraries**
|
||||
|
||||
- [marshall-lee/etcd.erl](https://github.com/marshall-lee/etcd.erl)
|
||||
|
||||
**.Net Libraries**
|
||||
|
||||
- [wangjia184/etcdnet](https://github.com/wangjia184/etcdnet) - Supports v2
|
||||
- [drusellers/etcetera](https://github.com/drusellers/etcetera)
|
||||
|
||||
**PHP Libraries**
|
||||
|
||||
- [linkorb/etcd-php](https://github.com/linkorb/etcd-php)
|
||||
|
||||
**Haskell libraries**
|
||||
|
||||
- [wereHamster/etcd-hs](https://github.com/wereHamster/etcd-hs)
|
||||
|
||||
**R libraries**
|
||||
|
||||
- [ropensci/etseed](https://github.com/ropensci/etseed)
|
||||
|
||||
**Tcl libraries**
|
||||
|
||||
- [efrecon/etcd-tcl](https://github.com/efrecon/etcd-tcl) - Supports v2, except wait.
|
||||
|
||||
**Chef Integration**
|
||||
|
||||
- [coderanger/etcd-chef](https://github.com/coderanger/etcd-chef)
|
||||
|
||||
**Chef Cookbook**
|
||||
|
||||
- [spheromak/etcd-cookbook](https://github.com/spheromak/etcd-cookbook)
|
||||
|
||||
**BOSH Releases**
|
||||
|
||||
- [cloudfoundry-community/etcd-boshrelease](https://github.com/cloudfoundry-community/etcd-boshrelease)
|
||||
- [cloudfoundry/cf-release](https://github.com/cloudfoundry/cf-release/tree/master/jobs/etcd)
|
||||
|
||||
**Projects using etcd**
|
||||
|
||||
- [binocarlos/yoda](https://github.com/binocarlos/yoda) - etcd + ZeroMQ
|
||||
- [calavera/active-proxy](https://github.com/calavera/active-proxy) - HTTP Proxy configured with etcd
|
||||
- [derekchiang/etcdplus](https://github.com/derekchiang/etcdplus) - A set of distributed synchronization primitives built upon etcd
|
||||
- [go-discover](https://github.com/flynn/go-discover) - service discovery in Go
|
||||
- [gleicon/goreman](https://github.com/gleicon/goreman/tree/etcd) - Branch of the Go Foreman clone with etcd support
|
||||
- [garethr/hiera-etcd](https://github.com/garethr/hiera-etcd) - Puppet hiera backend using etcd
|
||||
- [mattn/etcd-vim](https://github.com/mattn/etcd-vim) - SET and GET keys from inside vim
|
||||
- [mattn/etcdenv](https://github.com/mattn/etcdenv) - "env" shebang with etcd integration
|
||||
- [kelseyhightower/confd](https://github.com/kelseyhightower/confd) - Manage local app config files using templates and data from etcd
|
||||
- [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
|
||||
- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
|
||||
- [fleet](https://github.com/coreos/fleet) - Distributed init system
|
||||
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - Container cluster manager introduced by Google.
|
||||
- [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
|
||||
- [duedil-ltd/discodns](https://github.com/duedil-ltd/discodns) - Simple DNS nameserver using etcd as a database for names and records.
|
||||
- [skynetservices/skydns](https://github.com/skynetservices/skydns) - RFC compliant DNS server
|
||||
- [xordataexchange/crypt](https://github.com/xordataexchange/crypt) - Securely store values in etcd using GPG encryption
|
||||
- [spf13/viper](https://github.com/spf13/viper) - Go configuration library, reads values from ENV, pflags, files, and etcd with optional encryption
|
||||
- [lytics/metafora](https://github.com/lytics/metafora) - Go distributed task library
|
||||
- [ryandoyle/nss-etcd](https://github.com/ryandoyle/nss-etcd) - A GNU libc NSS module for resolving names from etcd.
|
143
Documentation/v2/metrics.md
Normal file
143
Documentation/v2/metrics.md
Normal file
@ -0,0 +1,143 @@
|
||||
# Metrics
|
||||
|
||||
etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset.
|
||||
|
||||
The simplest way to see the available metrics is to cURL the metrics endpoint `/metrics`. The format is described [here](http://prometheus.io/docs/instrumenting/exposition_formats/).
|
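||||
For example (a local member address for illustration):
|
||||
```sh
curl -L http://127.0.0.1:2379/metrics
```
|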
||||
|
||||
Follow the [Prometheus getting started doc][prometheus-getting-started] to spin up a Prometheus server to collect etcd metrics.
|
||||
|
||||
The naming of metrics follows the suggested [Prometheus best practices][prometheus-naming]. A metric name has an `etcd` or `etcd_debugging` prefix as its namespace and a subsystem prefix (for example `wal` and `etcdserver`).
|
||||
|
||||
## etcd namespace metrics
|
||||
|
||||
The metrics under the `etcd` prefix are for monitoring and alerting. They are stable, high-level metrics. Any change to these metrics will be included in the release notes.
|
||||
|
||||
### http requests
|
||||
|
||||
These metrics describe the serving of requests (non-watch events) served by etcd members in non-proxy mode: total
|
||||
incoming requests, request failures and processing latency (including raft rounds for storage). They are useful for tracking
|
||||
user-generated traffic hitting the etcd cluster.
|
||||
|
||||
All these metrics are prefixed with `etcd_http_`
|
||||
|
||||
| Name | Description | Type |
|
||||
|--------------------------------|-----------------------------------------------------------------------------------------|--------------------|
|
||||
| received_total | Total number of events after parsing and auth. | Counter(method) |
|
||||
| failed_total | Total number of failed events. | Counter(method,error) |
|
||||
| successful_duration_seconds | Bucketed handling times of the requests, including raft rounds for writes. | Histogram(method) |
|
||||
|
||||
|
||||
Example Prometheus queries that may be useful from these metrics (across all etcd members):
|
||||
|
||||
* `sum(rate(etcd_http_failed_total{job="etcd"}[1m])) by (method) / sum(rate(etcd_http_received_total{job="etcd"}[1m])) by (method)`
|
||||
|
||||
Shows the fraction of events that failed by HTTP method across all members, across a time window of `1m`.
|
||||
|
||||
* `sum(rate(etcd_http_received_total{job="etcd",method="GET"}[1m])) by (method)`
|
||||
`sum(rate(etcd_http_received_total{job="etcd",method!="GET"}[1m])) by (method)`
|
||||
|
||||
Shows the rate of successful readonly/write queries across all servers, across a time window of `1m`.
|
||||
|
||||
* `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method="GET"}[5m]) ) by (le))`
|
||||
`histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method!="GET"}[5m]) ) by (le))`
|
||||
|
||||
Show the 0.90-tile latency (in seconds) of read/write (respectively) event handling across all members, with a window of `5m`.
|
||||
|
||||
### proxy
|
||||
|
||||
etcd members operating in proxy mode do not directly perform store operations. They forward all requests to cluster instances.
|
||||
|
||||
Tracking the rate of requests coming from a proxy allows one to pin down which machine is performing most reads/writes.
|
||||
|
||||
All these metrics are prefixed with `etcd_proxy_`
|
||||
|
||||
| Name | Description | Type |
|
||||
|---------------------------|-----------------------------------------------------------------------------------------|--------------------|
|
||||
| requests_total | Total number of requests by this proxy instance. | Counter(method) |
|
||||
| handled_total | Total number of fully handled requests, with responses from etcd members. | Counter(method) |
|
||||
| dropped_total | Total number of dropped requests due to forwarding errors to etcd members. | Counter(method,error) |
|
||||
| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. | Histogram(method) |
|
||||
|
||||
Example Prometheus queries that may be useful from these metrics (across all etcd servers):
|
||||
|
||||
* `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)`
|
||||
|
||||
Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`.
|
||||
|
||||
* `histogram_quantile(0.9, sum(rate(etcd_proxy_handling_duration_seconds{job="etcd",method="GET"}[5m])) by (le))`
|
||||
`histogram_quantile(0.9, sum(rate(etcd_proxy_handling_duration_seconds{job="etcd",method!="GET"}[5m])) by (le))`
|
||||
|
||||
Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`.
|
||||
|
||||
* `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)`
|
||||
|
||||
Rate of failed requests on the proxy, grouped by error. This should be 0; spikes here indicate connectivity issues to the etcd cluster.
|
||||
|
||||
## etcd_debugging namespace metrics
|
||||
|
||||
The metrics under the `etcd_debugging` prefix are for debugging. They are very implementation dependent and volatile. They might be changed or removed without any warning in new etcd releases. Some of the metrics might be moved to the `etcd` prefix when they become more stable.
|
||||
|
||||
### etcdserver
|
||||
|
||||
| Name | Description | Type |
|
||||
|-----------------------------------------|--------------------------------------------------|-----------|
|
||||
| proposal_duration_seconds | The latency distributions of committing proposal | Histogram |
|
||||
| proposals_pending | The current number of pending proposals | Gauge |
|
||||
| proposals_failed_total | The total number of failed proposals | Counter |
|
||||
|
||||
[Proposal][glossary-proposal] duration (`proposal_duration_seconds`) provides a proposal commit latency histogram. The reported latency reflects network and disk IO delays in etcd.
|
||||
|
||||
Proposals pending (`proposals_pending`) indicates how many proposals are queued for commit. Rising pending proposals suggests there is a high client load or the cluster is unstable.
|
||||
|
||||
Failed proposals (`proposals_failed_total`) are normally related to two issues: temporary failures related to a leader election or longer duration downtime caused by a loss of quorum in the cluster.
|
||||
|
||||
### wal
|
||||
|
||||
| Name | Description | Type |
|
||||
|------------------------------------|--------------------------------------------------|-----------|
|
||||
| fsync_duration_seconds | The latency distributions of fsync called by wal | Histogram |
|
||||
| last_index_saved | The index of the last entry saved by wal | Gauge |
|
||||
|
||||
Abnormally high fsync duration (`fsync_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.
|
||||
|
||||
### snapshot
|
||||
|
||||
| Name | Description | Type |
|
||||
|--------------------------------------------|------------------------------------------------------------|-----------|
|
||||
| snapshot_save_total_duration_seconds | The total latency distributions of save called by snapshot | Histogram |
|
||||
|
||||
Abnormally high snapshot duration (`snapshot_save_total_duration_seconds`) indicates disk issues and might cause the cluster to be unstable.
|
||||
|
||||
### rafthttp
|
||||
|
||||
| Name | Description | Type | Labels |
|
||||
|-----------------------------------|--------------------------------------------|--------------|--------------------------------|
|
||||
| message_sent_latency_seconds | The latency distributions of messages sent | HistogramVec | sendingType, msgType, remoteID |
|
||||
| message_sent_failed_total | The total number of failed messages sent | Summary | sendingType, msgType, remoteID |
|
||||
|
||||
|
||||
Abnormally high message duration (`message_sent_latency_seconds`) indicates network issues and might cause the cluster to be unstable.
|
||||
|
||||
An increase in message failures (`message_sent_failed_total`) indicates more severe network issues and might cause the cluster to be unstable.
|
||||
|
||||
Label `sendingType` is the connection type used to send messages. `message`, `msgapp` and `msgappv2` use HTTP streaming, while `pipeline` issues an HTTP request for each message.
|
||||
|
||||
Label `msgType` is the type of raft message. `MsgApp` is log replication messages; `MsgSnap` is snapshot install messages; `MsgProp` is proposal forward messages; the others maintain internal raft status. Given large snapshots, a lengthy `MsgSnap` transmission latency should be expected. For other types of messages, given enough network bandwidth, latencies comparable to ping latency should be expected.
|
||||
|
||||
Label `remoteID` is the member ID of the message destination.
|
||||
|
||||
## Prometheus supplied metrics
|
||||
|
||||
The Prometheus client library provides a number of metrics under the `go` and `process` namespaces. There are a few that are particularly interesting.
|
||||
|
||||
| Name | Description | Type |
|
||||
|-----------------------------------|--------------------------------------------|--------------|
|
||||
| process_open_fds | Number of open file descriptors. | Gauge |
|
||||
| process_max_fds | Maximum number of open file descriptors. | Gauge |
|
||||
|
||||
Heavy file descriptor (`process_open_fds`) usage (i.e., near the process's file descriptor limit, `process_max_fds`) indicates a potential file descriptor exhaustion issue. If the file descriptors are exhausted, etcd may panic because it cannot create new WAL files.
|
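||||
For example, a hedged alerting expression (reusing the `job="etcd"` label from the queries above) that fires as descriptor usage approaches the limit:
|
||||
```
process_open_fds{job="etcd"} / process_max_fds{job="etcd"} > 0.8
```
|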
||||
|
||||
[glossary-proposal]: glossary.md#proposal
|
||||
[prometheus]: http://prometheus.io/
|
||||
[prometheus-getting-started]: http://prometheus.io/docs/introduction/getting_started/
|
||||
[prometheus-naming]: http://prometheus.io/docs/practices/naming/
|
62
Documentation/v2/platforms/freebsd.md
Normal file
62
Documentation/v2/platforms/freebsd.md
Normal file
@ -0,0 +1,62 @@
|
||||
# FreeBSD
|
||||
|
||||
Starting with version 0.1.2, both etcd and etcdctl have been ported to FreeBSD and can
|
||||
be installed via either the packages or the ports system. Their versions have recently been
|
||||
updated to 0.2.0, so you can now enjoy using etcd and etcdctl on FreeBSD 10.0 (RC4 as
|
||||
of now) and 9.x, where they have been tested. They might also work when installed from
|
||||
ports on earlier versions of FreeBSD, but your mileage may vary.
|
||||
|
||||
## Installation
|
||||
|
||||
### Using pkgng package system
|
||||
|
||||
1. If you do not have pkgng installed, install it by running the `pkg` command and
|
||||
answering 'Y' when asked
|
||||
|
||||
2. Update your repository data with `pkg update`
|
||||
|
||||
3. Install etcd with `pkg install coreos-etcd coreos-etcdctl`
|
||||
|
||||
4. Verify successful installation with `pkg info | grep etcd` and you should get:
|
||||
|
||||
```
|
||||
r@fbsd10:/ # pkg info | grep etcd
|
||||
coreos-etcd-0.2.0              Highly-available key value store and service discovery
|
||||
coreos-etcdctl-0.2.0           Simple command line client for etcd
|
||||
r@fbsd10:/ #
|
||||
```
|
||||
|
||||
5. You’re ready to use etcd and etcdctl! For more information about using pkgng, please
|
||||
see: http://www.freebsd.org/doc/handbook/pkgng-intro.html
|
||||
|
||||
### Using ports system
|
||||
|
||||
1. If you do not have ports installed, install them with `portsnap fetch extract` (it
|
||||
may take some time depending on your hardware and network connection)
|
||||
|
||||
2. Build etcd with `cd /usr/ports/devel/etcd && make install clean`; you
|
||||
will get an option to build and install the documentation and etcdctl with it.
|
||||
|
||||
3. If you haven't installed etcdctl along with etcd, and you would like to install it later, you can build it
|
||||
with `cd /usr/ports/devel/etcdctl && make install clean`
|
||||
|
||||
4. Verify successful installation with `pkg info | grep etcd` and you should get:
|
||||
|
||||
|
||||
```
|
||||
r@fbsd10:/ # pkg info | grep etcd
|
||||
coreos-etcd-0.2.0              Highly-available key value store and service discovery
|
||||
coreos-etcdctl-0.2.0           Simple command line client for etcd
|
||||
r@fbsd10:/ #
|
||||
```
|
||||
|
||||
5. You’re ready to use etcd and etcdctl! For more information about using ports system,
|
||||
please see: https://www.freebsd.org/doc/handbook/ports-using.html
|
||||
|
||||
## Issues
|
||||
|
||||
If you find any issues with the build/install procedure, or you've found a problem that
|
||||
you've verified is local to FreeBSD only (for example, by not being able to
|
||||
reproduce it on any other platform, like OSX or Linux), please send a
|
||||
problem report using this page for more
|
||||
information: http://www.freebsd.org/send-pr.html
|
51
Documentation/v2/production-users.md
Normal file
51
Documentation/v2/production-users.md
Normal file
@ -0,0 +1,51 @@
|
||||
# Production Users
|
||||
|
||||
This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check in on your experience and update this list.
|
||||
|
||||
## discovery.etcd.io
|
||||
|
||||
- *Application*: https://github.com/coreos/discovery.etcd.io
|
||||
- *Launched*: Feb. 2014
|
||||
- *Cluster Size*: 5 members, 5 discovery proxies
|
||||
- *Order of Data Size*: 100s of Megabytes
|
||||
- *Operator*: CoreOS, brandon.philips@coreos.com
|
||||
- *Environment*: AWS
|
||||
- *Backups*: Periodic async to S3
|
||||
|
||||
discovery.etcd.io is the longest continuously running etcd-backed service that we know about. It is the basis of automatic cluster bootstrap and was launched in Feb. 2014: https://coreos.com/blog/etcd-0.3.0-released/.
|
||||
|
||||
## OpenTable
|
||||
|
||||
- *Application*: OpenTable internal service discovery and cluster configuration management
|
||||
- *Launched*: May 2014
|
||||
- *Cluster Size*: 3 members each in 6 independent clusters; approximately 50 nodes reading / writing
|
||||
- *Order of Data Size*: 10s of MB
|
||||
- *Operator*: OpenTable, Inc; sschlansker@opentable.com
|
||||
- *Environment*: AWS, VMWare
|
||||
- *Backups*: None, all data can be re-created if necessary.
|
||||
|
||||
## cycoresys.com
|
||||
|
||||
- *Application*: multiple
|
||||
- *Launched*: Jul. 2014
|
||||
- *Cluster Size*: 3 members, _n_ proxies
|
||||
- *Order of Data Size*: 100s of kilobytes
|
||||
- *Operator*: CyCore Systems, Inc, sys@cycoresys.com
|
||||
- *Environment*: Baremetal
|
||||
- *Backups*: Periodic sync to Ceph RadosGW and DigitalOcean VM
|
||||
|
||||
CyCore Systems provides architecture and engineering for computing systems. This cluster provides microservices, virtual machines, databases, storage clusters to a number of clients. It is built on CoreOS machines, with each machine in the cluster running etcd as a peer or proxy.
|
||||
|
||||
## Radius Intelligence
|
||||
|
||||
- *Application*: multiple internal tools, Kubernetes clusters, bootstrappable system configs
|
||||
- *Launched*: June 2015
|
||||
- *Cluster Size*: 2 clusters of 5 and 3 members; approximately a dozen nodes read/write
|
||||
- *Order of Data Size*: 100s of kilobytes
|
||||
- *Operator*: Radius Intelligence; jcderr@radius.com
|
||||
- *Environment*: AWS, CoreOS, Kubernetes
|
||||
- *Backups*: None, all data can be recreated if necessary.
|
||||
|
||||
Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these clusters as well as provide some basic environment bootstrapping configuration keys.
|
||||
|
||||
[teamcity]: https://www.jetbrains.com/teamcity/
|
@@ -49,7 +49,7 @@ To start a proxy that will connect to a statically defined etcd cluster, specify
 ```
 etcd --proxy on \
---listen-client-urls http://127.0.0.1:8080 \
+--listen-client-urls http://127.0.0.1:2379 \
 --initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380
 ```

@@ -60,7 +60,7 @@ To start a proxy using the discovery service, specify the `discovery` flag. The
 ```
 etcd --proxy on \
---listen-client-urls http://127.0.0.1:8080 \
+--listen-client-urls http://127.0.0.1:2379 \
 --discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de \
 ```
45  Documentation/v2/reporting_bugs.md  Normal file
@@ -0,0 +1,45 @@
# Reporting Bugs

If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.

To make your bug report accurate and easy to understand, please try to create bug reports that are:

- Specific. Include as many details as possible: which version, what environment, what configuration, etc. You can also attach the etcd log (the starting log with the etcd configuration is especially important).

- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce; please include the steps that might lead to the problem. You can also attach the affected etcd data dir and stack trace to the bug report.

- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. A bug report that involves too many dependencies is significantly slower to fix. Debugging external systems that rely on etcd is out of scope, but we are happy to point you in the right direction or help you interact with etcd in the correct manner.

- Unique. Do not duplicate an existing bug report.

- Scoped. One bug per report. Do not follow up with another bug inside one report.

You might also want to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report.

We might ask you for further information to locate a bug. A duplicated bug report will be closed.

## Frequently Asked Questions

### How to get a stack trace

``` bash
$ kill -QUIT $PID
```
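
Sending `SIGQUIT` makes the Go runtime print all goroutine stacks and then exit. If terminating the process is not acceptable, a Go program can also dump its own stacks on demand; below is a minimal sketch using the standard `runtime/pprof` package (the choice of `SIGUSR1` is illustrative, not something etcd itself wires up):

```go
package main

import (
	"os"
	"os/signal"
	"runtime/pprof"
	"syscall"
)

func main() {
	// Print all goroutine stacks to stderr on SIGUSR1, without
	// terminating the process the way SIGQUIT does.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1)
	go func() {
		for range c {
			pprof.Lookup("goroutine").WriteTo(os.Stderr, 2)
		}
	}()
	select {} // stand-in for the real workload
}
```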

### How to get etcd version

``` bash
$ etcd --version
```

### How to get etcd configuration and log when it runs as systemd service ‘etcd2.service’

``` bash
$ sudo systemctl cat etcd2
$ sudo journalctl -u etcd2
```

Due to an upstream systemd bug, journald may miss the last few log lines when its process exits. If journalctl tells you that etcd stopped without a fatal or panic message, try `sudo journalctl -f -t etcd2` to get the full log.

[etcd-issue]: https://github.com/coreos/etcd/issues/new
[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/
211  Documentation/v2/rfc/v3api.md  Normal file
@@ -0,0 +1,211 @@
# Overview

The etcd v3 API is designed to give users a more efficient and cleaner abstraction compared to etcd v2. There are a number of semantic and protocol changes in this new API. For an overview [see Xiang Li's video](https://youtu.be/J5AioGtEPeQ?t=211).

To prove out the design of the v3 API the team has also built [a number of example recipes](https://github.com/coreos/etcd/tree/master/contrib/recipes); there is a [video discussing these recipes too](https://www.youtube.com/watch?v=fj-2RY-3yVU&feature=youtu.be&t=590).

# Design

1. Flatten binary key-value space

2. Keep the event history until compaction
  - access to old versions of keys
  - user controlled history compaction

3. Support range query
  - Pagination support with limit argument
  - Support consistency guarantee across multiple range queries

4. Replace TTL key with Lease
  - more efficient / low cost keep alive
  - a logical group of TTL keys

5. Replace CAS/CAD with multi-object Txn (see the client sketch after this list)
  - MUCH MORE powerful and flexible

6. Support efficient watching with multiple ranges

7. RPC API supports the complete set of APIs.
  - more efficient than JSON/HTTP
  - additional txn/lease support

8. HTTP API supports a subset of APIs.
  - easy for people to try out etcd
  - easy for people to write simple etcd applications

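To make point 5 concrete, here is a minimal sketch of a multi-object transaction written against `clientv3`, the Go client that eventually shipped for the v3 API; the import path, endpoint, and key names are assumptions for illustration, not part of this RFC:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3" // assumed import path of the v3 Go client
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// One atomic Txn replaces v2 CAS/CAD: compare, then apply the
	// matching branch of operations entirely server-side.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.ModRevision("foo0"), "=", 1)).
		Then(clientv3.OpPut("foo2", "success")).
		Else(clientv3.OpPut("foo2", "failure")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("comparison succeeded:", resp.Succeeded)
}
```
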
## Notes

### Request Size Limitation

The max request size is around 1MB. Since etcd replicates requests in a streaming fashion, a very large request might block other requests for a long time. The use case for etcd is to store small configuration values, so we prevent users from submitting large requests. This also applies to Txn requests. We might loosen the size limit a little in the future or make it configurable.
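
As a client-side illustration of this limit, a caller could refuse oversized payloads before ever sending them; the helper below is purely hypothetical and simply mirrors the ~1MB figure above:

```go
package main

import (
	"errors"
	"fmt"
)

// maxRequestBytes mirrors the ~1MB server-side limit described above.
const maxRequestBytes = 1 << 20

// checkRequestSize is a hypothetical guard: reject a key/value pair
// that the server would refuse anyway.
func checkRequestSize(key, value []byte) error {
	if len(key)+len(value) > maxRequestBytes {
		return errors.New("request exceeds ~1MB etcd request limit")
	}
	return nil
}

func main() {
	big := make([]byte, 2<<20) // a 2MB value, deliberately too large
	fmt.Println(checkRequestSize([]byte("foo"), big))
}
```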

## Protobuf Defined API

[api protobuf][api-protobuf]

[kv protobuf][kv-protobuf]

## Examples

### Put a key (foo=bar)
```
// A put is always successful
Put( PutRequest { key = foo, value = bar } )

PutResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 1,
  raft_term = 0x1,
}
```

### Get a key (assume we have foo=bar)
```
Get ( RangeRequest { key = foo } )

RangeResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 1,
  raft_term = 0x1,
  kvs = {
    {
      key = foo,
      value = bar,
      create_revision = 1,
      mod_revision = 1,
      version = 1;
    },
  },
}
```

### Range over a key space (assume we have foo0=bar0… foo100=bar100)
```
Range ( RangeRequest { key = foo, end_key = foo80, limit = 30 } )

RangeResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 100,
  raft_term = 0x1,
  kvs = {
    {
      key = foo0,
      value = bar0,
      create_revision = 1,
      mod_revision = 1,
      version = 1;
    },
    ...,
    {
      key = foo30,
      value = bar30,
      create_revision = 30,
      mod_revision = 30,
      version = 1;
    },
  },
}
```
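
For comparison, this paginated range maps naturally onto the eventual Go client; `WithRange` and `WithLimit` below are that client's option names, assumed here for illustration (continuing the `clientv3` sketch above, with `fmt` added to its imports):

```go
// listPage fetches one page of at most limit keys in the half-open
// interval [start, end), printing each key with its mod_revision.
func listPage(cli *clientv3.Client, start, end string, limit int64) error {
	resp, err := cli.Get(context.Background(), start,
		clientv3.WithRange(end),   // e.g. end = "foo80"
		clientv3.WithLimit(limit), // e.g. limit = 30
	)
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s=%s (mod_revision=%d)\n", kv.Key, kv.Value, kv.ModRevision)
	}
	return nil
}
```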

### Finish a txn (assume we have foo0=bar0, foo1=bar1)
```
Txn(TxnRequest {
  // mod_revision of foo0 is equal to 1, mod_revision of foo1 is greater than 1
  compare = {
    {compareType = equal, key = foo0, mod_revision = 1},
    {compareType = greater, key = foo1, mod_revision = 1}
  },
  // if the comparison succeeds, put foo2=success
  success = {PutRequest { key = foo2, value = success }},
  // if the comparison fails, put foo2=failure
  failure = {PutRequest { key = foo2, value = failure }},
)

TxnResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 3,
  raft_term = 0x1,
  succeeded = true,
  responses = {
    // response of PUT foo2=success
    {
      cluster_id = 0x1000,
      member_id = 0x1,
      revision = 3,
      raft_term = 0x1,
    }
  }
}
```

### Watch on a key/range

```
Watch( WatchRequest{
  key = foo,
  end_key = fop, // prefix foo
  start_revision = 20,
  end_revision = 10000,
  // server decided notification frequency
  progress_notification = true,
  }
  … // this can be a watch request stream
)

// put (foo0=bar0) event at 3
WatchResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 3,
  raft_term = 0x1,
  event_type = put,
  kv = {
    key = foo0,
    value = bar0,
    create_revision = 1,
    mod_revision = 1,
    version = 1;
  },
}
…

// a notification at 2000
WatchResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 2000,
  raft_term = 0x1,
  // nil event as notification
}

…

// put (foo0=bar3000) event at 3000
WatchResponse {
  cluster_id = 0x1000,
  member_id = 0x1,
  revision = 3000,
  raft_term = 0x1,
  event_type = put,
  kv = {
    key = foo0,
    value = bar3000,
    create_revision = 1,
    mod_revision = 3000,
    version = 2;
  },
}
…
```
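
The lease primitive from design point 4 has no example above; this is a minimal hedged sketch using the names from the Go client that later implemented the design (`Grant`, `WithLease`, `KeepAliveOnce`), continuing the `clientv3` sketch from earlier:

```go
// putWithLease attaches a key to a 5-second lease and refreshes the
// lease once; the key disappears when the lease expires.
func putWithLease(cli *clientv3.Client) error {
	ctx := context.Background()
	lease, err := cli.Grant(ctx, 5) // TTL in seconds
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "foo", "bar", clientv3.WithLease(lease.ID)); err != nil {
		return err
	}
	_, err = cli.KeepAliveOnce(ctx, lease.ID) // one keep-alive heartbeat
	return err
}
```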

[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto
75  Documentation/v2/tuning.md  Normal file
@@ -0,0 +1,75 @@
# Tuning

The default settings in etcd should work well for installations on a local network where the average network latency is low.
However, when using etcd across multiple data centers or over networks with high latency you may need to tweak the heartbeat interval and election timeout settings.

The network isn't the only source of latency. Each request and response may be impacted by slow disks on both the leader and follower. Each of these timeouts represents the total time from request to successful response from the other machine.

## Time Parameters

The underlying distributed consensus protocol relies on two separate time parameters to ensure that nodes can hand off leadership if one stalls or goes offline.
The first parameter is called the *Heartbeat Interval*.
This is the frequency with which the leader will notify followers that it is still the leader.
Best practice is to set this parameter around the round-trip time between members.
By default, etcd uses a `100ms` heartbeat interval.

The second parameter is the *Election Timeout*.
This timeout is how long a follower node will go without hearing a heartbeat before attempting to become leader itself.
By default, etcd uses a `1000ms` election timeout.

Adjusting these values is a trade-off.
The recommended heartbeat interval is around the maximum average round-trip time (RTT) between members, normally around 0.5-1.5x the round-trip time.
If the heartbeat interval is too low, etcd will send unnecessary messages that increase the usage of CPU and network resources.
On the other hand, a too-high heartbeat interval leads to a high election timeout, and a higher election timeout takes longer to detect a leader failure.
The easiest way to measure the round-trip time (RTT) is to use the [PING utility][ping].

The election timeout should be set based on the heartbeat interval and the average round-trip time between members.
Election timeouts must be at least 10 times the round-trip time so they can account for variance in your network.
For example, if the round-trip time between your members is 10ms then you should have at least a 100ms election timeout.

You should also set your election timeout to at least 5 to 10 times your heartbeat interval to account for variance in leader replication.
For a heartbeat interval of 50ms you should set your election timeout to at least 250ms to 500ms.
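
These rules of thumb are easy to mechanize; the hedged Go helper below simply encodes the guidance above (heartbeat roughly equal to RTT, election timeout at least 10x the RTT and at least 5x the heartbeat interval), with an illustrative 50ms RTT:

```go
package main

import (
	"fmt"
	"time"
)

// recommend encodes the rules of thumb above: heartbeat interval about
// equal to the measured RTT, election timeout at least 10x the RTT and
// at least 5x the heartbeat interval.
func recommend(rtt time.Duration) (heartbeat, election time.Duration) {
	heartbeat = rtt
	election = 10 * rtt
	if floor := 5 * heartbeat; election < floor {
		election = floor
	}
	return heartbeat, election
}

func main() {
	hb, et := recommend(50 * time.Millisecond) // measure your own RTT
	fmt.Printf("-heartbeat-interval=%d -election-timeout=%d\n",
		hb.Milliseconds(), et.Milliseconds())
}
```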

The upper limit of election timeout is 50000ms (50s), which should only be used when deploying a globally-distributed etcd cluster.
A reasonable round-trip time for the continental United States is 130ms, and the time between US and Japan is around 350-400ms.
If your network has uneven performance or regular packet delays/loss then it is possible that a couple of retries may be necessary to successfully send a packet, so 5s is a safe upper limit of global round-trip time.
Since the election timeout should be an order of magnitude bigger than the broadcast time, ~5s for a globally distributed cluster makes 50 seconds a reasonable maximum.

The heartbeat interval and election timeout value should be the same for all members in one cluster. Setting different values for etcd members may disrupt cluster stability.

You can override the default values on the command line:

```sh
# Command line arguments:
$ etcd -heartbeat-interval=100 -election-timeout=500

# Environment variables:
$ ETCD_HEARTBEAT_INTERVAL=100 ETCD_ELECTION_TIMEOUT=500 etcd
```

The values are specified in milliseconds.

## Snapshots

etcd appends all key changes to a log file.
This log grows forever and is a complete linear history of every change made to the keys.
A complete history works well for lightly used clusters, but heavily used clusters would carry around a large log.

To avoid having a huge log etcd makes periodic snapshots.
These snapshots provide a way for etcd to compact the log by saving the current state of the system and removing old logs.

### Snapshot Tuning

Creating snapshots can be expensive so they're only created after a given number of changes to etcd.
By default, snapshots will be made after every 10,000 changes.
If etcd's memory usage and disk usage are too high, you can lower the snapshot threshold by setting the following on the command line:

```sh
# Command line arguments:
$ etcd -snapshot-count=5000

# Environment variables:
$ ETCD_SNAPSHOT_COUNT=5000 etcd
```

[ping]: https://en.wikipedia.org/wiki/Ping_(networking_utility)
2  Godeps/_workspace/.gitignore  generated vendored
@@ -1,2 +0,0 @@
/pkg
/bin
33  Godeps/_workspace/src/bitbucket.org/ww/goautoneg/autoneg_test.go  generated vendored
@@ -1,33 +0,0 @@
package goautoneg

import (
	"testing"
)

var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"

func TestParseAccept(t *testing.T) {
	alternatives := []string{"text/html", "image/png"}
	content_type := Negotiate(chrome, alternatives)
	if content_type != "image/png" {
		t.Errorf("got %s expected image/png", content_type)
	}

	alternatives = []string{"text/html", "text/plain", "text/n3"}
	content_type = Negotiate(chrome, alternatives)
	if content_type != "text/html" {
		t.Errorf("got %s expected text/html", content_type)
	}

	alternatives = []string{"text/n3", "text/plain"}
	content_type = Negotiate(chrome, alternatives)
	if content_type != "text/plain" {
		t.Errorf("got %s expected text/plain", content_type)
	}

	alternatives = []string{"text/n3", "application/rdf+xml"}
	content_type = Negotiate(chrome, alternatives)
	if content_type != "text/n3" {
		t.Errorf("got %s expected text/n3", content_type)
	}
}
247  Godeps/_workspace/src/github.com/akrennmair/gopcap/decode_test.go  generated vendored
@@ -1,247 +0,0 @@
package pcap

import (
	"bytes"
	"testing"
	"time"
)

var testSimpleTcpPacket *Packet = &Packet{
	Data: []byte{
		0x00, 0x00, 0x0c, 0x9f, 0xf0, 0x20, 0xbc, 0x30, 0x5b, 0xe8, 0xd3, 0x49,
		0x08, 0x00, 0x45, 0x00, 0x01, 0xa4, 0x39, 0xdf, 0x40, 0x00, 0x40, 0x06,
		0x55, 0x5a, 0xac, 0x11, 0x51, 0x49, 0xad, 0xde, 0xfe, 0xe1, 0xc5, 0xf7,
		0x00, 0x50, 0xc5, 0x7e, 0x0e, 0x48, 0x49, 0x07, 0x42, 0x32, 0x80, 0x18,
		0x00, 0x73, 0xab, 0xb1, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x03, 0x77,
		0x37, 0x9c, 0x42, 0x77, 0x5e, 0x3a, 0x47, 0x45, 0x54, 0x20, 0x2f, 0x20,
		0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, 0x2e, 0x31, 0x0d, 0x0a, 0x48, 0x6f,
		0x73, 0x74, 0x3a, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x66, 0x69, 0x73, 0x68,
		0x2e, 0x63, 0x6f, 0x6d, 0x0d, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
		0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x6b, 0x65, 0x65, 0x70, 0x2d, 0x61,
		0x6c, 0x69, 0x76, 0x65, 0x0d, 0x0a, 0x55, 0x73, 0x65, 0x72, 0x2d, 0x41,
		0x67, 0x65, 0x6e, 0x74, 0x3a, 0x20, 0x4d, 0x6f, 0x7a, 0x69, 0x6c, 0x6c,
		0x61, 0x2f, 0x35, 0x2e, 0x30, 0x20, 0x28, 0x58, 0x31, 0x31, 0x3b, 0x20,
		0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x78, 0x38, 0x36, 0x5f, 0x36, 0x34,
		0x29, 0x20, 0x41, 0x70, 0x70, 0x6c, 0x65, 0x57, 0x65, 0x62, 0x4b, 0x69,
		0x74, 0x2f, 0x35, 0x33, 0x35, 0x2e, 0x32, 0x20, 0x28, 0x4b, 0x48, 0x54,
		0x4d, 0x4c, 0x2c, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x47, 0x65, 0x63,
		0x6b, 0x6f, 0x29, 0x20, 0x43, 0x68, 0x72, 0x6f, 0x6d, 0x65, 0x2f, 0x31,
		0x35, 0x2e, 0x30, 0x2e, 0x38, 0x37, 0x34, 0x2e, 0x31, 0x32, 0x31, 0x20,
		0x53, 0x61, 0x66, 0x61, 0x72, 0x69, 0x2f, 0x35, 0x33, 0x35, 0x2e, 0x32,
		0x0d, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x3a, 0x20, 0x74, 0x65,
		0x78, 0x74, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c,
		0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, 0x68, 0x74, 0x6d,
		0x6c, 0x2b, 0x78, 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63,
		0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, 0x6d, 0x6c, 0x3b, 0x71, 0x3d,
		0x30, 0x2e, 0x39, 0x2c, 0x2a, 0x2f, 0x2a, 0x3b, 0x71, 0x3d, 0x30, 0x2e,
		0x38, 0x0d, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x45, 0x6e,
		0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3a, 0x20, 0x67, 0x7a, 0x69, 0x70,
		0x2c, 0x64, 0x65, 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, 0x63,
		0x68, 0x0d, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x4c, 0x61,
		0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x65, 0x6e, 0x2d, 0x55,
		0x53, 0x2c, 0x65, 0x6e, 0x3b, 0x71, 0x3d, 0x30, 0x2e, 0x38, 0x0d, 0x0a,
		0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x43, 0x68, 0x61, 0x72, 0x73,
		0x65, 0x74, 0x3a, 0x20, 0x49, 0x53, 0x4f, 0x2d, 0x38, 0x38, 0x35, 0x39,
		0x2d, 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x3b, 0x71, 0x3d, 0x30,
		0x2e, 0x37, 0x2c, 0x2a, 0x3b, 0x71, 0x3d, 0x30, 0x2e, 0x33, 0x0d, 0x0a,
		0x0d, 0x0a,
	}}

func BenchmarkDecodeSimpleTcpPacket(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testSimpleTcpPacket.Decode()
	}
}

func TestDecodeSimpleTcpPacket(t *testing.T) {
	p := testSimpleTcpPacket
	p.Decode()
	if p.DestMac != 0x00000c9ff020 {
		t.Error("Dest mac", p.DestMac)
	}
	if p.SrcMac != 0xbc305be8d349 {
		t.Error("Src mac", p.SrcMac)
	}
	if len(p.Headers) != 2 {
		t.Error("Incorrect number of headers", len(p.Headers))
		return
	}
	if ip, ipOk := p.Headers[0].(*Iphdr); ipOk {
		if ip.Version != 4 {
			t.Error("ip Version", ip.Version)
		}
		if ip.Ihl != 5 {
			t.Error("ip header length", ip.Ihl)
		}
		if ip.Tos != 0 {
			t.Error("ip TOS", ip.Tos)
		}
		if ip.Length != 420 {
			t.Error("ip Length", ip.Length)
		}
		if ip.Id != 14815 {
			t.Error("ip ID", ip.Id)
		}
		if ip.Flags != 0x02 {
			t.Error("ip Flags", ip.Flags)
		}
		if ip.FragOffset != 0 {
			t.Error("ip Fragoffset", ip.FragOffset)
		}
		if ip.Ttl != 64 {
			t.Error("ip TTL", ip.Ttl)
		}
		if ip.Protocol != 6 {
			t.Error("ip Protocol", ip.Protocol)
		}
		if ip.Checksum != 0x555A {
			t.Error("ip Checksum", ip.Checksum)
		}
		if !bytes.Equal(ip.SrcIp, []byte{172, 17, 81, 73}) {
			t.Error("ip Src", ip.SrcIp)
		}
		if !bytes.Equal(ip.DestIp, []byte{173, 222, 254, 225}) {
			t.Error("ip Dest", ip.DestIp)
		}
		if tcp, tcpOk := p.Headers[1].(*Tcphdr); tcpOk {
			if tcp.SrcPort != 50679 {
				t.Error("tcp srcport", tcp.SrcPort)
			}
			if tcp.DestPort != 80 {
				t.Error("tcp destport", tcp.DestPort)
			}
			if tcp.Seq != 0xc57e0e48 {
				t.Error("tcp seq", tcp.Seq)
			}
			if tcp.Ack != 0x49074232 {
				t.Error("tcp ack", tcp.Ack)
			}
			if tcp.DataOffset != 8 {
				t.Error("tcp dataoffset", tcp.DataOffset)
			}
			if tcp.Flags != 0x18 {
				t.Error("tcp flags", tcp.Flags)
			}
			if tcp.Window != 0x73 {
				t.Error("tcp window", tcp.Window)
			}
			if tcp.Checksum != 0xabb1 {
				t.Error("tcp checksum", tcp.Checksum)
			}
			if tcp.Urgent != 0 {
				t.Error("tcp urgent", tcp.Urgent)
			}
		} else {
			t.Error("Second header is not TCP header")
		}
	} else {
		t.Error("First header is not IP header")
	}
	if string(p.Payload) != "GET / HTTP/1.1\r\nHost: www.fish.com\r\nConnection: keep-alive\r\nUser-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: gzip,deflate,sdch\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3\r\n\r\n" {
		t.Error("--- PAYLOAD STRING ---\n", string(p.Payload), "\n--- PAYLOAD BYTES ---\n", p.Payload)
	}
}

// Makes sure packet payload doesn't display the 6 trailing null of this packet
// as part of the payload. They're actually the ethernet trailer.
func TestDecodeSmallTcpPacketHasEmptyPayload(t *testing.T) {
	p := &Packet{
		// This packet is only 54 bits (an empty TCP RST), thus 6 trailing null
		// bytes are added by the ethernet layer to make it the minimum packet size.
		Data: []byte{
			0xbc, 0x30, 0x5b, 0xe8, 0xd3, 0x49, 0xb8, 0xac, 0x6f, 0x92, 0xd5, 0xbf,
			0x08, 0x00, 0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06,
			0x3f, 0x9f, 0xac, 0x11, 0x51, 0xc5, 0xac, 0x11, 0x51, 0x49, 0x00, 0x63,
			0x9a, 0xef, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xc1, 0x27, 0x83, 0x50, 0x14,
			0x00, 0x00, 0xc3, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}}
	p.Decode()
	if p.Payload == nil {
		t.Error("Nil payload")
	}
	if len(p.Payload) != 0 {
		t.Error("Non-empty payload:", p.Payload)
	}
}

func TestDecodeVlanPacket(t *testing.T) {
	p := &Packet{
		Data: []byte{
			0x00, 0x10, 0xdb, 0xff, 0x10, 0x00, 0x00, 0x15, 0x2c, 0x9d, 0xcc, 0x00, 0x81, 0x00, 0x01, 0xf7,
			0x08, 0x00, 0x45, 0x00, 0x00, 0x28, 0x29, 0x8d, 0x40, 0x00, 0x7d, 0x06, 0x83, 0xa0, 0xac, 0x1b,
			0xca, 0x8e, 0x45, 0x16, 0x94, 0xe2, 0xd4, 0x0a, 0x00, 0x50, 0xdf, 0xab, 0x9c, 0xc6, 0xcd, 0x1e,
			0xe5, 0xd1, 0x50, 0x10, 0x01, 0x00, 0x5a, 0x74, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}}
	p.Decode()
	if p.Type != TYPE_VLAN {
		t.Error("Didn't detect vlan")
	}
	if len(p.Headers) != 3 {
		t.Error("Incorrect number of headers:", len(p.Headers))
		for i, h := range p.Headers {
			t.Errorf("Header %d: %#v", i, h)
		}
		t.FailNow()
	}
	if _, ok := p.Headers[0].(*Vlanhdr); !ok {
		t.Errorf("First header isn't vlan: %q", p.Headers[0])
	}
	if _, ok := p.Headers[1].(*Iphdr); !ok {
		t.Errorf("Second header isn't IP: %q", p.Headers[1])
	}
	if _, ok := p.Headers[2].(*Tcphdr); !ok {
		t.Errorf("Third header isn't TCP: %q", p.Headers[2])
	}
}

func TestDecodeFuzzFallout(t *testing.T) {
	testData := []struct {
		Data []byte
	}{
		{[]byte("000000000000\x81\x000")},
		{[]byte("000000000000\x81\x00000")},
		{[]byte("000000000000\x86\xdd0")},
		{[]byte("000000000000\b\x000")},
		{[]byte("000000000000\b\x060")},
		{[]byte{}},
		{[]byte("000000000000\b\x0600000000")},
		{[]byte("000000000000\x86\xdd000000\x01000000000000000000000000000000000")},
		{[]byte("000000000000\x81\x0000\b\x0600000000")},
		{[]byte("000000000000\b\x00n0000000000000000000")},
		{[]byte("000000000000\x86\xdd000000\x0100000000000000000000000000000000000")},
		{[]byte("000000000000\x81\x0000\b\x00g0000000000000000000")},
		//{[]byte()},
		{[]byte("000000000000\b\x00400000000\x110000000000")},
		{[]byte("0nMء\xfe\x13\x13\x81\x00gr\b\x00&x\xc9\xe5b'\x1e0\x00\x04\x00\x0020596224")},
		{[]byte("000000000000\x81\x0000\b\x00400000000\x110000000000")},
		{[]byte("000000000000\b\x00000000000\x0600\xff0000000")},
		{[]byte("000000000000\x86\xdd000000\x06000000000000000000000000000000000")},
		{[]byte("000000000000\x81\x0000\b\x00000000000\x0600b0000000")},
		{[]byte("000000000000\x81\x0000\b\x00400000000\x060000000000")},
		{[]byte("000000000000\x86\xdd000000\x11000000000000000000000000000000000")},
		{[]byte("000000000000\x86\xdd000000\x0600000000000000000000000000000000000000000000M")},
		{[]byte("000000000000\b\x00500000000\x0600000000000")},
		{[]byte("0nM\xd80\xfe\x13\x13\x81\x00gr\b\x00&x\xc9\xe5b'\x1e0\x00\x04\x00\x0020596224")},
	}

	for _, entry := range testData {
		pkt := &Packet{
			Time:   time.Now(),
			Caplen: uint32(len(entry.Data)),
			Len:    uint32(len(entry.Data)),
			Data:   entry.Data,
		}

		pkt.Decode()
		/*
			func() {
				defer func() {
					if err := recover(); err != nil {
						t.Fatalf("%d. %q failed: %v", idx, string(entry.Data), err)
					}
				}()
				pkt.Decode()
			}()
		*/
	}
}
49  Godeps/_workspace/src/github.com/akrennmair/gopcap/tools/benchmark/benchmark.go  generated vendored
@@ -1,49 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"runtime/pprof"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)

func main() {
	var filename *string = flag.String("file", "", "filename")
	var decode *bool = flag.Bool("d", false, "If true, decode each packet")
	var cpuprofile *string = flag.String("cpuprofile", "", "filename")

	flag.Parse()

	h, err := pcap.Openoffline(*filename)
	if err != nil {
		fmt.Printf("Couldn't create pcap reader: %v", err)
	}

	if *cpuprofile != "" {
		if out, err := os.Create(*cpuprofile); err == nil {
			pprof.StartCPUProfile(out)
			defer func() {
				pprof.StopCPUProfile()
				out.Close()
			}()
		} else {
			panic(err)
		}
	}

	i, nilPackets := 0, 0
	start := time.Now()
	for pkt, code := h.NextEx(); code != -2; pkt, code = h.NextEx() {
		if pkt == nil {
			nilPackets++
		} else if *decode {
			pkt.Decode()
		}
		i++
	}
	duration := time.Since(start)
	fmt.Printf("Took %v to process %v packets, %v per packet, %d nil packets\n", duration, i, duration/time.Duration(i), nilPackets)
}
96  Godeps/_workspace/src/github.com/akrennmair/gopcap/tools/pass/pass.go  generated vendored
@@ -1,96 +0,0 @@
package main

// Parses a pcap file, writes it back to disk, then verifies the files
// are the same.
import (
	"bufio"
	"flag"
	"fmt"
	"io"
	"os"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)

var input *string = flag.String("input", "", "input file")
var output *string = flag.String("output", "", "output file")
var decode *bool = flag.Bool("decode", false, "print decoded packets")

func copyPcap(dest, src string) {
	f, err := os.Open(src)
	if err != nil {
		fmt.Printf("couldn't open %q: %v\n", src, err)
		return
	}
	defer f.Close()
	reader, err := pcap.NewReader(bufio.NewReader(f))
	if err != nil {
		fmt.Printf("couldn't create reader: %v\n", err)
		return
	}
	w, err := os.Create(dest)
	if err != nil {
		fmt.Printf("couldn't open %q: %v\n", dest, err)
		return
	}
	defer w.Close()
	buf := bufio.NewWriter(w)
	writer, err := pcap.NewWriter(buf, &reader.Header)
	if err != nil {
		fmt.Printf("couldn't create writer: %v\n", err)
		return
	}
	for {
		pkt := reader.Next()
		if pkt == nil {
			break
		}
		if *decode {
			pkt.Decode()
			fmt.Println(pkt.String())
		}
		writer.Write(pkt)
	}
	buf.Flush()
}

func check(dest, src string) {
	f, err := os.Open(src)
	if err != nil {
		fmt.Printf("couldn't open %q: %v\n", src, err)
		return
	}
	defer f.Close()
	freader := bufio.NewReader(f)

	g, err := os.Open(dest)
	if err != nil {
		fmt.Printf("couldn't open %q: %v\n", src, err)
		return
	}
	defer g.Close()
	greader := bufio.NewReader(g)

	for {
		fb, ferr := freader.ReadByte()
		gb, gerr := greader.ReadByte()

		if ferr == io.EOF && gerr == io.EOF {
			break
		}
		if fb == gb {
			continue
		}
		fmt.Println("FAIL")
		return
	}

	fmt.Println("PASS")
}

func main() {
	flag.Parse()

	copyPcap(*output, *input)
	check(*output, *input)
}
82  Godeps/_workspace/src/github.com/akrennmair/gopcap/tools/pcaptest/pcaptest.go  generated vendored
@@ -1,82 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)

func min(x uint32, y uint32) uint32 {
	if x < y {
		return x
	}
	return y
}

func main() {
	var device *string = flag.String("d", "", "device")
	var file *string = flag.String("r", "", "file")
	var expr *string = flag.String("e", "", "filter expression")

	flag.Parse()

	var h *pcap.Pcap
	var err error

	ifs, err := pcap.Findalldevs()
	if len(ifs) == 0 {
		fmt.Printf("Warning: no devices found : %s\n", err)
	} else {
		for i := 0; i < len(ifs); i++ {
			fmt.Printf("dev %d: %s (%s)\n", i+1, ifs[i].Name, ifs[i].Description)
		}
	}

	if *device != "" {
		h, err = pcap.Openlive(*device, 65535, true, 0)
		if h == nil {
			fmt.Printf("Openlive(%s) failed: %s\n", *device, err)
			return
		}
	} else if *file != "" {
		h, err = pcap.Openoffline(*file)
		if h == nil {
			fmt.Printf("Openoffline(%s) failed: %s\n", *file, err)
			return
		}
	} else {
		fmt.Printf("usage: pcaptest [-d <device> | -r <file>]\n")
		return
	}
	defer h.Close()

	fmt.Printf("pcap version: %s\n", pcap.Version())

	if *expr != "" {
		fmt.Printf("Setting filter: %s\n", *expr)
		err := h.Setfilter(*expr)
		if err != nil {
			fmt.Printf("Warning: setting filter failed: %s\n", err)
		}
	}

	for pkt := h.Next(); pkt != nil; pkt = h.Next() {
		fmt.Printf("time: %d.%06d (%s) caplen: %d len: %d\nData:",
			int64(pkt.Time.Second()), int64(pkt.Time.Nanosecond()),
			time.Unix(int64(pkt.Time.Second()), 0).String(), int64(pkt.Caplen), int64(pkt.Len))
		for i := uint32(0); i < pkt.Caplen; i++ {
			if i%32 == 0 {
				fmt.Printf("\n")
			}
			if 32 <= pkt.Data[i] && pkt.Data[i] <= 126 {
				fmt.Printf("%c", pkt.Data[i])
			} else {
				fmt.Printf(".")
			}
		}
		fmt.Printf("\n\n")
	}

}
121  Godeps/_workspace/src/github.com/akrennmair/gopcap/tools/tcpdump/tcpdump.go  generated vendored
@@ -1,121 +0,0 @@
package main

import (
	"bufio"
	"flag"
	"fmt"
	"os"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/akrennmair/gopcap"
)

const (
	TYPE_IP  = 0x0800
	TYPE_ARP = 0x0806
	TYPE_IP6 = 0x86DD

	IP_ICMP = 1
	IP_INIP = 4
	IP_TCP  = 6
	IP_UDP  = 17
)

var out *bufio.Writer
var errout *bufio.Writer

func main() {
	var device *string = flag.String("i", "", "interface")
	var snaplen *int = flag.Int("s", 65535, "snaplen")
	var hexdump *bool = flag.Bool("X", false, "hexdump")
	expr := ""

	out = bufio.NewWriter(os.Stdout)
	errout = bufio.NewWriter(os.Stderr)

	flag.Usage = func() {
		fmt.Fprintf(errout, "usage: %s [ -i interface ] [ -s snaplen ] [ -X ] [ expression ]\n", os.Args[0])
		errout.Flush()
		os.Exit(1)
	}

	flag.Parse()

	if len(flag.Args()) > 0 {
		expr = flag.Arg(0)
	}

	if *device == "" {
		devs, err := pcap.Findalldevs()
		if err != nil {
			fmt.Fprintf(errout, "tcpdump: couldn't find any devices: %s\n", err)
		}
		if 0 == len(devs) {
			flag.Usage()
		}
		*device = devs[0].Name
	}

	h, err := pcap.Openlive(*device, int32(*snaplen), true, 0)
	if h == nil {
		fmt.Fprintf(errout, "tcpdump: %s\n", err)
		errout.Flush()
		return
	}
	defer h.Close()

	if expr != "" {
		ferr := h.Setfilter(expr)
		if ferr != nil {
			fmt.Fprintf(out, "tcpdump: %s\n", ferr)
			out.Flush()
		}
	}

	for pkt := h.Next(); pkt != nil; pkt = h.Next() {
		pkt.Decode()
		fmt.Fprintf(out, "%s\n", pkt.String())
		if *hexdump {
			Hexdump(pkt)
		}
		out.Flush()
	}
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func Hexdump(pkt *pcap.Packet) {
	for i := 0; i < len(pkt.Data); i += 16 {
		Dumpline(uint32(i), pkt.Data[i:min(i+16, len(pkt.Data))])
	}
}

func Dumpline(addr uint32, line []byte) {
	fmt.Fprintf(out, "\t0x%04x: ", int32(addr))
	var i uint16
	for i = 0; i < 16 && i < uint16(len(line)); i++ {
		if i%2 == 0 {
			out.WriteString(" ")
		}
		fmt.Fprintf(out, "%02x", line[i])
	}
	for j := i; j <= 16; j++ {
		if j%2 == 0 {
			out.WriteString(" ")
		}
		out.WriteString(" ")
	}
	out.WriteString(" ")
	for i = 0; i < 16 && i < uint16(len(line)); i++ {
		if line[i] >= 32 && line[i] <= 126 {
			fmt.Fprintf(out, "%c", line[i])
		} else {
			out.WriteString(".")
		}
	}
	out.WriteString("\n")
}
63  Godeps/_workspace/src/github.com/beorn7/perks/quantile/bench_test.go  generated vendored
@@ -1,63 +0,0 @@
package quantile

import (
	"testing"
)

func BenchmarkInsertTargeted(b *testing.B) {
	b.ReportAllocs()

	s := NewTargeted(Targets)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertBiased(b *testing.B) {
	s := NewLowBiased(0.01)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
	s := NewLowBiased(0.0001)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

func BenchmarkQuery(b *testing.B) {
	s := NewTargeted(Targets)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}

func BenchmarkQuerySmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}
121  Godeps/_workspace/src/github.com/beorn7/perks/quantile/example_test.go  generated vendored
@@ -1,121 +0,0 @@
// +build go1.1

package quantile_test

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strconv"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/beorn7/perks/quantile"
)

func Example_simple() {
	ch := make(chan float64)
	go sendFloats(ch)

	// Compute the 50th, 90th, and 99th percentile.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	for v := range ch {
		q.Insert(v)
	}

	fmt.Println("perc50:", q.Query(0.50))
	fmt.Println("perc90:", q.Query(0.90))
	fmt.Println("perc99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
	// Output:
	// perc50: 5
	// perc90: 16
	// perc99: 223
	// count: 2388
}

func Example_mergeMultipleStreams() {
	// Scenario:
	// We have multiple database shards. On each shard, there is a process
	// collecting query response times from the database logs and inserting
	// them into a Stream (created via NewTargeted(0.90)), much like the
	// Simple example. These processes expose a network interface for us to
	// ask them to serialize and send us the results of their
	// Stream.Samples so we may Merge and Query them.
	//
	// NOTES:
	// * These sample sets are small, allowing us to get them
	// across the network much faster than sending the entire list of data
	// points.
	//
	// * For this to work correctly, we must supply the same quantiles
	// a priori the process collecting the samples supplied to NewTargeted,
	// even if we do not plan to query them all here.
	ch := make(chan quantile.Samples)
	getDBQuerySamples(ch)
	q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
	for samples := range ch {
		q.Merge(samples)
	}
	fmt.Println("perc90:", q.Query(0.90))
}

func Example_window() {
	// Scenario: We want the 90th, 95th, and 99th percentiles for each
	// minute.

	ch := make(chan float64)
	go sendStreamValues(ch)

	tick := time.NewTicker(1 * time.Minute)
	q := quantile.NewTargeted(map[float64]float64{
		0.90: 0.001,
		0.95: 0.0005,
		0.99: 0.0001,
	})
	for {
		select {
		case t := <-tick.C:
			flushToDB(t, q.Samples())
			q.Reset()
		case v := <-ch:
			q.Insert(v)
		}
	}
}

func sendStreamValues(ch chan float64) {
	// Use your imagination
}

func flushToDB(t time.Time, samples quantile.Samples) {
	// Use your imagination
}

// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}

func sendFloats(ch chan<- float64) {
	f, err := os.Open("exampledata.txt")
	if err != nil {
		log.Fatal(err)
	}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		b := sc.Bytes()
		v, err := strconv.ParseFloat(string(b), 64)
		if err != nil {
			log.Fatal(err)
		}
		ch <- v
	}
	if sc.Err() != nil {
		log.Fatal(sc.Err())
	}
	close(ch)
}
188  Godeps/_workspace/src/github.com/beorn7/perks/quantile/stream_test.go  generated vendored
@@ -1,188 +0,0 @@
package quantile

import (
	"math"
	"math/rand"
	"sort"
	"testing"
)

var (
	Targets = map[float64]float64{
		0.01: 0.001,
		0.10: 0.01,
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	TargetsSmallEpsilon = map[float64]float64{
		0.01: 0.0001,
		0.10: 0.001,
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	}
	LowQuantiles  = []float64{0.01, 0.1, 0.5}
	HighQuantiles = []float64{0.99, 0.9, 0.5}
)

const RelativeEpsilon = 0.01

func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for quantile, epsilon := range Targets {
		n := float64(len(a))
		k := int(quantile * n)
		lower := int((quantile - epsilon) * n)
		if lower < 1 {
			lower = 1
		}
		upper := int(math.Ceil((quantile + epsilon) * n))
		if upper > len(a) {
			upper = len(a)
		}
		w, min, max := a[k-1], a[lower-1], a[upper-1]
		if g := s.Query(quantile); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
		}
	}
}

func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range LowQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - RelativeEpsilon) * qu * n)
		upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range HighQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
		upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

func populateStream(s *Stream) []float64 {
	a := make([]float64, 0, 1e5+100)
	for i := 0; i < cap(a); i++ {
		v := rand.NormFloat64()
		// Add 5% asymmetric outliers.
		if i%20 == 0 {
			v = v*v + 1
		}
		s.Insert(v)
		a = append(a, v)
	}
	return a
}

func TestTargetedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(Targets)
	a := populateStream(s)
	verifyPercsWithAbsoluteEpsilon(t, a, s)
}

func TestLowBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewLowBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyLowPercsWithRelativeEpsilon(t, a, s)
}

func TestHighBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewHighBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyHighPercsWithRelativeEpsilon(t, a, s)
}

// BrokenTestTargetedMerge is broken, see Merge doc comment.
func BrokenTestTargetedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewTargeted(Targets)
	s2 := NewTargeted(Targets)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyPercsWithAbsoluteEpsilon(t, a, s1)
}

// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
func BrokenTestLowBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewLowBiased(RelativeEpsilon)
	s2 := NewLowBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyLowPercsWithRelativeEpsilon(t, a, s2)
}

// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
func BrokenTestHighBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewHighBiased(RelativeEpsilon)
	s2 := NewHighBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyHighPercsWithRelativeEpsilon(t, a, s2)
}

func TestUncompressed(t *testing.T) {
	q := NewTargeted(Targets)
	for i := 100; i > 0; i-- {
		q.Insert(float64(i))
	}
	if g := q.Count(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
	// Before compression, Query should have 100% accuracy.
	for quantile := range Targets {
		w := quantile * 100
		if g := q.Query(quantile); g != w {
			t.Errorf("want %f, got %f", w, g)
		}
	}
}

func TestUncompressedSamples(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.001})
	for i := 1; i <= 100; i++ {
		q.Insert(float64(i))
	}
	if g := q.Samples().Len(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
}

func TestUncompressedOne(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.01})
	q.Insert(3.14)
	if g := q.Query(0.90); g != 3.14 {
		t.Error("want PI, got", g)
	}
}

func TestDefaults(t *testing.T) {
	if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
		t.Errorf("want 0, got %f", g)
	}
}
18  Godeps/_workspace/src/github.com/bgentry/speakeasy/example/main.go  generated vendored
@@ -1,18 +0,0 @@
package main

import (
	"fmt"
	"os"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bgentry/speakeasy"
)

func main() {
	password, err := speakeasy.Ask("Please enter a password: ")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Printf("Password result: %q\n", password)
	fmt.Printf("Password len: %d\n", len(password))
}
Some files were not shown because too many files have changed in this diff.