Compare commits

v3.1.0-rc. ... v3.1.13

713 Commits
.github/ISSUE_TEMPLATE.md (vendored) | 2

@@ -5,4 +5,4 @@ A good bug report has some very specific qualities, so please read over our shor
To ask a question, go ahead and ignore this.
[report_bugs]: ../Documentation/reporting_bugs.md
[report_bugs]: https://github.com/coreos/etcd/blob/master/Documentation/reporting_bugs.md
.github/PULL_REQUEST_TEMPLATE.md (vendored) | 2

@@ -2,4 +2,4 @@
Please read our [contribution workflow][contributing] before submitting a pull request.
[contributing]: ../CONTRIBUTING.md#contribution-flow
[contributing]: https://github.com/coreos/etcd/blob/master/CONTRIBUTING.md#contribution-flow
.semaphore.sh (new executable file) | 16

@@ -0,0 +1,16 @@
#!/usr/bin/env bash

TEST_SUFFIX=$(date +%s | base64 | head -c 15)

TEST_OPTS="PASSES='build unit release integration_e2e functional' MANUAL_VER=v3.1.12"
if [ "$TEST_ARCH" == "386" ]; then
  TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'"
fi

docker run \
  --rm \
  --volume=`pwd`:/go/src/github.com/coreos/etcd \
  gcr.io/etcd-development/etcd-test:go1.8.7 \
  /bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log"

! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-${TEST_SUFFIX}.log
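The script above reads only the `TEST_ARCH` environment variable, which the CI environment sets. As a hedged illustration (not part of the change itself), the 32-bit pass could be selected when invoking it by hand:

```bash
# hypothetical local run of the CI script above; TEST_ARCH selects the 386 test pass
TEST_ARCH=386 ./.semaphore.sh
```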
.travis.yml | 79

@@ -1,60 +1,89 @@
dist: trusty
language: go
go_import_path: github.com/coreos/etcd
sudo: false
sudo: required

services: docker

go:
- 1.7.1
- tip
- "1.8.7"
- tip

notifications:
on_success: never
on_failure: never

env:
global:
- GO15VENDOREXPERIMENT=1
matrix:
- TARGET=amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
- TARGET=amd64
- TARGET=amd64-go-tip
- TARGET=darwin-amd64
- TARGET=windows-amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
- TARGET=ppc64le

matrix:
fast_finish: true
allow_failures:
- go: tip
- go: tip
env: TARGET=amd64-go-tip
exclude:
- go: "1.8.7"
env: TARGET=amd64-go-tip
- go: tip
env: TARGET=amd64
- go: tip
env: TARGET=darwin-amd64
- go: tip
env: TARGET=windows-amd64
- go: tip
env: TARGET=arm
- go: tip
env: TARGET=arm64
- go: tip
env: TARGET=386

addons:
apt:
packages:
- libpcap-dev
- libaspell-dev
- libhunspell-dev
- go: tip
env: TARGET=ppc64le

before_install:
- go get -v github.com/chzchzchz/goword
- go get -v honnef.co/go/simple/cmd/gosimple
- go get -v honnef.co/go/unused/cmd/unused
- if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi

# disable godep restore override
install:
- pushd cmd/etcd && go get -t -v ./... && popd
- pushd cmd/etcd && go get -t -v ./... && popd

script:
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
- >
case "${TARGET}" in
amd64)
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 ./test"
;;
amd64-go-tip)
GOARCH=amd64 ./test
;;
darwin-amd64)
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=darwin GOARCH=amd64 ./build"
;;
windows-amd64)
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GO_BUILD_FLAGS='-a -v' GOOS=windows GOARCH=amd64 ./build"
;;
386)
GOARCH=386 PASSES="build unit" ./test
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=386 PASSES='build unit' ./test"
;;
*)
# test building out of gopath
GO_BUILD_FLAGS="-a -v" GOPATH="" GOARCH="${TARGET}" ./build
docker run --rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GO_BUILD_FLAGS='-a -v' GOARCH='${TARGET}' ./build"
;;
esac
@@ -5,6 +5,12 @@ ADD etcdctl /usr/local/bin/
RUN mkdir -p /var/etcd/
RUN mkdir -p /var/lib/etcd/

# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
# but Golang relies on /etc/nsswitch.conf to check the order of DNS resolving
# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918)
# To fix this we just create /etc/nsswitch.conf and add the following line:
RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf

EXPOSE 2379 2380

# Define default command.
Dockerfile-test (new file) | 57

@@ -0,0 +1,57 @@
FROM ubuntu:16.10

RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections

RUN apt-get -y update \
  && apt-get -y install \
  build-essential \
  gcc \
  apt-utils \
  pkg-config \
  software-properties-common \
  apt-transport-https \
  libssl-dev \
  sudo \
  bash \
  curl \
  wget \
  tar \
  git \
  netcat \
  libaspell-dev \
  libhunspell-dev \
  hunspell-en-us \
  aspell-en \
  shellcheck \
  && apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y autoremove \
  && apt-get -y autoclean

ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
ENV GO_VERSION REPLACE_ME_GO_VERSION
ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
RUN rm -rf ${GOROOT} \
  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
  && go version

RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
WORKDIR ${GOPATH}/src/github.com/coreos/etcd

ADD ./scripts/install-marker.sh /tmp/install-marker.sh

RUN go get -v -u -tags spell github.com/chzchzchz/goword \
  && go get -v -u github.com/coreos/license-bill-of-materials \
  && go get -v -u honnef.co/go/tools/cmd/gosimple \
  && go get -v -u honnef.co/go/tools/cmd/unused \
  && go get -v -u honnef.co/go/tools/cmd/staticcheck \
  && go get -v -u github.com/wadey/gocovmerge \
  && go get -v -u github.com/gordonklaus/ineffassign \
  && /tmp/install-marker.sh amd64 \
  && rm -f /tmp/install-marker.sh \
  && curl -s https://codecov.io/bash >/codecov \
  && chmod 700 /codecov
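As a sketch (not part of the diff above), the test image could be built locally once the `REPLACE_ME_GO_VERSION` placeholder is substituted; the Go version and tag name below are only examples:

```bash
# hypothetical local build; in CI the etcd scripts substitute the Go version before building
sed 's/REPLACE_ME_GO_VERSION/1.8.7/g' Dockerfile-test > /tmp/Dockerfile-test.go1.8.7
docker build --file /tmp/Dockerfile-test.go1.8.7 --tag etcd-test:go1.8.7 .
```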
Documentation/README.md (new symbolic link) | 1

@@ -0,0 +1 @@
docs.md
@@ -14,7 +14,7 @@ GCE n1-highcpu-2 machine type

## Testing

Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.

## Performance

@@ -48,5 +48,5 @@ Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send r
| 256 | 64 | all servers | 1033 | 121.5 |
| 256 | 256 | all servers | 3061 | 119.3 |

[boom]: https://github.com/rakyll/boom
[hack-benchmark]: /hack/benchmark/
[hey]: https://github.com/rakyll/hey
[hack-benchmark]: https://github.com/coreos/etcd/tree/master/hack/benchmark

@@ -24,7 +24,7 @@ Go OS/Arch: linux/amd64

## Testing

Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool](https://github.com/rakyll/boom) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
Bootstrap another machine, outside of the etcd cluster, and run the [`hey` HTTP benchmark tool](https://github.com/rakyll/hey) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.

The performance is calulated through results of 100 benchmark rounds.

@@ -66,4 +66,4 @@ The performance is calulated through results of 100 benchmark rounds.

- Write QPS to cluster leaders seems to be increased by a small margin. This is because the main loop and entry apply loops were decoupled in the etcd raft logic, eliminating several blocks between them.

- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.

@@ -24,7 +24,7 @@ Also, we use 3 etcd 2.1.0 alpha-stage members to form cluster to get base perfor

## Testing

Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.
Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send requests to each etcd member. Check the [benchmark hacking guide][hack-benchmark] for detailed instructions.

## Performance

@@ -66,7 +66,7 @@ Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send r

- write QPS to all servers is increased by 30~80% because follower could receive latest commit index earlier and commit proposals faster.

[boom]: https://github.com/rakyll/boom
[hey]: https://github.com/rakyll/hey
[c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
[etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
[hack-benchmark]: /hack/benchmark/
[hack-benchmark]: ../../hack/benchmark/

@@ -39,4 +39,4 @@ The performance is nearly the same as the one with empty server handler.
The performance with empty server handler is not affected by one put. So the
performance downgrade should be caused by storage package.

[etcd-v3-benchmark]: /tools/benchmark/
[etcd-v3-benchmark]: ../../tools/benchmark/
@@ -8,6 +8,8 @@ etcd v3 uses [gRPC][grpc] for its messaging protocol. The etcd project includes

The gateway accepts a [JSON mapping][json-mapping] for etcd's [protocol buffer][api-ref] message definitions. Note that `key` and `value` fields are defined as byte arrays and therefore must be base64 encoded in JSON.
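For instance, the `Zm9v` and `YmFy` strings used in the examples below are simply base64 encodings of `foo` and `bar`; a quick way to produce and check them (an editorial aside, not part of the documented change):

```bash
echo -n foo | base64      # Zm9v
echo -n bar | base64      # YmFy
echo YmFy | base64 -d     # bar
```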
Use `curl` to put and get a key:

```bash
<<COMMENT
https://www.base64encode.org/
@@ -17,11 +19,24 @@ COMMENT

curl -L http://localhost:2379/v3alpha/kv/put \
  -X POST -d '{"key": "Zm9v", "value": "YmFy"}'
# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"}}

curl -L http://localhost:2379/v3alpha/kv/range \
  -X POST -d '{"key": "Zm9v"}'
# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"},"kvs":[{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}],"count":"1"}
```

Use `curl` to watch a key:

```bash
curl http://localhost:2379/v3alpha/watch \
  -X POST -d '{"create_request": {"key":"Zm9v"} }' &
# {"result":{"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"1","raft_term":"2"},"created":true}}

curl -L http://localhost:2379/v3alpha/kv/put \
  -X POST -d '{"key": "Zm9v", "value": "YmFy"}' >/dev/null 2>&1
# {"result":{"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"2"},"events":[{"kv":{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}}]}}
```

## Swagger
@@ -427,7 +427,7 @@ Empty field.

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the first key to delete in the range. | bytes |
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is one bit larger than the given key, then the range is all the all keys with the prefix (the given key). If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
| prev_kv | If prev_kv is set, etcd gets the previous key-value pairs before deleting it. The previous key-value pairs will be returned in the delte response. | bool |

@@ -762,7 +762,7 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive

| Field | Description | Type |
| ----- | ----------- | ---- |
| key | key is the key to register for watching. | bytes |
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. | bytes |
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. If the range_end is one bit larger than the given key, then all keys with the prefix (the given key) will be watched. | bytes |
| start_revision | start_revision is an optional revision to watch from (inclusive). No start_revision is "now". | int64 |
| progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. The etcd server may decide how often it will send notifications based on current load. | bool |
| filters | filter out put event. filter out delete event. filters filter the events at server side before it sends back to the watcher. | (slice of) FilterType |
@@ -978,7 +978,8 @@
"enum": [
"EQUAL",
"GREATER",
"LESS"
"LESS",
"NOT_EQUAL"
],
"default": "EQUAL"
},

@@ -1518,7 +1519,7 @@
"range_end": {
"type": "string",
"format": "byte",
"description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument."
"description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all\nthe all keys with the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument."
}
}
},
@@ -1,8 +1,11 @@
# Experimental APIs and features

For the most part, the etcd project is stable, but we are still moving fast! We believe in the release fast philosophy. We want to get early feedback on features still in development and stabilizing. Thus, there are, and will be more, experimental features and APIs. We plan to improve these features based on the early feedback from the community, or abandon them if there is little interest, in the next few releases. If you are running a production system, please do not rely on any experimental features or APIs.
For the most part, the etcd project is stable, but we are still moving fast! We believe in the release fast philosophy. We want to get early feedback on features still in development and stabilizing. Thus, there are, and will be more, experimental features and APIs. We plan to improve these features based on the early feedback from the community, or abandon them if there is little interest, in the next few releases. Please do not rely on any experimental features or APIs in production environment.

## The current experimental API/features are:

- v3 auth API: expect to be stable in 3.1 release
- etcd gateway: expect to be stable in 3.1 release
- [gateway][gateway]: beta, to be stable in 3.2 release
- [gRPC proxy][grpc-proxy]: alpha, to be stable in 3.2 release

[gateway]: ../op-guide/gateway.md
[grpc-proxy]: ../op-guide/grpc_proxy.md
@@ -9,7 +9,7 @@ The etcd client provides a gRPC resolver for resolving gRPC endpoints with an et

```go
import (
	"github.com/coreos/etcd/clientv3"
	etcdnaming "github.com/coroes/etcd/clientv3/naming"
	etcdnaming "github.com/coreos/etcd/clientv3/naming"

	"google.golang.org/grpc"
)
@@ -51,6 +51,7 @@ Suppose the etcd cluster has stored the following keys:

```bash
foo = bar
foo1 = bar1
foo2 = bar2
foo3 = bar3
```

@@ -77,22 +78,38 @@ $ etcdctl get foo --print-value-only
bar
```

Here is the command to range over the keys from `foo` to `foo9`:
Here is the command to range over the keys from `foo` to `foo3`:

```bash
$ etcdctl get foo foo9
$ etcdctl get foo foo3
foo
bar
foo1
bar1
foo2
bar2
```

Note that `foo3` is excluded since the range is over the half-open interval `[foo, foo3)`, excluding `foo3`.

Here is the command to range over all keys prefixed with `foo`:

```bash
$ etcdctl get --prefix foo
foo
bar
foo1
bar1
foo2
bar2
foo3
bar3
```
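An editorial aside, not in the original page: `--prefix` is equivalent to an explicit half-open range whose end key is the prefix with its last byte incremented, so the same listing can be requested as:

```bash
# "fop" is "foo" with its final byte incremented, so [foo, fop) covers every key prefixed with foo
$ etcdctl get foo fop
foo
bar
foo1
bar1
foo2
bar2
foo3
bar3
```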
Here is the command to range over the keys from `foo` to `foo9` limiting the number of results to 2:
Here is the command to range over all keys prefixed with `foo`, limiting the number of results to 2:

```bash
$ etcdctl get foo foo9 --limit 2
$ etcdctl get --prefix --limit=2 foo
foo
bar
foo1

@@ -116,29 +133,29 @@ foo1 = bar1_new # revision = 5

Here are an example to access the past versions of keys:

```bash
$ etcdctl get foo foo9 # access the most recent versions of keys
$ etcdctl get --prefix foo # access the most recent versions of keys
foo
bar_new
foo1
bar1_new

$ etcdctl get --rev=4 foo foo9 # access the versions of keys at revision 4
$ etcdctl get --prefix --rev=4 foo # access the versions of keys at revision 4
foo
bar_new
foo1
bar1

$ etcdctl get --rev=3 foo foo9 # access the versions of keys at revision 3
$ etcdctl get --prefix --rev=3 foo # access the versions of keys at revision 3
foo
bar
foo1
bar1

$ etcdctl get --rev=2 foo foo9 # access the versions of keys at revision 2
$ etcdctl get --prefix --rev=2 foo # access the versions of keys at revision 2
foo
bar

$ etcdctl get --rev=1 foo foo9 # access the versions of keys at revision 1
$ etcdctl get --prefix --rev=1 foo # access the versions of keys at revision 1
```

## Read keys which are greater than or equal to the byte value of the specified key

@@ -454,4 +471,5 @@ lease 694d5765fc71500b granted with TTL(500s), remaining(132s), attached keys([z

# if the lease has expired or does not exist it will give the below response:
Error: etcdserver: requested lease not found
```
```
Documentation/dev-guide/limit.md (new file) | 10

@@ -0,0 +1,10 @@
# System limits

## Request size limit

etcd is designed to handle small key value pairs typical for metadata. Larger requests will work, but may increase the latency of other requests. For the time being, etcd guarantees to support RPC requests with up to 1MB of data. In the future, the size limit may be loosened or made it configurable.

## Storage size limit

The default storage size limit is 2GB, configurable with `--quota-backend-bytes` flag; supports up to 8GB.
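For illustration (a sketch, not part of the new document), the quota can be raised to the documented 8GB maximum at startup:

```bash
# 8 GB = 8 * 1024^3 bytes
etcd --quota-backend-bytes=8589934592
```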
@@ -45,7 +45,7 @@ To interact with the started cluster by using etcdctl:
# use API version 3
$ export ETCDCTL_API=3

$ etcdctl --write-out=table --endpoints=localhost:12379 member list
$ etcdctl --write-out=table --endpoints=localhost:2379 member list
+------------------+---------+--------+------------------------+------------------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS |
+------------------+---------+--------+------------------------+------------------------+
@@ -3,7 +3,7 @@
etcd uses the [capnslog][capnslog] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions:

* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
* Examples:
* Examples:
* A failure to allocate disk space for WAL

* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.

@@ -26,4 +26,4 @@ etcd uses the [capnslog][capnslog] library for logging application output catego
* Send a normal message to a remote peer
* Write a log entry to disk

[capnslog]: [https://github.com/coreos/pkg/tree/master/capnslog]
[capnslog]: https://github.com/coreos/pkg/tree/master/capnslog
@@ -10,29 +10,44 @@ The easiest way to get etcd is to use one of the pre-built release binaries whic

## Build the latest version

For those wanting to try the very latest version, build etcd from the `master` branch.
[Go](https://golang.org/) version 1.6+ (with HTTP2 support) is required to build the latest version of etcd.
etcd vendors its dependency for official release binaries, while making vendoring optional to avoid import conflicts.
[`build` script][build-script] would automatically include the vendored dependencies from [`cmd`][cmd-directory] directory.
For those wanting to try the very latest version, build etcd from the `master` branch. [Go](https://golang.org/) version 1.7+ is required to build the latest version of etcd. To ensure etcd is built against well-tested libraries, etcd vendors its dependencies for official release binaries. However, etcd's vendoring is also optional to avoid potential import conflicts when embedding the etcd server or using the etcd client.

Here are the commands to build an etcd binary from the `master` branch:
First, confirm go 1.7+ is installed:

```
```sh
# go is required
$ go version
go version go1.6 darwin/amd64
go version go1.7.3 darwin/amd64

# GOPATH should be set correctly
$ echo $GOPATH
/Users/example/go
```

$ mkdir -p $GOPATH/src/github.com/coreos
$ cd $GOPATH/src/github.com/coreos
To build `etcd` from the `master` branch without a `GOPATH` using the official `build` script:

```sh
$ git clone https://github.com/coreos/etcd.git
$ cd etcd
$ ./build
$ ./bin/etcd
...
```

To build a vendored `etcd` from the `master` branch via `go get`:

```sh
# GOPATH should be set
$ echo $GOPATH
/Users/example/go
$ go get github.com/coreos/etcd/cmd/etcd
$ $GOPATH/bin/etcd
```

To build `etcd` from the `master` branch without vendoring (may not build due to upstream conflicts):

```sh
# GOPATH should be set
$ echo $GOPATH
/Users/example/go
$ go get github.com/coreos/etcd
$ $GOPATH/bin/etcd
```

## Test the installation
@@ -17,6 +17,7 @@ The easiest way to get started using etcd as a distributed key-value store is to
- [gRPC naming and discovery][grpc_naming]
- [Embedding etcd][embed_etcd]
- [Experimental features and APIs][experimental]
- [System limits][system-limit]

## Operating etcd clusters

@@ -26,9 +27,10 @@ Administrators who need to create reliable and scalable key-value stores for the
- [Setting up etcd gateways][gateway]
- [Setting up etcd gRPC proxy (pre-alpha)][grpc_proxy]
- [Run etcd clusters inside containers][container]
- [Hardware recommendations][hardware]
- [Configuration][conf]
- [Security][security]
- Monitoring
- [Monitoring][monitoring]
- [Maintenance][maintenance]
- [Understand failures][failures]
- [Disaster recovery][recovery]

@@ -40,7 +42,7 @@ Administrators who need to create reliable and scalable key-value stores for the

To learn more about the concepts and internals behind etcd, read the following pages:

- Why etcd (TODO)
- [Why etcd][why] (TODO)
- [Understand data model][data_model]
- [Understand APIs][understand_apis]
- [Glossary][glossary]

@@ -50,13 +52,19 @@ To learn more about the concepts and internals behind etcd, read the following p

- [Migrate applications from using API v2 to API v3][v2_migration]
- [Updating v2.3 to v3.0][v3_upgrade]
- [Updating v3.0 to v3.1][v31_upgrade]

## Troubleshooting
## Frequently Asked Questions (FAQ)

Answers to [common questions] about etcd.

[api_ref]: dev-guide/api_reference_v3.md
[api_grpc_gateway]: dev-guide/api_grpc_gateway.md
[clustering]: op-guide/clustering.md
[conf]: op-guide/configuration.md
[system-limit]: dev-guide/limit.md
[common questions]: faq.md
[why]: learning/why.md
[data_model]: learning/data_model.md
[demo]: demo.md
[download_build]: dl_build.md

@@ -66,12 +74,14 @@ To learn more about the concepts and internals behind etcd, read the following p
[gateway]: op-guide/gateway.md
[glossary]: learning/glossary.md
[grpc_proxy]: op-guide/grpc_proxy.md
[hardware]: op-guide/hardware.md
[interacting]: dev-guide/interacting_v3.md
[local_cluster]: dev-guide/local_cluster.md
[performance]: op-guide/performance.md
[recovery]: op-guide/recovery.md
[maintenance]: op-guide/maintenance.md
[security]: op-guide/security.md
[monitoring]: op-guide/monitoring.md
[v2_migration]: op-guide/v2-migration.md
[container]: op-guide/container.md
[understand_apis]: learning/api.md

@@ -79,3 +89,4 @@ To learn more about the concepts and internals behind etcd, read the following p
[supported_platform]: op-guide/supported-platform.md
[experimental]: dev-guide/experimental_apis.md
[v3_upgrade]: upgrades/upgrade_3_0.md
[v31_upgrade]: upgrades/upgrade_3_1.md
Documentation/faq.md (new file) | 128

@@ -0,0 +1,128 @@
## Frequently Asked Questions (FAQ)

### etcd, general

#### Do clients have to send requests to the etcd leader?

[Raft][raft] is leader-based; the leader handles all client requests which need cluster consensus. However, the client does not need to know which node is the leader. Any request that requires consensus sent to a follower is automatically forwarded to the leader. Requests that do not require consensus (e.g., serialized reads) can be processed by any cluster member.
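To make the serializable-read point concrete (an illustrative sketch, not part of the new FAQ; the endpoint address is made up):

```bash
# default reads are linearizable (go through consensus); --consistency=s requests a
# serializable read that the contacted member can answer from its local store
ETCDCTL_API=3 etcdctl --endpoints=http://10.0.0.11:2379 get foo
ETCDCTL_API=3 etcdctl --endpoints=http://10.0.0.11:2379 get foo --consistency=s
```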
### Configuration

#### What is the difference between advertise-urls and listen-urls?

`listen-urls` specifies the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address.

`advertise-urls` specifies the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines.
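A hedged sketch of the distinction (the name and addresses below are made up): bind on all interfaces, but advertise only the routable address.

```bash
etcd --name infra0 \
  --listen-client-urls http://0.0.0.0:2379 \
  --advertise-client-urls http://10.0.0.10:2379 \
  --listen-peer-urls http://0.0.0.0:2380 \
  --initial-advertise-peer-urls http://10.0.0.10:2380
```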
### Deployment

#### System requirements

Since etcd writes data to disk, SSD is highly recommended. To prevent performance degradation or unintentionally overloading the key-value store, etcd enforces a 2GB default storage size quota, configurable up to 8GB. To avoid swapping or running out of memory, the machine should have at least as much RAM to cover the quota. At CoreOS, an etcd cluster is usually deployed on dedicated CoreOS Container Linux machines with dual-core processors, 2GB of RAM, and 80GB of SSD *at the very least*. **Note that performance is intrinsically workload dependent; please test before production deployment**. See [hardware][hardware-setup] for more recommendations.

Most stable production environment is Linux operating system with amd64 architecture; see [supported platform][supported-platform] for more.

#### Why an odd number of cluster members?

An etcd cluster needs a majority of nodes, a quorum, to agree on updates to the cluster state. For a cluster with n members, quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for quorum. Although adding a node to an odd-sized cluster appears better since there are more machines, the fault tolerance is worse since exactly the same number of nodes may fail without losing quorum but there are more nodes that can fail. If the cluster is in a state where it can't tolerate any more failures, adding a node before removing nodes is dangerous because if the new node fails to register with the cluster (e.g., the address is misconfigured), quorum will be permanently lost.

#### What is maximum cluster size?

Theoretically, there is no hard limit. However, an etcd cluster probably should have no more than seven nodes. [Google Chubby lock service][chubby], similar to etcd and widely deployed within Google for many years, suggests running five nodes. A 5-member etcd cluster can tolerate two member failures, which is enough in most cases. Although larger clusters provide better fault tolerance, the write performance suffers because data must be replicated across more machines.

#### What is failure tolerance?

An etcd cluster operates so long as a member quorum can be established. If quorum is lost through transient network failures (e.g., partitions), etcd automatically and safely resumes once the network recovers and restores quorum; Raft enforces cluster consistency. For power loss, etcd persists the Raft log to disk; etcd replays the log to the point of failure and resumes cluster participation. For permanent hardware failure, the node may be removed from the cluster through [runtime reconfiguration][runtime reconfiguration].

It is recommended to have an odd number of members in a cluster. An odd-size cluster tolerates the same number of failures as an even-size cluster but with fewer nodes. The difference can be seen by comparing even and odd sized clusters:

| Cluster Size | Majority | Failure Tolerance |
|:-:|:-:|:-:|
| 1 | 1 | 0 |
| 2 | 2 | 0 |
| 3 | 2 | 1 |
| 4 | 3 | 1 |
| 5 | 3 | 2 |
| 6 | 4 | 2 |
| 7 | 4 | 3 |
| 8 | 5 | 3 |
| 9 | 5 | 4 |

Adding a member to bring the size of cluster up to an even number doesn't buy additional fault tolerance. Likewise, during a network partition, an odd number of members guarantees that there will always be a majority partition that can continue to operate and be the source of truth when the partition ends.
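The table follows directly from the quorum formula; a quick, purely illustrative way to reproduce it:

```bash
# quorum(n) = floor(n/2) + 1; failure tolerance = n - quorum(n)
for n in $(seq 1 9); do
  echo "size=$n majority=$(( n / 2 + 1 )) tolerates=$(( n - n / 2 - 1 ))"
done
```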
#### Does etcd work in cross-region or cross data center deployments?

Deploying etcd across regions improves etcd's fault tolerance since members are in separate failure domains. The cost is higher consensus request latency from crossing data center boundaries. Since etcd relies on a member quorum for consensus, the latency from crossing data centers will be somewhat pronounced because at least a majority of cluster members must respond to consensus requests. Additionally, cluster data must be replicated across all peers, so there will be bandwidth cost as well.

With longer latencies, the default etcd configuration may cause frequent elections or heartbeat timeouts. See [tuning] for adjusting timeouts for high latency deployments.

### Operation

#### How to backup a etcd cluster?

etcdctl provides a `snapshot` command to create backups. See [backup][backup] for more details.
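A minimal sketch of that workflow (the endpoint and file name are placeholders):

```bash
# save a point-in-time snapshot from a running member, then verify it
ETCDCTL_API=3 etcdctl --endpoints=http://127.0.0.1:2379 snapshot save backup.db
ETCDCTL_API=3 etcdctl snapshot status backup.db --write-out=table
```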
#### Should I add a member before removing an unhealthy member?

When replacing an etcd node, it's important to remove the member first and then add its replacement.

etcd employs distributed consensus based on a quorum model; (n+1)/2 members, a majority, must agree on a proposal before it can be committed to the cluster. These proposals include key-value updates and membership changes. This model totally avoids any possibility of split brain inconsistency. The downside is permanent quorum loss is catastrophic.

How this applies to membership: If a 3-member cluster has 1 downed member, it can still make forward progress because the quorum is 2 and 2 members are still live. However, adding a new member to a 3-member cluster will increase the quorum to 3 because 3 votes are required for a majority of 4 members. Since the quorum increased, this extra member buys nothing in terms of fault tolerance; the cluster is still one node failure away from being unrecoverable.

Additionally, that new member is risky because it may turn out to be misconfigured or incapable of joining the cluster. In that case, there's no way to recover quorum because the cluster has two members down and two members up, but needs three votes to change membership to undo the botched membership addition. etcd will by default reject member add attempts that could take down the cluster in this manner.

On the other hand, if the downed member is removed from cluster membership first, the number of members becomes 2 and the quorum remains at 2. Following that removal by adding a new member will also keep the quorum steady at 2. So, even if the new node can't be brought up, it's still possible to remove the new member through quorum on the remaining live members.
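The remove-then-add order looks roughly like this (a sketch; the member ID, member name, and peer URL are placeholders):

```bash
ETCDCTL_API=3 etcdctl member list
ETCDCTL_API=3 etcdctl member remove 8211f1d0f64f3269
ETCDCTL_API=3 etcdctl member add infra3 --peer-urls=http://10.0.1.13:2380
```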
#### Why won't etcd accept my membership changes?

etcd sets `strict-reconfig-check` in order to reject reconfiguration requests that would cause quorum loss. Abandoning quorum is really risky (especially when the cluster is already unhealthy). Although it may be tempting to disable quorum checking if there's quorum loss to add a new member, this could lead to full fledged cluster inconsistency. For many applications, this will make the problem even worse ("disk geometry corruption" being a candidate for most terrifying).

### Performance

#### How should I benchmark etcd?

Try the [benchmark] tool. Current [benchmark results][benchmark-result] are available for comparison.

#### What does the etcd warning "apply entries took too long" mean?

After a majority of etcd members agree to commit a request, each etcd server applies the request to its data store and persists the result to disk. Even with a slow mechanical disk or a virtualized network disk, such as Amazon’s EBS or Google’s PD, applying a request should normally take fewer than 50 milliseconds. If the average apply duration exceeds 100 milliseconds, etcd will warn that entries are taking too long to apply.

Usually this issue is caused by a slow disk. The disk could be experiencing contention among etcd and other applications, or the disk is too simply slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [backend_commit_duration_seconds][backend_commit_metrics] (p99 duration should be less than 25ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using faster disk will typically solve the problem.

The second most common cause is CPU starvation. If monitoring of the machine’s CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to dedicated machine, increasing process resource isolation cgroups, or renicing the etcd server process into a higher priority can usually solve the problem.

Expensive user requests which access too many keys (e.g., fetching the entire keyspace) can also cause long apply latencies. Accessing fewer than a several hundred keys per request, however, should always be performant.

If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information.

#### What does the etcd warning "failed to send out heartbeat on time" mean?

etcd uses a leader-based consensus protocol for consistent data replication and log execution. Cluster members elect a single leader, all other members become followers. The elected leader must periodically send heartbeats to its followers to maintain its leadership. Followers infer leader failure if no heartbeats are received within an election interval and trigger an election. If a leader doesn’t send its heartbeats in time but is still running, the election is spurious and likely caused by insufficient resources. To catch these soft failures, if the leader skips two heartbeat intervals, etcd will warn it failed to send a heartbeat on time.

Usually this issue is caused by a slow disk. Before the leader sends heartbeats attached with metadata, it may need to persist the metadata to disk. The disk could be experiencing contention among etcd and other applications, or the disk is too simply slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [wal_fsync_duration_seconds][wal_fsync_duration_seconds] (p99 duration should be less than 10ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using faster disk will typically solve the problem.

The second most common cause is CPU starvation. If monitoring of the machine’s CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to dedicated machine, increasing process resource isolation with cgroups, or renicing the etcd server process into a higher priority can usually solve the problem.

A slow network can also cause this issue. If network metrics among the etcd machines shows long latencies or high drop rate, there may not be enough network capacity for etcd. Moving etcd members to a less congested network will typically solve the problem. However, if the etcd cluster is deployed across data centers, long latency between members is expected. For such deployments, tune the `heartbeat-interval` configuration to roughly match the round trip time between the machines, and the `election-timeout` configuration to be at least 5 * `heartbeat-interval`. See [tuning documentation][tuning] for detailed information.
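As a hedged example of that tuning advice, for members separated by roughly a 50 ms round trip (the numbers are illustrative only):

```bash
# heartbeat-interval ~ RTT, election-timeout >= 5 * heartbeat-interval (both in milliseconds)
etcd --heartbeat-interval=50 --election-timeout=250
```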
If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information.

#### What does the etcd warning "request ignored (cluster ID mismatch)" mean?

Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster.

Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint.

[hardware-setup]: ./op-guide/hardware.md
[supported-platform]: ./op-guide/supported-platform.md
[wal_fsync_duration_seconds]: ./metrics.md#disk
[tuning]: ./tuning.md
[new_issue]: https://github.com/coreos/etcd/issues/new
[backend_commit_metrics]: ./metrics.md#disk
[raft]: https://raft.github.io/raft.pdf
[backup]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/recovery.md#snapshotting-the-keyspace
[chubby]: http://static.googleusercontent.com/media/research.google.com/en//archive/chubby-osdi06.pdf
[runtime reconfiguration]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md
[benchmark]: https://github.com/coreos/etcd/tree/master/tools/benchmark
[benchmark-result]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/performance.md
Documentation/learning/why.md (new file) | 21

@@ -0,0 +1,21 @@
# Why etcd

The name "etcd" originated from two ideas, the unix "/etc" folder and "d"istibuted systems. The "/etc" folder is a place to store configuration data for a single system whereas etcd stores configuration information for large scale distributed systems. Hence, a "d"istributed "/etc" is "etcd".

etcd stores metadata in a consistent and fault-tolerant way. Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Common distributed patterns using etcd include leader election, [distributed locks][etcd-concurrency], and monitoring machine liveness.

## Use cases

- Container Linux by CoreOS: Application running on [Container Linux][container-linux] gets automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
- [Kubernetes][kubernetes] stores configuration data into etcd for service discovery and cluster management; etcd's consistency is crucial for correctly scheduling and operating services. The Kubernetes API server persists cluster state into etcd. It uses etcd's watch API to monitor the cluster and roll out critical configuration changes.

## Features and system comparisons

TODO

[etcd-concurrency]: https://godoc.org/github.com/coreos/etcd/clientv3/concurrency
[container-linux]: https://coreos.com/why
[locksmith]: https://github.com/coreos/locksmith
[kubernetes]: http://kubernetes.io/docs/whatisk8s
@ -14,6 +14,7 @@
|
||||
- [etcdtool](https://github.com/mickep76/etcdtool) - Export/Import/Edit etcd directory as JSON/YAML/TOML and Validate directory using JSON schema
|
||||
- [etcd-rest](https://github.com/mickep76/etcd-rest) - Create generic REST API in Go using etcd as a backend with validation using JSON schema
|
||||
- [etcdsh](https://github.com/kamilhark/etcdsh) - A command line client with support of command history and tab completion. Supports v2
|
||||
- [etcdloadtest](https://github.com/sinsharat/etcdloadtest) - A command line load test client for etcd version 3.0 and above.
|
||||
|
||||
**Go libraries**
|
||||
|
||||
@ -34,9 +35,11 @@
|
||||
**Scala libraries**
|
||||
|
||||
- [maciej/etcd-client](https://github.com/maciej/etcd-client) - Supports v2. Akka HTTP-based fully async client
|
||||
- [eiipii/etcdhttpclient](https://bitbucket.org/eiipii/etcdhttpclient) - Supports v2. Async HTTP client based on Netty and Scala Futures.
|
||||
|
||||
**Python libraries**
|
||||
|
||||
- [kragniz/python-etcd3](https://github.com/kragniz/python-etcd3) - Work in progress client for v3
|
||||
- [jplana/python-etcd](https://github.com/jplana/python-etcd) - Supports v2
|
||||
- [russellhaering/txetcd](https://github.com/russellhaering/txetcd) - a Twisted Python library
|
||||
- [cholcombe973/autodock](https://github.com/cholcombe973/autodock) - A docker deployment automation tool
|
||||
@ -93,6 +96,10 @@
|
||||
|
||||
- [ropensci/etseed](https://github.com/ropensci/etseed)
|
||||
|
||||
**Nim libraries**
|
||||
|
||||
- [etcd_client](https://github.com/FedericoCeratto/nim-etcd-client)
|
||||
|
||||
**Tcl libraries**
|
||||
|
||||
- [efrecon/etcd-tcl](https://github.com/efrecon/etcd-tcl) - Supports v2, except wait.
|
||||
@ -117,7 +124,9 @@
|
||||
**Projects using etcd**
|
||||
|
||||
- [binocarlos/yoda](https://github.com/binocarlos/yoda) - etcd + ZeroMQ
|
||||
- [blox/blox](https://github.com/blox/blox) - a collection of open source projects for container management and orchestration with AWS ECS
|
||||
- [calavera/active-proxy](https://github.com/calavera/active-proxy) - HTTP Proxy configured with etcd
|
||||
- [chain/chain](https://github.com/chain/chain) - software designed to operate and connect to highly scalable permissioned blockchain networks
|
||||
- [derekchiang/etcdplus](https://github.com/derekchiang/etcdplus) - A set of distributed synchronization primitives built upon etcd
|
||||
- [go-discover](https://github.com/flynn/go-discover) - service discovery in Go
|
||||
- [gleicon/goreman](https://github.com/gleicon/goreman/tree/etcd) - Branch of the Go Foreman clone with etcd support
|
||||
|
@ -82,30 +82,7 @@ All these metrics are prefixed with `etcd_network_`
|
||||
|
||||
### gRPC requests
|
||||
|
||||
These metrics describe the requests served by a specific etcd member: total received requests, total failed requests, and processing latency. They are useful for tracking user-generated traffic hitting the etcd cluster.
|
||||
|
||||
All these metrics are prefixed with `etcd_grpc_`
|
||||
|
||||
| Name | Description | Type |
|
||||
|--------------------------------|-------------------------------------------------------------------------------------|------------------------|
|
||||
| requests_total | Total number of received requests | Counter(method) |
|
||||
| requests_failed_total | Total number of failed requests. | Counter(method,error) |
|
||||
| unary_requests_duration_seconds | Bucketed handling duration of the requests. | Histogram(method) |
|
||||
|
||||
|
||||
Example Prometheus queries that may be useful from these metrics (across all etcd members):
|
||||
|
||||
* `sum(rate(etcd_grpc_requests_failed_total{job="etcd"}[1m])) by (grpc_method) / sum(rate(etcd_grpc_requests_total{job="etcd"}[1m])) by (grpc_method)`
|
||||
|
||||
Shows the fraction of events that failed by gRPC method across all members, across a time window of `1m`.
|
||||
|
||||
* `sum(rate(etcd_grpc_requests_total{job="etcd",grpc_method="PUT"}[1m])) by (grpc_method)`
|
||||
|
||||
Shows the rate of PUT requests across all members, across a time window of `1m`.
|
||||
|
||||
* `histogram_quantile(0.9, sum(rate(etcd_grpc_unary_requests_duration_seconds{job="etcd",grpc_method="PUT"}[5m]) ) by (le))`
|
||||
|
||||
Shows the 90th percentile latency (in seconds) of PUT request handling across all members, with a window of `5m`.
|
||||
These metrics are exposed via [go-grpc-prometheus][go-grpc-prometheus].
|
||||
|
||||
## etcd_debugging namespace metrics
|
||||
|
||||
@ -136,3 +113,4 @@ Heavy file descriptor (`process_open_fds`) usage (i.e., near the process's file
|
||||
[prometheus-getting-started]: http://prometheus.io/docs/introduction/getting_started/
|
||||
[prometheus-naming]: http://prometheus.io/docs/practices/naming/
|
||||
[v2-http-metrics]: v2/metrics.md#http-requests
|
||||
[go-grpc-prometheus]: https://github.com/grpc-ecosystem/go-grpc-prometheus
|
@ -83,7 +83,7 @@ A cluster using self-signed certificates both encrypts traffic and authenticates
|
||||
On each machine, etcd would be started with these flags:
|
||||
|
||||
```
|
||||
$ etcd --name infra0 --initial-advertise-peer-urls http://10.0.1.10:2380 \
|
||||
$ etcd --name infra0 --initial-advertise-peer-urls https://10.0.1.10:2380 \
|
||||
--listen-peer-urls https://10.0.1.10:2380 \
|
||||
--listen-client-urls https://10.0.1.10:2379,https://127.0.0.1:2379 \
|
||||
--advertise-client-urls https://10.0.1.10:2379 \
|
||||
@ -126,7 +126,7 @@ $ etcd --name infra2 --initial-advertise-peer-urls https://10.0.1.12:2380 \
|
||||
|
||||
If the cluster needs encrypted communication but does not require authenticated connections, etcd can be configured to automatically generate its keys. On initialization, each member creates its own set of keys based on its advertised IP addresses and hosts.
|
||||
|
||||
On each machine, etcd would be started with these flag:
|
||||
On each machine, etcd would be started with these flags:
|
||||
|
||||
```
|
||||
$ etcd --name infra0 --initial-advertise-peer-urls https://10.0.1.10:2380 \
|
||||
@ -205,7 +205,7 @@ exit 1
|
||||
|
||||
## Discovery
|
||||
|
||||
In a number of cases, the IPs of the cluster peers may not be known ahead of time. This is common when utilizing cloud providers or when the network uses DHCP. In these cases, rather than specifying a static configuration, use an existing etcd cluster to bootstrap a new one. We call this process "discovery".
|
||||
In a number of cases, the IPs of the cluster peers may not be known ahead of time. This is common when utilizing cloud providers or when the network uses DHCP. In these cases, rather than specifying a static configuration, use an existing etcd cluster to bootstrap a new one. This process is called "discovery".
|
||||
|
||||
There are two methods that can be used for discovery:
|
||||
|
||||
@ -214,17 +214,17 @@ There two methods that can be used for discovery:
|
||||
|
||||
### etcd discovery
|
||||
|
||||
To better understand the design about discovery service protocol, we suggest reading the discovery service protocol [documentation][discovery-proto].
|
||||
To better understand the design of the discovery service protocol, we suggest reading the discovery service protocol [documentation][discovery-proto].
|
||||
|
||||
#### Lifetime of a discovery URL
|
||||
|
||||
A discovery URL identifies a unique etcd cluster. Instead of reusing a discovery URL, always create discovery URLs for new clusters.
|
||||
A discovery URL identifies a unique etcd cluster. Instead of reusing an existing discovery URL, each etcd instance shares a new discovery URL to bootstrap the new cluster.
|
||||
|
||||
Moreover, discovery URLs should ONLY be used for the initial bootstrapping of a cluster. To change cluster membership after the cluster is already running, see the [runtime reconfiguration][runtime-conf] guide.
|
||||
|
||||
#### Custom etcd discovery service
|
||||
|
||||
Discovery uses an existing cluster to bootstrap itself. If using a private etcd cluster, can create a URL like so:
|
||||
Discovery uses an existing cluster to bootstrap itself. If using a private etcd cluster, create a URL like so:
|
||||
|
||||
```
|
||||
$ curl -X PUT https://myetcd.local/v2/keys/discovery/6c007a14875d53d9bf0ef5a6fc0257c817f0fb83/_config/size -d value=3
|
||||
@ -271,7 +271,7 @@ $ curl https://discovery.etcd.io/new?size=3
|
||||
https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
|
||||
This will create the cluster with an initial expected size of 3 members. If no size is specified, a default of 3 is used.
|
||||
This will create the cluster with an initial size of 3 members. If no size is specified, a default of 3 is used.
|
||||
|
||||
```
|
||||
ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
@ -281,7 +281,7 @@ ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573d
|
||||
--discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
|
||||
```
|
||||
|
||||
**Each member must have a different name flag specified. `Hostname` or `machine-id` can be a good choice. Or discovery will fail due to duplicated name.**
|
||||
**Each member must have a different name flag specified or else discovery will fail due to duplicated names. `Hostname` or `machine-id` can be a good choice.**
|
||||
|
||||
Now we start etcd with those relevant flags for each member:
|
||||
|
||||
@ -475,5 +475,5 @@ To setup an etcd cluster with proxies of v2 API, please read the the [clustering
|
||||
[proxy]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/proxy.md
|
||||
[clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md
|
||||
[security-guide]: security.md
|
||||
[tls-setup]: /hack/tls-setup
|
||||
[tls-setup]: ../../hack/tls-setup
|
||||
[gateway]: gateway.md
|
||||
|
@ -247,7 +247,7 @@ The security flags help to [build a secure etcd cluster][security].
|
||||
+ env variable: ETCD_DEBUG
|
||||
|
||||
### --log-package-levels
|
||||
+ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG`
|
||||
+ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG`
|
||||
+ default: none (INFO for all packages)
|
||||
+ env variable: ETCD_LOG_PACKAGE_LEVELS
|
||||
|
||||
@ -279,10 +279,14 @@ Follow the instructions when using these flags.
|
||||
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
|
||||
+ default: false
|
||||
|
||||
### --metrics
|
||||
+ Set level of detail for exported metrics, specify 'extensive' to include histogram metrics.
|
||||
+ default: basic
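For instance, a minimal sketch of enabling the extra histogram metrics at startup:

```
# 'extensive' adds histogram metrics on top of the default 'basic' set
$ etcd --metrics extensive
```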
|
||||
|
||||
[build-cluster]: clustering.md#static
|
||||
[reconfig]: runtime-configuration.md
|
||||
[discovery]: clustering.md#discovery
|
||||
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
|
||||
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
|
||||
[proxy]: ../v2/proxy.md
|
||||
[restore]: ../v2/admin_guide.md#restoring-a-backup
|
||||
[security]: security.md
|
||||
|
@ -57,7 +57,7 @@ sudo rkt run --net=default:IP=${NODE3} coreos.com/etcd:v3.0.6 -- -name=node3 -ad
|
||||
Verify the cluster is healthy and can be reached.
|
||||
|
||||
```
|
||||
ETCDCTL_API=3 etcdctl --endpoints=http://172.16.28.21:2379,http://172.16.28.22:2379,http://172.16.28.23:2379 endpoint-health
|
||||
ETCDCTL_API=3 etcdctl --endpoints=http://172.16.28.21:2379,http://172.16.28.22:2379,http://172.16.28.23:2379 endpoint health
|
||||
```
|
||||
|
||||
### DNS
|
||||
|
BIN
Documentation/op-guide/etcd-sample-grafana.png
Normal file
1012
Documentation/op-guide/grafana.json
Normal file
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
||||
# gRPC proxy
|
||||
|
||||
*This is a pre-alpha feature, we are looking for early feedback.*
|
||||
*This is an alpha feature, we are looking for early feedback.*
|
||||
|
||||
The gRPC proxy is a stateless etcd reverse proxy operating at the gRPC layer (L7). The proxy is designed to reduce the total processing load on the core etcd cluster. For horizontal scalability, it coalesces watch and lease API requests. To protect the cluster against abusive clients, it caches key range requests.
|
||||
|
||||
@ -36,9 +36,9 @@ watch key A ^ ^ watch key A |
|
||||
|
||||
To effectively coalesce multiple client watchers into a single watcher, the gRPC proxy coalesces new `c-watchers` into an existing `s-watcher` when possible. This coalesced `s-watcher` may be out of sync with the etcd server due to network delays or buffered undelivered events. When the watch revision is unspecified, the gRPC proxy will not guarantee the `c-watcher` will start watching from the most recent store revision. For example, if a client watches from an etcd server with revision 1000, that watcher will begin at revision 1000. If a client watches from the gRPC proxy, it may begin watching from revision 990.
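When an application needs a precise starting point despite this behavior, one sketch is to pass an explicit revision to the watch instead of relying on the proxy's notion of "current" (the key and revision below are illustrative):

```
# watching from an explicit revision through the proxy avoids starting behind the latest store revision
$ ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 watch foo --rev=1000
```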
|
||||
|
||||
Similar limitations apply to cancellation. When the watcher is cancelled, the etcd server’s revision may be greater than the cancellation response revision.
|
||||
Similar limitations apply to cancellation. When the watcher is cancelled, the etcd server’s revision may be greater than the cancellation response revision.
|
||||
|
||||
These two limitations should not cause problems for most use cases. In the future, there may be additional options to force the watcher to bypass the gRPC proxy for more accurate revision responses.
|
||||
These two limitations should not cause problems for most use cases. In the future, there may be additional options to force the watcher to bypass the gRPC proxy for more accurate revision responses.
|
||||
|
||||
## Scalable lease API
|
||||
|
||||
@ -47,3 +47,32 @@ TODO
|
||||
## Abusive clients protection
|
||||
|
||||
The gRPC proxy caches responses for requests when it does not break consistency requirements. This can protect the etcd server from abusive clients issuing requests in tight loops.
|
||||
|
||||
## Start etcd gRPC proxy
|
||||
|
||||
Consider an etcd cluster with the following static endpoints:
|
||||
|
||||
|Name|Address|Hostname|
|
||||
|------|---------|------------------|
|
||||
|infra0|10.0.1.10|infra0.example.com|
|
||||
|infra1|10.0.1.11|infra1.example.com|
|
||||
|infra2|10.0.1.12|infra2.example.com|
|
||||
|
||||
Start the etcd gRPC proxy to use these static endpoints with the command:
|
||||
|
||||
```bash
|
||||
$ etcd grpc-proxy start --endpoints=infra0.example.com,infra1.example.com,infra2.example.com --listen-addr=127.0.0.1:2379
|
||||
```
|
||||
|
||||
The etcd gRPC proxy starts and listens on the address given by `--listen-addr` (`127.0.0.1:2379` in the example above). It forwards client requests to one of the three endpoints provided above.
|
||||
|
||||
Sending requests through the proxy:
|
||||
|
||||
```bash
|
||||
$ ETCDCTL_API=3 ./etcdctl --endpoints=127.0.0.1:2379 put foo bar
|
||||
OK
|
||||
$ ETCDCTL_API=3 ./etcdctl --endpoints=127.0.0.1:2379 get foo
|
||||
foo
|
||||
bar
|
||||
```
|
||||
|
||||
|
93
Documentation/op-guide/hardware.md
Normal file
@ -0,0 +1,93 @@
|
||||
# Hardware recommendations
|
||||
|
||||
etcd usually runs well with limited resources for development or testing purposes; it’s common to develop with etcd on a laptop or a cheap cloud machine. However, when running etcd clusters in production, some hardware guidelines are useful for proper administration. These suggestions are not hard rules; they serve as a good starting point for a robust production deployment. As always, deployments should be tested with simulated workloads before running in production.
|
||||
|
||||
## CPUs
|
||||
|
||||
Few etcd deployments require a lot of CPU capacity. Typical clusters need two to four cores to run smoothly.
|
||||
Heavily loaded etcd deployments, serving thousands of clients or tens of thousands of requests per second, tend to be CPU bound since etcd can serve requests from memory. Such heavy deployments usually need eight to sixteen dedicated cores.
|
||||
|
||||
|
||||
## Memory
|
||||
|
||||
etcd has a relatively small memory footprint but its performance still depends on having enough memory. An etcd server aggressively caches key-value data and spends most of the rest of its memory tracking watchers. Typically 8GB is enough. For heavy deployments with thousands of watchers and millions of keys, allocate 16GB to 64GB of memory accordingly.
|
||||
|
||||
|
||||
## Disks
|
||||
|
||||
Fast disks are the most critical factor for etcd deployment performance and stability.
|
||||
|
||||
A slow disk will increase etcd request latency and potentially hurt cluster stability. Since etcd’s consensus protocol depends on persistently storing metadata to a log, a majority of etcd cluster members must write every request down to disk. Additionally, etcd will also incrementally checkpoint its state to disk so it can truncate this log. If these writes take too long, heartbeats may time out and trigger an election, undermining the stability of the cluster.
|
||||
|
||||
etcd is very sensitive to disk write latency. Typically 50 sequential IOPS (e.g., a 7200 RPM disk) is required. For heavily loaded clusters, 500 sequential IOPS (e.g., a typical local SSD or a high performance virtualized block device) is recommended. Note that most cloud providers publish concurrent IOPS rather than sequential IOPS; the published concurrent IOPS can be 10x greater than the sequential IOPS. To measure actual sequential IOPS, we suggest using a disk benchmarking tool such as [diskbench][diskbench] or [fio][fio].
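As a rough sketch of such a measurement, `fio` can replay etcd's small synchronous write pattern against the intended data disk (the directory, size, and block size below are illustrative):

```
# sequential 2300-byte writes with an fdatasync after each, similar to etcd's WAL behavior
$ fio --rw=write --ioengine=sync --fdatasync=1 --directory=/var/lib/etcd-test --size=22m --bs=2300 --name=etcd-disk-check
```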
|
||||
|
||||
etcd requires only modest disk bandwidth but more disk bandwidth buys faster recovery times when a failed member has to catch up with the cluster. Typically 10MB/s will recover 100MB data within 15 seconds. For large clusters, 100MB/s or higher is suggested for recovering 1GB data within 15 seconds.
|
||||
|
||||
When possible, back etcd’s storage with an SSD. An SSD usually provides lower write latencies with less variance than a spinning disk, thus improving the stability and reliability of etcd. If using spinning disk, get the fastest disks possible (15,000 RPM). Using RAID 0 is also an effective way to increase disk speed, for both spinning disks and SSD. With at least three cluster members, mirroring and/or parity variants of RAID are unnecessary; etcd's consistent replication already provides high availability.
|
||||
|
||||
|
||||
## Network
|
||||
|
||||
Multi-member etcd deployments benefit from a fast and reliable network. Because etcd is both consistent and partition tolerant, an unreliable network with partitioning outages will lead to poor availability. Low latency ensures etcd members can communicate quickly. High bandwidth can reduce the time to recover a failed etcd member. 1GbE is sufficient for common etcd deployments. For large etcd clusters, a 10GbE network will reduce mean time to recovery.
|
||||
|
||||
Deploy etcd members within a single data center when possible to avoid latency overheads and lessen the possibility of partitioning events. If a failure domain in another data center is required, choose a data center closer to the existing one. Please also read the [tuning][tuning] documentation for more information on cross data center deployment.
|
||||
|
||||
|
||||
## Example hardware configurations
|
||||
|
||||
Here are a few example hardware setups in AWS and GCE environments. As mentioned before, and worth stressing again, administrators should test an etcd deployment with a simulated workload before putting it into production.
|
||||
|
||||
Note that these configurations assume these machines are totally dedicated to etcd. Running other applications along with etcd on these machines may cause resource contention and lead to cluster instability.
|
||||
|
||||
### Small cluster
|
||||
|
||||
A small cluster serves fewer than 100 clients, fewer than 200 requests per second, and stores no more than 100MB of data.
|
||||
|
||||
Example application workload: A 50-node Kubernetes cluster
|
||||
|
||||
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|
||||
|----------|------|-------|--------|------|----------------|
|
||||
| AWS | m4.large | 2 | 8 | 3600 | 56.25 |
|
||||
| GCE | n1-standard-1 + 50GB PD SSD | 2 | 7.5 | 1500 | 25 |
|
||||
|
||||
|
||||
### Medium cluster
|
||||
|
||||
A medium cluster serves fewer than 500 clients, fewer than 1,000 requests per second, and stores no more than 500MB of data.
|
||||
|
||||
Example application workload: A 250-node Kubernetes cluster
|
||||
|
||||
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|
||||
|----------|------|-------|--------|------|----------------|
|
||||
| AWS | m4.xlarge | 4 | 16 | 6000 | 93.75 |
|
||||
| GCE | n1-standard-4 + 150GB PD SSD | 4 | 15 | 4500 | 75 |
|
||||
|
||||
|
||||
### Large cluster
|
||||
|
||||
A large cluster serves fewer than 1,500 clients, fewer than 10,000 requests per second, and stores no more than 1GB of data.
|
||||
|
||||
Example application workload: A 1,000-node Kubernetes cluster
|
||||
|
||||
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|
||||
|----------|------|-------|--------|------|----------------|
|
||||
| AWS | m4.2xlarge | 8 | 32 | 8000 | 125 |
|
||||
| GCE | n1-standard-8 + 250GB PD SSD | 8 | 30 | 7500 | 125 |
|
||||
|
||||
|
||||
### xLarge cluster
|
||||
|
||||
An xLarge cluster serves more than 1,500 clients, more than 10,000 requests per second, and stores more than 1GB of data.
|
||||
|
||||
Example application workload: A 3,000 node Kubernetes cluster
|
||||
|
||||
| Provider | Type | vCPUs | Memory (GB) | Max concurrent IOPS | Disk bandwidth (MB/s) |
|
||||
|----------|------|-------|--------|------|----------------|
|
||||
| AWS | m4.4xlarge | 16 | 64 | 16,000 | 250 |
|
||||
| GCE | n1-standard-16 + 500GB PD SSD | 16 | 60 | 15,000 | 250 |
|
||||
|
||||
|
||||
[diskbench]: https://github.com/ongardie/diskbenchmark
|
||||
[fio]: https://github.com/axboe/fio
|
||||
[tuning]: ../tuning.md
|
||||
|
82
Documentation/op-guide/monitoring.md
Normal file
@ -0,0 +1,82 @@
|
||||
# Monitoring etcd
|
||||
|
||||
Each etcd server exports metrics under the `/metrics` path on its client port.
|
||||
|
||||
The metrics can be fetched with `curl`:
|
||||
|
||||
```sh
|
||||
$ curl -L http://localhost:2379/metrics
|
||||
|
||||
# HELP etcd_debugging_mvcc_keys_total Total number of keys.
|
||||
# TYPE etcd_debugging_mvcc_keys_total gauge
|
||||
etcd_debugging_mvcc_keys_total 0
|
||||
# HELP etcd_debugging_mvcc_pending_events_total Total number of pending events to be sent.
|
||||
# TYPE etcd_debugging_mvcc_pending_events_total gauge
|
||||
etcd_debugging_mvcc_pending_events_total 0
|
||||
...
|
||||
```
|
||||
|
||||
|
||||
## Prometheus
|
||||
|
||||
Running a [Prometheus][prometheus] monitoring service is the easiest way to ingest and record etcd's metrics.
|
||||
|
||||
First, install Prometheus:
|
||||
|
||||
```sh
|
||||
PROMETHEUS_VERSION="1.3.1"
|
||||
wget https://github.com/prometheus/prometheus/releases/download/v$PROMETHEUS_VERSION/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz -O /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz
|
||||
tar -xvzf /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz --directory /tmp/ --strip-components=1
|
||||
/tmp/prometheus -version
|
||||
```
|
||||
|
||||
Set Prometheus's scraper to target the etcd cluster endpoints:
|
||||
|
||||
```sh
|
||||
cat > /tmp/test-etcd.yaml <<EOF
|
||||
global:
|
||||
scrape_interval: 10s
|
||||
scrape_configs:
|
||||
- job_name: test-etcd
|
||||
static_configs:
|
||||
- targets: ['10.240.0.32:2379','10.240.0.33:2379','10.240.0.34:2379']
|
||||
EOF
|
||||
cat /tmp/test-etcd.yaml
|
||||
```
|
||||
|
||||
Set up the Prometheus handler:
|
||||
|
||||
```sh
|
||||
nohup /tmp/prometheus \
|
||||
-config.file /tmp/test-etcd.yaml \
|
||||
-web.listen-address ":9090" \
|
||||
-storage.local.path "test-etcd.data" >> /tmp/test-etcd.log 2>&1 &
|
||||
```
|
||||
|
||||
Now Prometheus will scrape etcd metrics every 10 seconds.
|
||||
|
||||
|
||||
## Grafana
|
||||
|
||||
[Grafana][grafana] has built-in Prometheus support; just add a Prometheus data source:
|
||||
|
||||
```
|
||||
Name: test-etcd
|
||||
Type: Prometheus
|
||||
Url: http://localhost:9090
|
||||
Access: proxy
|
||||
```
|
||||
|
||||
Then import the default [etcd dashboard template][template] and customize. For instance, if the Prometheus data source is named `my-etcd`, the `datasource` field values in the JSON must also be `my-etcd`.
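A minimal sketch of that rename, assuming the template ships with `test-etcd` as its default `datasource` value, is a single substitution before importing:

```
# adjust the dashboard JSON to point at the "my-etcd" data source (assumes the default value is "test-etcd")
$ sed -i 's/"datasource": "test-etcd"/"datasource": "my-etcd"/g' grafana.json
```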
|
||||
|
||||
See the [demo][demo].
|
||||
|
||||
Sample dashboard:
|
||||
|
||||

|
||||
|
||||
|
||||
[prometheus]: https://prometheus.io/
|
||||
[grafana]: http://grafana.org/
|
||||
[template]: ./grafana.json
|
||||
[demo]: http://dash.etcd.io/dashboard/db/test-etcd
|
@ -11,7 +11,7 @@ To recover from disastrous failure, etcd v3 provides snapshot and restore facili
|
||||
Recovering a cluster first needs a snapshot of the keyspace from an etcd member. A snapshot may either be taken from a live member with the `etcdctl snapshot save` command or by copying the `member/snap/db` file from an etcd data directory. For example, the following command snapshots the keyspace served by `$ENDPOINT` to the file `snapshot.db`:
|
||||
|
||||
```sh
|
||||
$ etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
|
||||
$ ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
|
||||
```
|
||||
|
||||
### Restoring a cluster
|
||||
@ -23,19 +23,19 @@ Snapshot integrity may be optionally verified at restore time. If the snapshot i
|
||||
A restore initializes a new member of a new cluster, with a fresh cluster configuration using `etcd`'s cluster configuration flags, but preserves the contents of the etcd keyspace. Continuing from the previous example, the following creates new etcd data directories (`m1.etcd`, `m2.etcd`, `m3.etcd`) for a three member cluster:
|
||||
|
||||
```sh
|
||||
$ etcdctl snapshot restore snapshot.db \
|
||||
$ ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
|
||||
--name m1 \
|
||||
--initial-cluster m1=http:/host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
|
||||
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
|
||||
--initial-cluster-token etcd-cluster-1 \
|
||||
--initial-advertise-peer-urls http://host1:2380
|
||||
$ etcdctl snapshot restore snapshot.db \
|
||||
$ ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
|
||||
--name m2 \
|
||||
--initial-cluster m1=http:/host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
|
||||
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
|
||||
--initial-cluster-token etcd-cluster-1 \
|
||||
--initial-advertise-peer-urls http://host2:2380
|
||||
$ etcdctl snapshot restore snapshot.db \
|
||||
$ ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
|
||||
--name m3 \
|
||||
--initial-cluster m1=http:/host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
|
||||
--initial-cluster m1=http://host1:2380,m2=http://host2:2380,m3=http://host3:2380 \
|
||||
--initial-cluster-token etcd-cluster-1 \
|
||||
--initial-advertise-peer-urls http://host3:2380
|
||||
```
|
||||
|
@ -169,7 +169,7 @@ As described in the above, the best practice of adding new members is to configu
|
||||
|
||||
To avoid this problem, etcd provides the `-strict-reconfig-check` option. If this option is passed to etcd, etcd rejects reconfiguration requests if the number of started members will be less than a quorum of the reconfigured cluster.
|
||||
|
||||
It is recommended to enable this option. However, it is disabled by default because of keeping compatibility.
|
||||
It is enabled by default.
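As a sketch, the flag is boolean, so it can also be set explicitly at startup; passing `=false` opts back into the old, less safe behavior:

```
# keep the default (recommended); shown explicitly here for illustration
$ etcd --strict-reconfig-check=true
```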
|
||||
|
||||
[add member]: #add-a-new-member
|
||||
[cluster-reconf]: #cluster-reconfiguration-operations
|
||||
|
@ -219,6 +219,6 @@ Make sure to sign the certificates with a Subject Name the member's public IP ad
|
||||
The certificate needs to be signed for the member's FQDN in its Subject Name; use Subject Alternative Names (short IP SANs) to add the IP address. The `etcd-ca` tool provides a `--domain=` option for its `new-cert` command, and openssl can make [it][alt-name] too.
|
||||
|
||||
[cfssl]: https://github.com/cloudflare/cfssl
|
||||
[tls-setup]: /hack/tls-setup
|
||||
[tls-setup]: ../../hack/tls-setup
|
||||
[tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
|
||||
[alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
|
||||
|
@ -50,7 +50,7 @@ Radius Intelligence uses Kubernetes running CoreOS to containerize and scale int
|
||||
|
||||
## Vonage
|
||||
|
||||
- *Application*: system configuration for microservices, scheduling, locks (future - service discovery)
|
||||
- *Application*: system configuration for microservices, scheduling, locks (future - service discovery)
|
||||
- *Launched*: August 2015
|
||||
- *Cluster Size*: 2 clusters of 5 members in 2 DCs, n local proxies 1-to-1 with microservice, (ssl and SRV look up)
|
||||
- *Order of Data Size*: kilobytes
|
||||
@ -60,3 +60,148 @@ Radius Intelligence uses Kubernetes running CoreOS to containerize and scale int
|
||||
|
||||
[teamcity]: https://www.jetbrains.com/teamcity/
|
||||
[raoofm]:https://github.com/raoofm
|
||||
|
||||
## Qiniu Cloud
|
||||
|
||||
- *Application*: system configuration for microservices, distributed locks
|
||||
- *Launched*: Jan. 2016
|
||||
- *Cluster Size*: 3 members each with several clusters
|
||||
- *Order of Data Size*: kilobytes
|
||||
- *Operator*: Pandora, chenchao@qiniu.com
|
||||
- *Environment*: Baremetal
|
||||
- *Backups*: None, all data can be recreated if necessary
|
||||
|
||||
## QingCloud
|
||||
|
||||
- *Application*: [QingCloud][qingcloud] appcenter cluster for service discovery as [metad][metad] backend.
|
||||
- *Launched*: December 2016
|
||||
- *Cluster Size*: 1 cluster of 3 members per user.
|
||||
- *Order of Data Size*: kilobytes
|
||||
- *Operator*: [yunify][yunify]
|
||||
- *Environment*: QingCloud IaaS
|
||||
- *Backups*: None, all data can be recreated if necessary.
|
||||
|
||||
[metad]:https://github.com/yunify/metad
|
||||
[yunify]:https://github.com/yunify
|
||||
[qingcloud]:https://qingcloud.com/
|
||||
|
||||
|
||||
## Yandex
|
||||
|
||||
- *Application*: system configuration for services, service discovery
|
||||
- *Launched*: March 2016
|
||||
- *Cluster Size*: 3 clusters of 5 members
|
||||
- *Order of Data Size*: several gigabytes
|
||||
- *Operator*: Yandex; [nekto0n][nekto0n]
|
||||
- *Environment*: Bare Metal
|
||||
- *Backups*: None
|
||||
|
||||
[nekto0n]:https://github.com/nekto0n
|
||||
|
||||
## Tencent Games
|
||||
|
||||
- *Application*: Meta data and configuration data for service discovery, Kubernetes, etc.
|
||||
- *Launched*: Jan. 2015
|
||||
- *Cluster Size*: 3 members each with 10s of clusters
|
||||
- *Order of Data Size*: 10s of Megabytes
|
||||
- *Operator*: Tencent Game Operations Department
|
||||
- *Environment*: Baremetal
|
||||
- *Backups*: Periodic sync to backup server
|
||||
|
||||
In Tencent Games, we use Docker and Kubernetes to deploy and run our applications, and use etcd to save metadata for service discovery, Kubernetes, etc.
|
||||
|
||||
## Hyper.sh
|
||||
|
||||
- *Application*: Kubernetes, distributed locks, etc.
|
||||
- *Launched*: April 2016
|
||||
- *Cluster Size*: 1 cluster of 3 members
|
||||
- *Order of Data Size*: 10s of MB
|
||||
- *Operator*: Hyper.sh
|
||||
- *Environment*: Baremetal
|
||||
- *Backups*: None, all data can be recreated if necessary.
|
||||
|
||||
In [hyper.sh][hyper.sh], the container service is backed by [hypernetes][hypernetes], a multi-tenant kubernetes distro. Moreover, we use etcd to coordinate the multiple management services and store global metadata.
|
||||
|
||||
[hypernetes]:https://github.com/hyperhq/hypernetes
|
||||
[Hyper.sh]:https://www.hyper.sh
|
||||
|
||||
## Meitu
|
||||
- *Application*: system configuration for services, service discovery, kubernetes in test environment
|
||||
- *Launched*: October 2015
|
||||
- *Cluster Size*: 1 cluster of 3 members
|
||||
- *Order of Data Size*: megabytes
|
||||
- *Operator*: Meitu, hxj@meitu.com, [shafreeck][shafreeck]
|
||||
- *Environment*: Bare Metal
|
||||
- *Backups*: None, all data can be recreated if necessary.
|
||||
|
||||
[shafreeck]:https://github.com/shafreeck
|
||||
|
||||
## Grab
|
||||
- *Application*: system configuration for services, service discovery
|
||||
- *Launched*: June 2016
|
||||
- *Cluster Size*: 1 cluster of 7 members
|
||||
- *Order of Data Size*: megabytes
|
||||
- *Operator*: Grab, [taxitan][taxitan], [reterVision][reterVision]
|
||||
- *Environment*: AWS
|
||||
- *Backups*: None, all data can be recreated if necessary.
|
||||
|
||||
[taxitan]:https://github.com/taxitan
|
||||
[reterVision]:https://github.com/reterVision
|
||||
|
||||
## DaoCloud.io
|
||||
|
||||
- *Application*: container management
|
||||
- *Launched*: Sep. 2015
|
||||
- *Cluster Size*: 1000+ deployments, each deployment contains a 3 node cluster.
|
||||
- *Order of Data Size*: 100s of Megabytes
|
||||
- *Operator*: daocloud.io
|
||||
- *Environment*: Baremetal and virtual machines
|
||||
- *Backups*: None, all data can be recreated if necessary.
|
||||
|
||||
In [DaoCloud][DaoCloud], we use Docker and Swarm to deploy and run our applications, and we use etcd to save metadata for service discovery.
|
||||
|
||||
[DaoCloud]:https://www.daocloud.io
|
||||
|
||||
## Branch.io
|
||||
|
||||
- *Application*: Kubernetes
|
||||
- *Launched*: April 2016
|
||||
- *Cluster Size*: Multiple clusters, multiple sizes
|
||||
- *Order of Data Size*: 100s of Megabytes
|
||||
- *Operator*: branch.io
|
||||
- *Environment*: AWS, Kubernetes
|
||||
- *Backups*: EBS volume backups
|
||||
|
||||
At [Branch][branch], we use kubernetes heavily as our core microservice platform for staging and production.
|
||||
|
||||
[branch]: https://branch.io
|
||||
|
||||
## Baidu Waimai
|
||||
|
||||
- *Application*: SkyDNS, Kubernetes, UDC, CMDB and other distributed systems
|
||||
- *Launched*: April. 2016
|
||||
- *Cluster Size*: 3 clusters of 5 members
|
||||
- *Order of Data Size*: several gigabytes
|
||||
- *Operator*: Baidu Waimai Operations Department
|
||||
- *Environment*: CentOS 6.5
|
||||
- *Backups*: backup scripts
|
||||
|
||||
## Salesforce.com
|
||||
|
||||
- *Application*: Kubernetes
|
||||
- *Launched*: Jan 2017
|
||||
- *Cluster Size*: Multiple clusters of 3 members
|
||||
- *Order of Data Size*: 100s of Megabytes
|
||||
- *Operator*: Salesforce.com (krmayankk@github)
|
||||
- *Environment*: BareMetal
|
||||
- *Backups*: None, all data can be recreated
|
||||
|
||||
## Hosted Graphite
|
||||
|
||||
- *Application*: Service discovery, locking, ephemeral application data
|
||||
- *Launched*: January 2017
|
||||
- *Cluster Size*: 2 clusters of 7 members
|
||||
- *Order of Data Size*: Megabytes
|
||||
- *Operator*: Hosted Graphite (sre@hostedgraphite.com)
|
||||
- *Environment*: Bare Metal
|
||||
- *Backups*: None, all data is considered ephemeral.
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Reporting bugs
|
||||
|
||||
If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
|
||||
If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
|
||||
|
||||
To make the bug report accurate and easy to understand, please try to create bug reports that are:
|
||||
|
||||
|
@ -6,27 +6,29 @@ In the general case, upgrading from etcd 2.3 to 3.0 can be a zero-downtime, roll
|
||||
|
||||
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
|
||||
|
||||
### Upgrade Checklists
|
||||
### Upgrade checklists
|
||||
|
||||
#### Upgrade Requirements
|
||||
**NOTE:** When [migrating from v2 with no v3 data](https://github.com/coreos/etcd/issues/9480), etcd server v3.2+ panics when etcd restores from existing snapshots but no v3 `ETCD_DATA_DIR/member/snap/db` file. This happens when the server had migrated from v2 with no previous v3 data. This also prevents accidental v3 data loss (e.g. `db` file might have been moved). etcd requires that post v3 migration can only happen with v3 data. Do not upgrade to newer v3 versions until v3.0 server contains v3 data.
|
||||
|
||||
To upgrade an existing etcd deployment to 3.0, the running cluster must be 2.3 or greater. If it's before 2.3, please upgrade to [2.3](https://github.com/coreos/etcd/releases/tag/v2.3.0) before upgrading to 3.0.
|
||||
#### Upgrade requirements
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.
|
||||
To upgrade an existing etcd deployment to 3.0, the running cluster must be 2.3 or greater. If it's before 2.3, please upgrade to [2.3](https://github.com/coreos/etcd/releases/tag/v2.3.8) before upgrading to 3.0.
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl cluster-health` command before proceeding.
|
||||
|
||||
#### Preparation
|
||||
|
||||
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||
|
||||
Before beginning, [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version.
|
||||
Before beginning, [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version.
|
||||
|
||||
#### Mixed Versions
|
||||
#### Mixed versions
|
||||
|
||||
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.0. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
|
||||
|
||||
#### Limitations
|
||||
|
||||
It might take up to 2 minutes for the newly upgraded member to catch up with the existing cluster when the total data size is larger than 50MB. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
It might take up to 2 minutes for the newly upgraded member to catch up with the existing cluster when the total data size is larger than 50MB. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
|
||||
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we’ll be happy to provide advice on the procedure.
|
||||
|
||||
@ -36,13 +38,13 @@ If all members have been upgraded to v3.0, the cluster will be upgraded to v3.0,
|
||||
|
||||
Please [backup the data directory](../v2/admin_guide.md#backing-up-the-datastore) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||
|
||||
### Upgrade Procedure
|
||||
### Upgrade procedure
|
||||
|
||||
This example details the upgrade of a three-member v2.3 etcd cluster running on a local machine.
|
||||
This example details the upgrade of a three-member v2.3 etcd cluster running on a local machine.
|
||||
|
||||
#### 1. Check upgrade requirements.
|
||||
|
||||
Is the the cluster healthy and running v.2.3.x?
|
||||
Is the cluster healthy and running v.2.3.x?
|
||||
|
||||
```
|
||||
$ etcdctl cluster-health
|
||||
@ -52,7 +54,7 @@ member 8211f1d0f64f3269 is healthy: got healthy result from http://localhost:123
|
||||
cluster is healthy
|
||||
|
||||
$ curl http://localhost:2379/version
|
||||
{"etcdserver":"2.3.x","etcdcluster":"2.3.0"}
|
||||
{"etcdserver":"2.3.x","etcdcluster":"2.3.8"}
|
||||
```
|
||||
|
||||
#### 2. Stop the existing etcd process
|
||||
@ -64,7 +66,7 @@ When each etcd process is stopped, expected errors will be logged by other clust
|
||||
2016-06-27 15:21:48.624175 I | rafthttp: the connection with 8211f1d0f64f3269 became inactive
|
||||
```
|
||||
|
||||
It’s a good idea at this point to [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
|
||||
It’s a good idea at this point to [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
|
||||
|
||||
```
|
||||
$ etcdctl backup \
|
||||
@ -102,7 +104,7 @@ Upgraded members will log warnings like the following until the entire cluster i
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
When all members are upgraded, the cluster will report upgrading to 3.0 successfully:
|
||||
When all members are upgraded, the cluster will report upgrading to 3.0 successfully:
|
||||
|
||||
```
|
||||
2016-06-27 15:22:19.873751 N | membership: updated the cluster version from 2.3 to 3.0
|
||||
@ -116,4 +118,14 @@ $ ETCDCTL_API=3 etcdctl endpoint health
|
||||
127.0.0.1:22379 is healthy: successfully committed proposal: took = 18.513301ms
|
||||
```
|
||||
|
||||
## Further considerations
|
||||
|
||||
- etcdctl environment variables have been updated. If `ETCDCTL_API=2 etcdctl cluster-health` works properly but `ETCDCTL_API=3 etcdctl endpoint health` responds with `Error: grpc: timed out when dialing`, be sure to use the [new variable names](https://github.com/coreos/etcd/tree/master/etcdctl#etcdctl), as in the sketch below.
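As a sketch, the v3 tool reads a plural `ETCDCTL_ENDPOINTS` variable (the endpoint list below is illustrative):

```
# ETCDCTL_API=3 etcdctl ignores the old v2-style ETCDCTL_ENDPOINT variable
$ ETCDCTL_API=3 ETCDCTL_ENDPOINTS=localhost:2379,localhost:22379,localhost:32379 etcdctl endpoint health
```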
|
||||
|
||||
## Known Issues
|
||||
|
||||
- etcd < v3.1 does not work properly if built with Go > v1.7. See [Issue 6951](https://github.com/coreos/etcd/issues/6951) for additional information.
|
||||
- If an error such as `transport: http2Client.notifyError got notified that the client transport was broken unexpected EOF.` shows up in the etcd server logs, be sure etcd is a pre-built release or built with (etcd v3.1+ & go v1.7+) or (etcd <v3.1 & go v1.6.x).
|
||||
- Adding a v3 node to v2.3 cluster during upgrades is not supported and could trigger panics. See [Issue 7249](https://github.com/coreos/etcd/issues/7429) for additional information. Mixed versions of etcd members are only allowed during v3 migration. Finish upgrades before making any membership changes.
|
||||
|
||||
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev
|
||||
|
134
Documentation/upgrades/upgrade_3_1.md
Normal file
@ -0,0 +1,134 @@
|
||||
## Upgrade etcd from 3.0 to 3.1
|
||||
|
||||
In the general case, upgrading from etcd 3.0 to 3.1 can be a zero-downtime, rolling upgrade:
|
||||
- one by one, stop the etcd v3.0 processes and replace them with etcd v3.1 processes
|
||||
- after running all v3.1 processes, new features in v3.1 are available to the cluster
|
||||
|
||||
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
|
||||
|
||||
### Upgrade checklists
|
||||
|
||||
**NOTE:** When [migrating from v2 with no v3 data](https://github.com/coreos/etcd/issues/9480), etcd server v3.2+ panics when etcd restores from existing snapshots but no v3 `ETCD_DATA_DIR/member/snap/db` file. This happens when the server had migrated from v2 with no previous v3 data. This also prevents accidental v3 data loss (e.g. `db` file might have been moved). etcd requires that post v3 migration can only happen with v3 data. Do not upgrade to newer v3 versions until v3.0 server contains v3 data.
|
||||
|
||||
#### Monitoring
|
||||
|
||||
Following metrics from v3.0.x have been deprecated in favor of [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus):
|
||||
|
||||
- `etcd_grpc_requests_total`
|
||||
- `etcd_grpc_requests_failed_total`
|
||||
- `etcd_grpc_active_streams`
|
||||
- `etcd_grpc_unary_requests_duration_seconds`
|
||||
|
||||
#### Upgrade requirements
|
||||
|
||||
To upgrade an existing etcd deployment to 3.1, the running cluster must be 3.0 or greater. If it's before 3.0, please [upgrade to 3.0](upgrade_3_0.md) before upgrading to 3.1.
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.
|
||||
|
||||
#### Preparation
|
||||
|
||||
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||
|
||||
Before beginning, [backup the etcd data](../op-guide/maintenance.md#snapshot-backup). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version. Please note that the `snapshot` command only backs up the v3 data. For v2 data, see [backing up v2 datastore](../v2/admin_guide.md#backing-up-the-datastore).
|
||||
|
||||
#### Mixed versions
|
||||
|
||||
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.1. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
|
||||
|
||||
#### Limitations
|
||||
|
||||
Note: If the cluster only has v3 data and no v2 data, it is not subject to this limitation.
|
||||
|
||||
If the cluster is serving a v2 data set larger than 50MB, each newly upgraded member may take up to two minutes to catch up with the existing cluster. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
|
||||
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we'll be happy to provide advice on the procedure.
|
||||
|
||||
#### Downgrade
|
||||
|
||||
If all members have been upgraded to v3.1, the cluster will be upgraded to v3.1, and downgrade from this completed state is **not possible**. If any single member is still v3.0, however, the cluster and its operations remains "v3.0", and it is possible from this mixed cluster state to return to using a v3.0 etcd binary on all members.
|
||||
|
||||
Please [backup the data directory](../op-guide/maintenance.md#snapshot-backup) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||
|
||||
### Upgrade procedure
|
||||
|
||||
This example shows how to upgrade a 3-member v3.0 etcd cluster running on a local machine.
|
||||
|
||||
#### 1. Check upgrade requirements
|
||||
|
||||
Is the cluster healthy and running v3.0.x?
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 6.600684ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 8.540064ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 8.763432ms
|
||||
|
||||
$ curl http://localhost:2379/version
|
||||
{"etcdserver":"3.0.16","etcdcluster":"3.0.0"}
|
||||
```
|
||||
|
||||
#### 2. Stop the existing etcd process
|
||||
|
||||
When each etcd process is stopped, expected errors will be logged by other cluster members. This is normal since a cluster member connection has been (temporarily) broken:
|
||||
|
||||
```
|
||||
2017-01-17 09:34:18.352662 I | raft: raft.node: 1640829d9eea5cfb elected leader 1640829d9eea5cfb at term 5
|
||||
2017-01-17 09:34:18.359630 W | etcdserver: failed to reach the peerURL(http://localhost:2380) of member fd32987dcd0511e0 (Get http://localhost:2380/version: dial tcp 127.0.0.1:2380: getsockopt: connection refused)
|
||||
2017-01-17 09:34:18.359679 W | etcdserver: cannot get the version of member fd32987dcd0511e0 (Get http://localhost:2380/version: dial tcp 127.0.0.1:2380: getsockopt: connection refused)
|
||||
2017-01-17 09:34:18.548116 W | rafthttp: lost the TCP streaming connection with peer fd32987dcd0511e0 (stream Message writer)
|
||||
2017-01-17 09:34:19.147816 W | rafthttp: lost the TCP streaming connection with peer fd32987dcd0511e0 (stream MsgApp v2 writer)
|
||||
2017-01-17 09:34:34.364907 W | etcdserver: failed to reach the peerURL(http://localhost:2380) of member fd32987dcd0511e0 (Get http://localhost:2380/version: dial tcp 127.0.0.1:2380: getsockopt: connection refused)
|
||||
```
|
||||
|
||||
It's a good idea at this point to [backup the etcd data](../op-guide/maintenance.md#snapshot-backup) to provide a downgrade path should any problems occur:
|
||||
|
||||
```
|
||||
$ etcdctl snapshot save backup.db
|
||||
```
|
||||
|
||||
#### 3. Drop-in etcd v3.1 binary and start the new etcd process
|
||||
|
||||
The new v3.1 etcd will publish its information to the cluster:
|
||||
|
||||
```
|
||||
2017-01-17 09:36:00.996590 I | etcdserver: published {Name:my-etcd-1 ClientURLs:[http://localhost:2379]} to cluster 46bc3ce73049e678
|
||||
```
|
||||
|
||||
Verify that each member, and then the entire cluster, becomes healthy with the new v3.1 etcd binary:
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 /etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 5.540129ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 7.321671ms
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 10.629901ms
|
||||
```
|
||||
|
||||
Upgraded members will log warnings like the following until the entire cluster is upgraded. This is expected and will cease after all etcd cluster members are upgraded to v3.1:
|
||||
|
||||
```
|
||||
2017-01-17 09:36:38.406268 W | etcdserver: the local etcd version 3.0.16 is not up-to-date
|
||||
2017-01-17 09:36:38.406295 W | etcdserver: member fd32987dcd0511e0 has a higher version 3.1.0
|
||||
2017-01-17 09:36:42.407695 W | etcdserver: the local etcd version 3.0.16 is not up-to-date
|
||||
2017-01-17 09:36:42.407730 W | etcdserver: member fd32987dcd0511e0 has a higher version 3.1.0
|
||||
```
|
||||
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
When all members are upgraded, the cluster will report upgrading to 3.1 successfully:
|
||||
|
||||
```
|
||||
2017-01-17 09:37:03.100015 I | etcdserver: updating the cluster version from 3.0 to 3.1
|
||||
2017-01-17 09:37:03.104263 N | etcdserver/membership: updated the cluster version from 3.0 to 3.1
|
||||
2017-01-17 09:37:03.104374 I | etcdserver/api: enabled capabilities for version 3.1
|
||||
```
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 /etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 2.312897ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 2.553476ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 2.516902ms
|
||||
```
|
||||
|
||||
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev
|
338
Documentation/upgrades/upgrade_3_2.md
Normal file
@ -0,0 +1,338 @@
|
||||
## Upgrade etcd from 3.1 to 3.2
|
||||
|
||||
In the general case, upgrading from etcd 3.1 to 3.2 can be a zero-downtime, rolling upgrade:
|
||||
- one by one, stop the etcd v3.1 processes and replace them with etcd v3.2 processes
|
||||
- after running all v3.2 processes, new features in v3.2 are available to the cluster
|
||||
|
||||
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
|
||||
|
||||
### Upgrade checklists
|
||||
|
||||
**NOTE:** When [migrating from v2 with no v3 data](https://github.com/coreos/etcd/issues/9480), etcd server v3.2+ panics when etcd restores from existing snapshots but no v3 `ETCD_DATA_DIR/member/snap/db` file. This happens when the server had migrated from v2 with no previous v3 data. This also prevents accidental v3 data loss (e.g. `db` file might have been moved). etcd requires that post v3 migration can only happen with v3 data. Do not upgrade to newer v3 versions until v3.0 server contains v3 data.
|
||||
|
||||
Highlighted breaking changes in 3.2:
|
||||
|
||||
#### Change in default `snapshot-count` value
|
||||
|
||||
The default value of `--snapshot-count` has [changed from 10,000 to 100,000](https://github.com/coreos/etcd/pull/7160). A higher snapshot count means etcd holds Raft entries in memory for longer before discarding old entries. It is a trade-off between less frequent snapshotting and [higher memory usage](https://github.com/kubernetes/kubernetes/issues/60589#issuecomment-371977156). A higher `--snapshot-count` manifests as higher memory usage, while retaining more Raft entries helps with the availability of slow followers: the leader is still able to replicate its logs to followers, rather than forcing followers to rebuild their stores from leader snapshots.
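As a sketch, deployments that prefer the pre-3.2 memory profile can pin the old value explicitly:

```
# retain only 10,000 Raft entries before snapshotting, as in releases before 3.2
$ etcd --snapshot-count=10000
```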
|
||||
|
||||
#### Change in gRPC dependency (>=3.2.10)
|
||||
|
||||
3.2.10 or later now requires [grpc/grpc-go](https://github.com/grpc/grpc-go/releases) `v1.7.5` (<=3.2.9 requires `v1.2.1`).
|
||||
|
||||
##### Deprecate `grpclog.Logger`
|
||||
|
||||
`grpclog.Logger` has been deprecated in favor of [`grpclog.LoggerV2`](https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go). `clientv3.Logger` is now `grpclog.LoggerV2`.
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
clientv3.SetLogger(log.New(os.Stderr, "grpc: ", 0))
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
|
||||
|
||||
// log.New above cannot be used (it does not implement the grpclog.LoggerV2 interface)
|
||||
```
|
||||
|
||||
##### Deprecate `grpc.ErrClientConnTimeout`
|
||||
|
||||
Previously, a `grpc.ErrClientConnTimeout` error was returned on client dial time-outs. 3.2 instead returns `context.DeadlineExceeded` (see [#8504](https://github.com/coreos/etcd/issues/8504)).
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
// expect dial time-out on ipv4 blackhole
|
||||
_, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: 2 * time.Second,
|
||||
})
|
||||
if err == grpc.ErrClientConnTimeout {
|
||||
// handle errors
|
||||
}
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
_, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: 2 * time.Second,
|
||||
})
|
||||
if err == context.DeadlineExceeded {
|
||||
// handle errors
|
||||
}
|
||||
```
|
||||
|
||||
#### Change in maximum request size limits (>=3.2.10)
|
||||
|
||||
3.2.10 and 3.2.11 allow custom request size limits on the server side. >=3.2.12 allows custom request size limits for both server and **client side**. In previous versions (v3.2.10, v3.2.11), the client response size was limited to only 4 MiB.
|
||||
|
||||
Server-side request limits can be configured with `--max-request-bytes` flag:
|
||||
|
||||
```bash
|
||||
# limits request size to 1.5 KiB
|
||||
etcd --max-request-bytes 1536
|
||||
|
||||
# client writes exceeding 1.5 KiB will be rejected
|
||||
etcdctl put foo [LARGE VALUE...]
|
||||
# etcdserver: request is too large
|
||||
```
|
||||
|
||||
Or configure `embed.Config.MaxRequestBytes` field:
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/embed"
|
||||
import "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
|
||||
// limit requests to 5 MiB
|
||||
cfg := embed.NewConfig()
|
||||
cfg.MaxRequestBytes = 5 * 1024 * 1024
|
||||
|
||||
// client writes exceeding 5 MiB will be rejected
|
||||
_, err := cli.Put(ctx, "foo", [LARGE VALUE...])
|
||||
err == rpctypes.ErrRequestTooLarge
|
||||
```
|
||||
|
||||
**If not specified, server-side limit defaults to 1.5 MiB**.
|
||||
|
||||
Client-side request limits must be configured based on server-side limits.
|
||||
|
||||
```bash
|
||||
# limits request size to 1 MiB
|
||||
etcd --max-request-bytes 1048576
|
||||
```
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
|
||||
cli, _ := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"127.0.0.1:2379"},
|
||||
MaxCallSendMsgSize: 2 * 1024 * 1024,
|
||||
MaxCallRecvMsgSize: 3 * 1024 * 1024,
|
||||
})
|
||||
|
||||
|
||||
// client writes exceeding "--max-request-bytes" will be rejected from etcd server
|
||||
_, err := cli.Put(ctx, "foo", strings.Repeat("a", 1*1024*1024+5))
|
||||
err == rpctypes.ErrRequestTooLarge
|
||||
|
||||
|
||||
// client writes exceeding "MaxCallSendMsgSize" will be rejected from client-side
|
||||
_, err = cli.Put(ctx, "foo", strings.Repeat("a", 5*1024*1024))
|
||||
err.Error() == "rpc error: code = ResourceExhausted desc = grpc: trying to send message larger than max (5242890 vs. 2097152)"
|
||||
|
||||
|
||||
// some writes under limits
|
||||
for i := range []int{0,1,2,3,4} {
|
||||
_, err = cli.Put(ctx, fmt.Sprintf("foo%d", i), strings.Repeat("a", 1*1024*1024-500))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// client reads exceeding "MaxCallRecvMsgSize" will be rejected from client-side
|
||||
_, err = cli.Get(ctx, "foo", clientv3.WithPrefix())
|
||||
err.Error() == "rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5240509 vs. 3145728)"
|
||||
```
|
||||
|
||||
**If not specified, client-side send limit defaults to 2 MiB (1.5 MiB + gRPC overhead bytes) and receive limit to `math.MaxInt32`**. Please see [clientv3 godoc](https://godoc.org/github.com/coreos/etcd/clientv3#Config) for more detail.
|
||||
|
||||
#### Change in raw gRPC client wrappers
|
||||
|
||||
3.2.12 or later changes the function signatures of the `clientv3` gRPC client wrappers. This change was needed to support [custom `grpc.CallOption` on message size limits](https://github.com/coreos/etcd/pull/9047).
|
||||
|
||||
Before and after
|
||||
|
||||
```diff
|
||||
-func NewKVFromKVClient(remote pb.KVClient) KV {
|
||||
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
|
||||
|
||||
-func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
|
||||
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
|
||||
|
||||
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
|
||||
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
|
||||
|
||||
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
|
||||
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
|
||||
|
||||
-func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
|
||||
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
|
||||
```
|
||||
|
||||
#### Change in `clientv3.Lease.TimeToLive` API
|
||||
|
||||
Previously, the `clientv3.Lease.TimeToLive` API returned `lease.ErrLeaseNotFound` for a non-existent lease ID. 3.2 instead returns TTL=-1 in its response and no error (see [#7305](https://github.com/coreos/etcd/pull/7305)).
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
// when leaseID does not exist
|
||||
resp, err := TimeToLive(ctx, leaseID)
|
||||
resp == nil
|
||||
err == lease.ErrLeaseNotFound
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
// when leaseID does not exist
|
||||
resp, err := TimeToLive(ctx, leaseID)
|
||||
resp.TTL == -1
|
||||
err == nil
|
||||
```
|
||||
|
||||
#### Change in `clientv3.NewFromConfigFile`
|
||||
|
||||
`clientv3.NewFromConfigFile` is moved to `yaml.NewConfig`.
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
clientv3.NewFromConfigFile
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
import clientv3yaml "github.com/coreos/etcd/clientv3/yaml"
|
||||
clientv3yaml.NewConfig
|
||||
```
|
||||
|
||||
#### Change in `--listen-peer-urls` and `--listen-client-urls`
|
||||
|
||||
3.2 now rejects domain names for `--listen-peer-urls` and `--listen-client-urls` (3.1 only prints out warnings), since a domain name cannot be bound to a network interface. Make sure that those URLs are properly formatted as `scheme://IP:port`.
|
||||
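For example (addresses are illustrative):

```bash
# rejected by 3.2: a domain name cannot be bound to a network interface
etcd --listen-client-urls https://etcd.example.com:2379

# accepted: scheme://IP:port
etcd --listen-client-urls https://10.0.1.10:2379
```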
|
||||
See [issue #6336](https://github.com/coreos/etcd/issues/6336) for more context.
|
||||
|
||||
### Server upgrade checklists
|
||||
|
||||
#### Upgrade requirements
|
||||
|
||||
To upgrade an existing etcd deployment to 3.2, the running cluster must be 3.1 or greater. If it's before 3.1, please [upgrade to 3.1](upgrade_3_1.md) before upgrading to 3.2.
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.
|
||||
|
||||
#### Preparation
|
||||
|
||||
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||
|
||||
Before beginning, [backup the etcd data](../op-guide/maintenance.md#snapshot-backup). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to the existing etcd version. Please note that the `snapshot` command only backs up the v3 data. For v2 data, see [backing up the v2 datastore](../v2/admin_guide.md#backing-up-the-datastore).
|
||||
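For example, a minimal sketch of taking the v3 backup (the output file name is illustrative):

```bash
# save a point-in-time snapshot of the v3 keyspace before upgrading
ETCDCTL_API=3 etcdctl snapshot save backup.db
```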
|
||||
#### Mixed versions
|
||||
|
||||
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.2. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
|
||||
|
||||
#### Limitations
|
||||
|
||||
Note: If the cluster only has v3 data and no v2 data, it is not subject to this limitation.
|
||||
|
||||
If the cluster is serving a v2 data set larger than 50MB, each newly upgraded member may take up to two minutes to catch up with the existing cluster. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
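A rough way to estimate the on-disk size, assuming the default data directory layout (the path is illustrative):

```bash
# snapshots and the v3 backend live under the member directory
du -sh ${ETCD_DATA_DIR}/member/snap
```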
|
||||
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we'll be happy to provide advice on the procedure.
|
||||
|
||||
#### Downgrade
|
||||
|
||||
If all members have been upgraded to v3.2, the cluster will be upgraded to v3.2, and downgrade from this completed state is **not possible**. If any single member is still v3.1, however, the cluster and its operations remain "v3.1", and it is possible from this mixed cluster state to return to using a v3.1 etcd binary on all members.
|
||||
|
||||
Please [backup the data directory](../op-guide/maintenance.md#snapshot-backup) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||
|
||||
### Upgrade procedure
|
||||
|
||||
This example shows how to upgrade a 3-member v3.1 etcd cluster running on a local machine.
|
||||
|
||||
#### 1. Check upgrade requirements
|
||||
|
||||
Is the cluster healthy and running v3.1.x?
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 6.600684ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 8.540064ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 8.763432ms
|
||||
|
||||
$ curl http://localhost:2379/version
|
||||
{"etcdserver":"3.1.7","etcdcluster":"3.1.0"}
|
||||
```
|
||||
|
||||
#### 2. Stop the existing etcd process
|
||||
|
||||
When each etcd process is stopped, expected errors will be logged by other cluster members. This is normal since a cluster member connection has been (temporarily) broken:
|
||||
|
||||
```
|
||||
2017-04-27 14:13:31.491746 I | raft: c89feb932daef420 [term 3] received MsgTimeoutNow from 6d4f535bae3ab960 and starts an election to get leadership.
|
||||
2017-04-27 14:13:31.491769 I | raft: c89feb932daef420 became candidate at term 4
|
||||
2017-04-27 14:13:31.491788 I | raft: c89feb932daef420 received MsgVoteResp from c89feb932daef420 at term 4
|
||||
2017-04-27 14:13:31.491797 I | raft: c89feb932daef420 [logterm: 3, index: 9] sent MsgVote request to 6d4f535bae3ab960 at term 4
|
||||
2017-04-27 14:13:31.491805 I | raft: c89feb932daef420 [logterm: 3, index: 9] sent MsgVote request to 9eda174c7df8a033 at term 4
|
||||
2017-04-27 14:13:31.491815 I | raft: raft.node: c89feb932daef420 lost leader 6d4f535bae3ab960 at term 4
|
||||
2017-04-27 14:13:31.524084 I | raft: c89feb932daef420 received MsgVoteResp from 6d4f535bae3ab960 at term 4
|
||||
2017-04-27 14:13:31.524108 I | raft: c89feb932daef420 [quorum:2] has received 2 MsgVoteResp votes and 0 vote rejections
|
||||
2017-04-27 14:13:31.524123 I | raft: c89feb932daef420 became leader at term 4
|
||||
2017-04-27 14:13:31.524136 I | raft: raft.node: c89feb932daef420 elected leader c89feb932daef420 at term 4
|
||||
2017-04-27 14:13:31.592650 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream MsgApp v2 reader)
|
||||
2017-04-27 14:13:31.592825 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream Message reader)
|
||||
2017-04-27 14:13:31.693275 E | rafthttp: failed to dial 6d4f535bae3ab960 on stream Message (dial tcp [::1]:2380: getsockopt: connection refused)
|
||||
2017-04-27 14:13:31.693289 I | rafthttp: peer 6d4f535bae3ab960 became inactive
|
||||
2017-04-27 14:13:31.936678 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream Message writer)
|
||||
```
|
||||
|
||||
It's a good idea at this point to [backup the etcd data](../op-guide/maintenance.md#snapshot-backup) to provide a downgrade path should any problems occur:
|
||||
|
||||
```
|
||||
$ etcdctl snapshot save backup.db
|
||||
```
|
||||
|
||||
#### 3. Drop-in etcd v3.2 binary and start the new etcd process
|
||||
|
||||
The new v3.2 etcd will publish its information to the cluster:
|
||||
|
||||
```
|
||||
2017-04-27 14:14:25.363225 I | etcdserver: published {Name:s1 ClientURLs:[http://localhost:2379]} to cluster a9ededbffcb1b1f1
|
||||
```
|
||||
|
||||
Verify that each member, and then the entire cluster, becomes healthy with the new v3.2 etcd binary:
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 5.540129ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 7.321771ms
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 10.629901ms
|
||||
```
|
||||
|
||||
Upgraded members will log warnings like the following until the entire cluster is upgraded. This is expected and will cease after all etcd cluster members are upgraded to v3.2:
|
||||
|
||||
```
|
||||
2017-04-27 14:15:17.071804 W | etcdserver: member c89feb932daef420 has a higher version 3.2.0
|
||||
2017-04-27 14:15:21.073110 W | etcdserver: the local etcd version 3.1.7 is not up-to-date
|
||||
2017-04-27 14:15:21.073142 W | etcdserver: member 6d4f535bae3ab960 has a higher version 3.2.0
|
||||
2017-04-27 14:15:21.073157 W | etcdserver: the local etcd version 3.1.7 is not up-to-date
|
||||
2017-04-27 14:15:21.073164 W | etcdserver: member c89feb932daef420 has a higher version 3.2.0
|
||||
```
|
||||
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
When all members are upgraded, the cluster will report a successful upgrade to 3.2:
|
||||
|
||||
```
|
||||
2017-04-27 14:15:54.536901 N | etcdserver/membership: updated the cluster version from 3.1 to 3.2
|
||||
2017-04-27 14:15:54.537035 I | etcdserver/api: enabled capabilities for version 3.2
|
||||
```
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 2.312897ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 2.553476ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 2.517902ms
|
||||
```
|
||||
|
||||
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev
|
476
Documentation/upgrades/upgrade_3_3.md
Normal file
@ -0,0 +1,476 @@
|
||||
## Upgrade etcd from 3.2 to 3.3
|
||||
|
||||
In the general case, upgrading from etcd 3.2 to 3.3 can be a zero-downtime, rolling upgrade:
|
||||
- one by one, stop the etcd v3.2 processes and replace them with etcd v3.3 processes
|
||||
- after running all v3.3 processes, new features in v3.3 are available to the cluster
|
||||
|
||||
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
|
||||
|
||||
### Upgrade checklists
|
||||
|
||||
**NOTE:** When [migrating from v2 with no v3 data](https://github.com/coreos/etcd/issues/9480), etcd server v3.2+ panics when it restores from existing snapshots but finds no v3 `ETCD_DATA_DIR/member/snap/db` file. This happens when the server migrated from v2 with no previous v3 data. It also prevents accidental v3 data loss (e.g. the `db` file might have been moved). etcd requires that the post-migration server only run with v3 data present. Do not upgrade to newer v3 versions until the v3.0 server contains v3 data.
|
||||
|
||||
Highlighted breaking changes in 3.3.
|
||||
|
||||
#### Change in `etcdserver.EtcdServer` struct
|
||||
|
||||
`etcdserver.EtcdServer` has changed the type of its server configuration field from `*etcdserver.ServerConfig` to `etcdserver.ServerConfig`, and `etcdserver.NewServer` now takes `etcdserver.ServerConfig` instead of `*etcdserver.ServerConfig`.
|
||||
|
||||
Before and after (e.g. [k8s.io/kubernetes/test/e2e_node/services/etcd.go](https://github.com/kubernetes/kubernetes/blob/release-1.8/test/e2e_node/services/etcd.go#L50-L55))
|
||||
|
||||
```diff
|
||||
import "github.com/coreos/etcd/etcdserver"
|
||||
|
||||
type EtcdServer struct {
|
||||
*etcdserver.EtcdServer
|
||||
- config *etcdserver.ServerConfig
|
||||
+ config etcdserver.ServerConfig
|
||||
}
|
||||
|
||||
func NewEtcd(dataDir string) *EtcdServer {
|
||||
- config := &etcdserver.ServerConfig{
|
||||
+ config := etcdserver.ServerConfig{
|
||||
DataDir: dataDir,
|
||||
...
|
||||
}
|
||||
return &EtcdServer{config: config}
|
||||
}
|
||||
|
||||
func (e *EtcdServer) Start() error {
|
||||
var err error
|
||||
e.EtcdServer, err = etcdserver.NewServer(e.config)
|
||||
...
|
||||
```
|
||||
|
||||
#### Change in `embed.Config` struct
|
||||
|
||||
Field `LogOutput` is added to `embed.Config`:
|
||||
|
||||
```diff
|
||||
package embed
|
||||
|
||||
type Config struct {
|
||||
Debug bool `json:"debug"`
|
||||
LogPkgLevels string `json:"log-package-levels"`
|
||||
+ LogOutput string `json:"log-output"`
|
||||
...
|
||||
```
|
||||
|
||||
Previously, gRPC server warnings were logged by etcdserver:
|
||||
|
||||
```
|
||||
WARNING: 2017/11/02 11:35:51 grpc: addrConn.resetTransport failed to create client transport: connection error: desc = "transport: Error while dialing dial tcp: operation was canceled"; Reconnecting to {localhost:2379 <nil>}
|
||||
WARNING: 2017/11/02 11:35:51 grpc: addrConn.resetTransport failed to create client transport: connection error: desc = "transport: Error while dialing dial tcp: operation was canceled"; Reconnecting to {localhost:2379 <nil>}
|
||||
```
|
||||
|
||||
From v3.3, gRPC server logs are disabled by default.
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/embed"
|
||||
|
||||
cfg := &embed.Config{Debug: false}
|
||||
cfg.SetupLogging()
|
||||
```
|
||||
|
||||
Set `embed.Config.Debug` field to `true` to enable gRPC server logs.
|
||||
|
||||
#### Change in `/health` endpoint response
|
||||
|
||||
Previously, `[endpoint]:[client-port]/health` returned a manually marshaled JSON value. 3.3 now defines the [`etcdhttp.Health`](https://godoc.org/github.com/coreos/etcd/etcdserver/api/etcdhttp#Health) struct.
|
||||
|
||||
Note that in v3.3.0-rc.0, v3.3.0-rc.1, and v3.3.0-rc.2, `etcdhttp.Health` has a boolean `"health"` field and an `"errors"` field. For backward compatibility, the `"health"` field was reverted to `string` type and the `"errors"` field was removed. Further health information will be provided in separate APIs.
|
||||
|
||||
```bash
|
||||
$ curl http://localhost:2379/health
|
||||
{"health":"true"}
|
||||
```
|
||||
|
||||
#### Change in gRPC gateway HTTP endpoints (replaced `/v3alpha` with `/v3beta`)
|
||||
|
||||
Before
|
||||
|
||||
```bash
|
||||
curl -L http://localhost:2379/v3alpha/kv/put \
|
||||
-X POST -d '{"key": "Zm9v", "value": "YmFy"}'
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```bash
|
||||
curl -L http://localhost:2379/v3beta/kv/put \
|
||||
-X POST -d '{"key": "Zm9v", "value": "YmFy"}'
|
||||
```
|
||||
|
||||
Requests to `/v3alpha` endpoints will redirect to `/v3beta`, and `/v3alpha` will be removed in the 3.4 release.
|
||||
|
||||
#### Change in maximum request size limits
|
||||
|
||||
3.3 now allows custom request size limits for both server and **client side**. In previous versions (v3.2.10, v3.2.11), the client response size was limited to only 4 MiB.
|
||||
|
||||
Server-side request limits can be configured with `--max-request-bytes` flag:
|
||||
|
||||
```bash
|
||||
# limits request size to 1.5 KiB
|
||||
etcd --max-request-bytes 1536
|
||||
|
||||
# client writes exceeding 1.5 KiB will be rejected
|
||||
etcdctl put foo [LARGE VALUE...]
|
||||
# etcdserver: request is too large
|
||||
```
|
||||
|
||||
Or configure `embed.Config.MaxRequestBytes` field:
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/embed"
|
||||
import "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
|
||||
// limit requests to 5 MiB
|
||||
cfg := embed.NewConfig()
|
||||
cfg.MaxRequestBytes = 5 * 1024 * 1024
|
||||
|
||||
// client writes exceeding 5 MiB will be rejected
|
||||
_, err := cli.Put(ctx, "foo", [LARGE VALUE...])
|
||||
err == rpctypes.ErrRequestTooLarge
|
||||
```
|
||||
|
||||
**If not specified, server-side limit defaults to 1.5 MiB**.
|
||||
|
||||
Client-side request limits must be configured based on server-side limits.
|
||||
|
||||
```bash
|
||||
# limits request size to 1 MiB
|
||||
etcd --max-request-bytes 1048576
|
||||
```
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
|
||||
cli, _ := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"127.0.0.1:2379"},
|
||||
MaxCallSendMsgSize: 2 * 1024 * 1024,
|
||||
MaxCallRecvMsgSize: 3 * 1024 * 1024,
|
||||
})
|
||||
|
||||
|
||||
// client writes exceeding "--max-request-bytes" will be rejected from etcd server
|
||||
_, err := cli.Put(ctx, "foo", strings.Repeat("a", 1*1024*1024+5))
|
||||
err == rpctypes.ErrRequestTooLarge
|
||||
|
||||
|
||||
// client writes exceeding "MaxCallSendMsgSize" will be rejected from client-side
|
||||
_, err = cli.Put(ctx, "foo", strings.Repeat("a", 5*1024*1024))
|
||||
err.Error() == "rpc error: code = ResourceExhausted desc = grpc: trying to send message larger than max (5242890 vs. 2097152)"
|
||||
|
||||
|
||||
// some writes under limits
|
||||
for i := range []int{0,1,2,3,4} {
|
||||
_, err = cli.Put(ctx, fmt.Sprintf("foo%d", i), strings.Repeat("a", 1*1024*1024-500))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// client reads exceeding "MaxCallRecvMsgSize" will be rejected from client-side
|
||||
_, err = cli.Get(ctx, "foo", clientv3.WithPrefix())
|
||||
err.Error() == "rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5240509 vs. 3145728)"
|
||||
```
|
||||
|
||||
**If not specified, client-side send limit defaults to 2 MiB (1.5 MiB + gRPC overhead bytes) and receive limit to `math.MaxInt32`**. Please see [clientv3 godoc](https://godoc.org/github.com/coreos/etcd/clientv3#Config) for more detail.
|
||||
|
||||
#### Change in raw gRPC client wrappers
|
||||
|
||||
3.3 changes the function signatures of the `clientv3` gRPC client wrappers. This change was needed to support [custom `grpc.CallOption` on message size limits](https://github.com/coreos/etcd/pull/9047).
|
||||
|
||||
Before and after
|
||||
|
||||
```diff
|
||||
-func NewKVFromKVClient(remote pb.KVClient) KV {
|
||||
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
|
||||
|
||||
-func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
|
||||
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
|
||||
|
||||
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
|
||||
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
|
||||
|
||||
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
|
||||
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
|
||||
|
||||
-func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
|
||||
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
|
||||
```
|
||||
|
||||
#### Change in clientv3 `Snapshot` API error type
|
||||
|
||||
Previously, the clientv3 `Snapshot` API returned a raw [`grpc/*status.statusError`] type error. v3.3 now translates those errors into the corresponding public error types, to be consistent with other APIs.
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
import "context"
|
||||
|
||||
// reading snapshot with canceled context should error out
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
rc, _ := cli.Snapshot(ctx)
|
||||
cancel()
|
||||
_, err := io.Copy(f, rc)
|
||||
err.Error() == "rpc error: code = Canceled desc = context canceled"
|
||||
|
||||
// reading snapshot with deadline exceeded should error out
|
||||
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
rc, _ = cli.Snapshot(ctx)
|
||||
time.Sleep(2 * time.Second)
|
||||
_, err = io.Copy(f, rc)
|
||||
err.Error() == "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
import "context"
|
||||
|
||||
// reading snapshot with canceled context should error out
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
rc, _ := cli.Snapshot(ctx)
|
||||
cancel()
|
||||
_, err := io.Copy(f, rc)
|
||||
err == context.Canceled
|
||||
|
||||
// reading snapshot with deadline exceeded should error out
|
||||
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
rc, _ = cli.Snapshot(ctx)
|
||||
time.Sleep(2 * time.Second)
|
||||
_, err = io.Copy(f, rc)
|
||||
err == context.DeadlineExceeded
|
||||
```
|
||||
|
||||
#### Change in `etcdctl lease timetolive` command output
|
||||
|
||||
Previously, the `lease timetolive LEASE_ID` command printed `-1s` as the remaining time for an expired lease. 3.3 now outputs a clearer message.
|
||||
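For reference, the outputs below come from a query like the following (the lease ID is illustrative):

```bash
ETCDCTL_API=3 etcdctl lease timetolive 2d8257079fa1bc0c
```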
|
||||
Before
|
||||
|
||||
|
||||
```bash
|
||||
lease 2d8257079fa1bc0c granted with TTL(0s), remaining(-1s)
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```bash
|
||||
lease 2d8257079fa1bc0c already expired
|
||||
```
|
||||
|
||||
#### Change in `golang.org/x/net/context` imports
|
||||
|
||||
`clientv3` has deprecated `golang.org/x/net/context`. If a project vendors `golang.org/x/net/context` in other code (e.g. etcd generated protocol buffer code) and imports `github.com/coreos/etcd/clientv3`, it requires Go 1.9+ to compile.
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
import "golang.org/x/net/context"
|
||||
cli.Put(context.Background(), "f", "v")
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
import "context"
|
||||
cli.Put(context.Background(), "f", "v")
|
||||
```
|
||||
|
||||
#### Change in gRPC dependency
|
||||
|
||||
3.3 now requires [grpc/grpc-go](https://github.com/grpc/grpc-go/releases) `v1.7.5`.
|
||||
|
||||
##### Deprecate `grpclog.Logger`
|
||||
|
||||
`grpclog.Logger` has been deprecated in favor of [`grpclog.LoggerV2`](https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go). `clientv3.Logger` is now `grpclog.LoggerV2`.
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
clientv3.SetLogger(log.New(os.Stderr, "grpc: ", 0))
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
import "github.com/coreos/etcd/clientv3"
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
|
||||
|
||||
// log.New above cannot be used (it does not implement the grpclog.LoggerV2 interface)
|
||||
```
|
||||
|
||||
##### Deprecate `grpc.ErrClientConnTimeout`
|
||||
|
||||
Previously, the `grpc.ErrClientConnTimeout` error was returned on client dial time-outs. 3.3 instead returns `context.DeadlineExceeded` (see [#8504](https://github.com/coreos/etcd/issues/8504)).
|
||||
|
||||
Before
|
||||
|
||||
```go
|
||||
// expect dial time-out on ipv4 blackhole
|
||||
_, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: 2 * time.Second,
|
||||
})
|
||||
if err == grpc.ErrClientConnTimeout {
|
||||
// handle errors
|
||||
}
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```go
|
||||
_, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: 2 * time.Second,
|
||||
})
|
||||
if err == context.DeadlineExceeded {
|
||||
// handle errors
|
||||
}
|
||||
```
|
||||
|
||||
#### Change in official container registry
|
||||
|
||||
etcd now uses [`gcr.io/etcd-development/etcd`](https://gcr.io/etcd-development/etcd) as its primary container registry, and [`quay.io/coreos/etcd`](https://quay.io/coreos/etcd) as a secondary one.
|
||||
|
||||
Before
|
||||
|
||||
```bash
|
||||
docker pull quay.io/coreos/etcd:v3.2.5
|
||||
```
|
||||
|
||||
After
|
||||
|
||||
```bash
|
||||
docker pull gcr.io/etcd-development/etcd:v3.3.0
|
||||
```
|
||||
|
||||
### Server upgrade checklists
|
||||
|
||||
#### Upgrade requirements
|
||||
|
||||
To upgrade an existing etcd deployment to 3.3, the running cluster must be 3.2 or greater. If it's before 3.2, please [upgrade to 3.2](upgrade_3_2.md) before upgrading to 3.3.
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.
|
||||
|
||||
#### Preparation
|
||||
|
||||
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||
|
||||
Before beginning, [backup the etcd data](../op-guide/maintenance.md#snapshot-backup). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to the existing etcd version. Please note that the `snapshot` command only backs up the v3 data. For v2 data, see [backing up the v2 datastore](../v2/admin_guide.md#backing-up-the-datastore).
|
||||
|
||||
#### Mixed versions
|
||||
|
||||
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.3. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
|
||||
|
||||
#### Limitations
|
||||
|
||||
Note: If the cluster only has v3 data and no v2 data, it is not subject to this limitation.
|
||||
|
||||
If the cluster is serving a v2 data set larger than 50MB, each newly upgraded member may take up to two minutes to catch up with the existing cluster. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
|
||||
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we'll be happy to provide advice on the procedure.
|
||||
|
||||
#### Downgrade
|
||||
|
||||
If all members have been upgraded to v3.3, the cluster will be upgraded to v3.3, and downgrade from this completed state is **not possible**. If any single member is still v3.2, however, the cluster and its operations remain "v3.2", and it is possible from this mixed cluster state to return to using a v3.2 etcd binary on all members.
|
||||
|
||||
Please [backup the data directory](../op-guide/maintenance.md#snapshot-backup) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||
|
||||
### Upgrade procedure
|
||||
|
||||
This example shows how to upgrade a 3-member v3.2 etcd cluster running on a local machine.
|
||||
|
||||
#### 1. Check upgrade requirements
|
||||
|
||||
Is the cluster healthy and running v3.2.x?
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 6.600684ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 8.540064ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 8.763432ms
|
||||
|
||||
$ curl http://localhost:2379/version
|
||||
{"etcdserver":"3.2.7","etcdcluster":"3.2.0"}
|
||||
```
|
||||
|
||||
#### 2. Stop the existing etcd process
|
||||
|
||||
When each etcd process is stopped, expected errors will be logged by other cluster members. This is normal since a cluster member connection has been (temporarily) broken:
|
||||
|
||||
```
|
||||
14:13:31.491746 I | raft: c89feb932daef420 [term 3] received MsgTimeoutNow from 6d4f535bae3ab960 and starts an election to get leadership.
|
||||
14:13:31.491769 I | raft: c89feb932daef420 became candidate at term 4
|
||||
14:13:31.491788 I | raft: c89feb932daef420 received MsgVoteResp from c89feb932daef420 at term 4
|
||||
14:13:31.491797 I | raft: c89feb932daef420 [logterm: 3, index: 9] sent MsgVote request to 6d4f535bae3ab960 at term 4
|
||||
14:13:31.491805 I | raft: c89feb932daef420 [logterm: 3, index: 9] sent MsgVote request to 9eda174c7df8a033 at term 4
|
||||
14:13:31.491815 I | raft: raft.node: c89feb932daef420 lost leader 6d4f535bae3ab960 at term 4
|
||||
14:13:31.524084 I | raft: c89feb932daef420 received MsgVoteResp from 6d4f535bae3ab960 at term 4
|
||||
14:13:31.524108 I | raft: c89feb932daef420 [quorum:2] has received 2 MsgVoteResp votes and 0 vote rejections
|
||||
14:13:31.524123 I | raft: c89feb932daef420 became leader at term 4
|
||||
14:13:31.524136 I | raft: raft.node: c89feb932daef420 elected leader c89feb932daef420 at term 4
|
||||
14:13:31.592650 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream MsgApp v2 reader)
|
||||
14:13:31.592825 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream Message reader)
|
||||
14:13:31.693275 E | rafthttp: failed to dial 6d4f535bae3ab960 on stream Message (dial tcp [::1]:2380: getsockopt: connection refused)
|
||||
14:13:31.693289 I | rafthttp: peer 6d4f535bae3ab960 became inactive
|
||||
14:13:31.936678 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream Message writer)
|
||||
```
|
||||
|
||||
It's a good idea at this point to [backup the etcd data](../op-guide/maintenance.md#snapshot-backup) to provide a downgrade path should any problems occur:
|
||||
|
||||
```
|
||||
$ etcdctl snapshot save backup.db
|
||||
```
|
||||
|
||||
#### 3. Drop-in etcd v3.3 binary and start the new etcd process
|
||||
|
||||
The new v3.3 etcd will publish its information to the cluster:
|
||||
|
||||
```
|
||||
14:14:25.363225 I | etcdserver: published {Name:s1 ClientURLs:[http://localhost:2379]} to cluster a9ededbffcb1b1f1
|
||||
```
|
||||
|
||||
Verify that each member, and then the entire cluster, becomes healthy with the new v3.3 etcd binary:
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 5.540129ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 7.321771ms
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 10.629901ms
|
||||
```
|
||||
|
||||
Upgraded members will log warnings like the following until the entire cluster is upgraded. This is expected and will cease after all etcd cluster members are upgraded to v3.3:
|
||||
|
||||
```
|
||||
14:15:17.071804 W | etcdserver: member c89feb932daef420 has a higher version 3.3.0
|
||||
14:15:21.073110 W | etcdserver: the local etcd version 3.2.7 is not up-to-date
|
||||
14:15:21.073142 W | etcdserver: member 6d4f535bae3ab960 has a higher version 3.3.0
|
||||
14:15:21.073157 W | etcdserver: the local etcd version 3.2.7 is not up-to-date
|
||||
14:15:21.073164 W | etcdserver: member c89feb932daef420 has a higher version 3.3.0
|
||||
```
|
||||
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
When all members are upgraded, the cluster will report a successful upgrade to 3.3:
|
||||
|
||||
```
|
||||
14:15:54.536901 N | etcdserver/membership: updated the cluster version from 3.2 to 3.3
|
||||
14:15:54.537035 I | etcdserver/api: enabled capabilities for version 3.3
|
||||
```
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 2.312897ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 2.553476ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 2.517902ms
|
||||
```
|
||||
|
||||
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev
|
171
Documentation/upgrades/upgrade_3_4.md
Normal file
@ -0,0 +1,171 @@
|
||||
## Upgrade etcd from 3.3 to 3.4
|
||||
|
||||
In the general case, upgrading from etcd 3.3 to 3.4 can be a zero-downtime, rolling upgrade:
|
||||
- one by one, stop the etcd v3.3 processes and replace them with etcd v3.4 processes
|
||||
- after running all v3.4 processes, new features in v3.4 are available to the cluster
|
||||
|
||||
Before [starting an upgrade](#upgrade-procedure), read through the rest of this guide to prepare.
|
||||
|
||||
### Upgrade checklists
|
||||
|
||||
**NOTE:** When [migrating from v2 with no v3 data](https://github.com/coreos/etcd/issues/9480), etcd server v3.2+ panics when it restores from existing snapshots but finds no v3 `ETCD_DATA_DIR/member/snap/db` file. This happens when the server migrated from v2 with no previous v3 data. It also prevents accidental v3 data loss (e.g. the `db` file might have been moved). etcd requires that the post-migration server only run with v3 data present. Do not upgrade to newer v3 versions until the v3.0 server contains v3 data.
|
||||
|
||||
Highlighted breaking changes in 3.4.
|
||||
|
||||
#### Change in `etcd` flags
|
||||
|
||||
The `--ca-file` and `--peer-ca-file` flags, deprecated since v2.1, are superseded by `--trusted-ca-file` and `--peer-trusted-ca-file`:
|
||||
|
||||
```diff
|
||||
-etcd --ca-file ca-client.crt
|
||||
+etcd --trusted-ca-file ca-client.crt
|
||||
```
|
||||
|
||||
```diff
|
||||
-etcd --peer-ca-file ca-peer.crt
|
||||
+etcd --peer-trusted-ca-file ca-peer.crt
|
||||
```
|
||||
|
||||
#### Change in `pkg/transport`
|
||||
|
||||
The `pkg/transport.TLSInfo.CAFile` field is deprecated in favor of `TrustedCAFile`:
|
||||
|
||||
```diff
|
||||
import "github.com/coreos/etcd/pkg/transport"
|
||||
|
||||
tlsInfo := transport.TLSInfo{
|
||||
CertFile: "/tmp/test-certs/test.pem",
|
||||
KeyFile: "/tmp/test-certs/test-key.pem",
|
||||
- CAFile: "/tmp/test-certs/trusted-ca.pem",
|
||||
+ TrustedCAFile: "/tmp/test-certs/trusted-ca.pem",
|
||||
}
|
||||
tlsConfig, err := tlsInfo.ClientConfig()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Server upgrade checklists
|
||||
|
||||
#### Upgrade requirements
|
||||
|
||||
To upgrade an existing etcd deployment to 3.4, the running cluster must be 3.3 or greater. If it's before 3.3, please [upgrade to 3.3](upgrade_3_3.md) before upgrading to 3.4.
|
||||
|
||||
Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Check the health of the cluster by using the `etcdctl endpoint health` command before proceeding.
|
||||
|
||||
#### Preparation
|
||||
|
||||
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||
|
||||
Before beginning, [backup the etcd data](../op-guide/maintenance.md#snapshot-backup). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to the existing etcd version. Please note that the `snapshot` command only backs up the v3 data. For v2 data, see [backing up the v2 datastore](../v2/admin_guide.md#backing-up-the-datastore).
|
||||
|
||||
#### Mixed versions
|
||||
|
||||
While upgrading, an etcd cluster supports mixed versions of etcd members, and operates with the protocol of the lowest common version. The cluster is only considered upgraded once all of its members are upgraded to version 3.4. Internally, etcd members negotiate with each other to determine the overall cluster version, which controls the reported version and the supported features.
|
||||
|
||||
#### Limitations
|
||||
|
||||
Note: If the cluster only has v3 data and no v2 data, it is not subject to this limitation.
|
||||
|
||||
If the cluster is serving a v2 data set larger than 50MB, each newly upgraded member may take up to two minutes to catch up with the existing cluster. Check the size of a recent snapshot to estimate the total data size. In other words, it is safest to wait for 2 minutes between upgrading each member.
|
||||
|
||||
For a much larger total data size, 100MB or more, this one-time process might take even more time. Administrators of very large etcd clusters of this magnitude can feel free to contact the [etcd team][etcd-contact] before upgrading, and we'll be happy to provide advice on the procedure.
|
||||
|
||||
#### Downgrade
|
||||
|
||||
If all members have been upgraded to v3.4, the cluster will be upgraded to v3.4, and downgrade from this completed state is **not possible**. If any single member is still v3.3, however, the cluster and its operations remain "v3.3", and it is possible from this mixed cluster state to return to using a v3.3 etcd binary on all members.
|
||||
|
||||
Please [backup the data directory](../op-guide/maintenance.md#snapshot-backup) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||
|
||||
### Upgrade procedure
|
||||
|
||||
This example shows how to upgrade a 3-member v3.3 etcd cluster running on a local machine.
|
||||
|
||||
#### 1. Check upgrade requirements
|
||||
|
||||
Is the cluster healthy and running v3.3.x?
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 6.600684ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 8.540064ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 8.763432ms
|
||||
|
||||
$ curl http://localhost:2379/version
|
||||
{"etcdserver":"3.3.0","etcdcluster":"3.3.0"}
|
||||
```
|
||||
|
||||
#### 2. Stop the existing etcd process
|
||||
|
||||
When each etcd process is stopped, expected errors will be logged by other cluster members. This is normal since a cluster member connection has been (temporarily) broken:
|
||||
|
||||
```
|
||||
14:13:31.491746 I | raft: c89feb932daef420 [term 3] received MsgTimeoutNow from 6d4f535bae3ab960 and starts an election to get leadership.
|
||||
14:13:31.491769 I | raft: c89feb932daef420 became candidate at term 4
|
||||
14:13:31.491788 I | raft: c89feb932daef420 received MsgVoteResp from c89feb932daef420 at term 4
|
||||
14:13:31.491797 I | raft: c89feb932daef420 [logterm: 3, index: 9] sent MsgVote request to 6d4f535bae3ab960 at term 4
|
||||
14:13:31.491805 I | raft: c89feb932daef420 [logterm: 3, index: 9] sent MsgVote request to 9eda174c7df8a033 at term 4
|
||||
14:13:31.491815 I | raft: raft.node: c89feb932daef420 lost leader 6d4f535bae3ab960 at term 4
|
||||
14:13:31.524084 I | raft: c89feb932daef420 received MsgVoteResp from 6d4f535bae3ab960 at term 4
|
||||
14:13:31.524108 I | raft: c89feb932daef420 [quorum:2] has received 2 MsgVoteResp votes and 0 vote rejections
|
||||
14:13:31.524123 I | raft: c89feb932daef420 became leader at term 4
|
||||
14:13:31.524136 I | raft: raft.node: c89feb932daef420 elected leader c89feb932daef420 at term 4
|
||||
14:13:31.592650 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream MsgApp v2 reader)
|
||||
14:13:31.592825 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream Message reader)
|
||||
14:13:31.693275 E | rafthttp: failed to dial 6d4f535bae3ab960 on stream Message (dial tcp [::1]:2380: getsockopt: connection refused)
|
||||
14:13:31.693289 I | rafthttp: peer 6d4f535bae3ab960 became inactive
|
||||
14:13:31.936678 W | rafthttp: lost the TCP streaming connection with peer 6d4f535bae3ab960 (stream Message writer)
|
||||
```
|
||||
|
||||
It's a good idea at this point to [backup the etcd data](../op-guide/maintenance.md#snapshot-backup) to provide a downgrade path should any problems occur:
|
||||
|
||||
```
|
||||
$ etcdctl snapshot save backup.db
|
||||
```
|
||||
|
||||
#### 3. Drop-in etcd v3.4 binary and start the new etcd process
|
||||
|
||||
The new v3.4 etcd will publish its information to the cluster:
|
||||
|
||||
```
|
||||
14:14:25.363225 I | etcdserver: published {Name:s1 ClientURLs:[http://localhost:2379]} to cluster a9ededbffcb1b1f1
|
||||
```
|
||||
|
||||
Verify that each member, and then the entire cluster, becomes healthy with the new v3.4 etcd binary:
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 5.540129ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 7.321771ms
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 10.629901ms
|
||||
```
|
||||
|
||||
Upgraded members will log warnings like the following until the entire cluster is upgraded. This is expected and will cease after all etcd cluster members are upgraded to v3.4:
|
||||
|
||||
```
|
||||
14:15:17.071804 W | etcdserver: member c89feb932daef420 has a higher version 3.4.0
|
||||
14:15:21.073110 W | etcdserver: the local etcd version 3.3.0 is not up-to-date
|
||||
14:15:21.073142 W | etcdserver: member 6d4f535bae3ab960 has a higher version 3.4.0
|
||||
14:15:21.073157 W | etcdserver: the local etcd version 3.3.0 is not up-to-date
|
||||
14:15:21.073164 W | etcdserver: member c89feb932daef420 has a higher version 3.4.0
|
||||
```
|
||||
|
||||
#### 4. Repeat step 2 to step 3 for all other members
|
||||
|
||||
#### 5. Finish
|
||||
|
||||
When all members are upgraded, the cluster will report a successful upgrade to 3.4:
|
||||
|
||||
```
|
||||
14:15:54.536901 N | etcdserver/membership: updated the cluster version from 3.3 to 3.4
|
||||
14:15:54.537035 I | etcdserver/api: enabled capabilities for version 3.4
|
||||
```
|
||||
|
||||
```
|
||||
$ ETCDCTL_API=3 etcdctl endpoint health --endpoints=localhost:2379,localhost:22379,localhost:32379
|
||||
localhost:2379 is healthy: successfully committed proposal: took = 2.312897ms
|
||||
localhost:22379 is healthy: successfully committed proposal: took = 2.553476ms
|
||||
localhost:32379 is healthy: successfully committed proposal: took = 2.517902ms
|
||||
```
|
||||
|
||||
[etcd-contact]: https://groups.google.com/forum/#!forum/etcd-dev
|
19
Documentation/upgrades/upgrading-etcd.md
Normal file
@ -0,0 +1,19 @@
|
||||
# Upgrading etcd clusters and applications
|
||||
|
||||
This section contains documents specific to upgrading etcd clusters and applications.
|
||||
|
||||
## Moving from etcd API v2 to API v3
|
||||
* [Migrate applications from using API v2 to API v3][migrate-apps]
|
||||
|
||||
## Upgrading an etcd v3.x cluster
|
||||
* [Upgrade etcd from 3.0 to 3.1][upgrade-3-1]
|
||||
* [Upgrade etcd from 3.1 to 3.2][upgrade-3-2]
|
||||
|
||||
## Upgrading from etcd v2.3
|
||||
* [Upgrade a v2.3 cluster to v3.0][upgrade-cluster]
|
||||
|
||||
|
||||
[migrate-apps]: ../op-guide/v2-migration.md
|
||||
[upgrade-cluster]: upgrade_3_0.md
|
||||
[upgrade-3-1]: upgrade_3_1.md
|
||||
[upgrade-3-2]: upgrade_3_2.md
|
@ -67,13 +67,13 @@ You have successfully started an etcd and written a key to the store.
|
||||
|
||||
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. To maintain compatibility, some etcd configuration and documentation continues to refer to the legacy ports 4001 and 7001, but all new etcd use and discussion should adopt the IANA-assigned ports. The legacy ports 4001 and 7001 will be fully deprecated, and support for their use removed, in future etcd releases.
|
||||
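For instance, a minimal sketch of serving client traffic on the IANA-assigned port (the address is illustrative):

```bash
# client traffic on 2379; peer traffic uses 2380 by default
etcd --listen-client-urls http://10.0.1.10:2379 --advertise-client-urls http://10.0.1.10:2379
```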
|
||||
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
|
||||
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
|
||||
|
||||
### Running local etcd cluster
|
||||
|
||||
First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
|
||||
|
||||
Our [Procfile script](./Procfile) will set up a local example cluster. You can start it with:
|
||||
Our [Procfile script](../../V2Procfile) will set up a local example cluster. You can start it with:
|
||||
|
||||
```sh
|
||||
goreman start
|
||||
@ -162,4 +162,4 @@ Currently only the amd64 architecture is officially supported by `etcd`.
|
||||
|
||||
### License
|
||||
|
||||
etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
||||
etcd is under the Apache 2.0 license. See the [LICENSE](../../LICENSE) file for details.
|
||||
|
@ -216,7 +216,7 @@ To recover from such scenarios, etcd provides functionality to backup and restor
|
||||
|
||||
#### Backing up the datastore
|
||||
|
||||
**NB:** Windows users must stop etcd before running the backup command.
|
||||
**Note:** Windows users must stop etcd before running the backup command.
|
||||
|
||||
The first step of the recovery is to backup the data directory and wal directory, if stored separately, on a functioning etcd node. To do this, use the `etcdctl backup` command, passing in the original data (and wal) directory used by etcd. For example:
|
||||
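The directory paths below are illustrative; adjust them to the deployment's actual data and backup locations.

```bash
# copy the v2 data (and wal, if stored separately) into a fresh backup directory
etcdctl backup --data-dir /var/lib/etcd --backup-dir /tmp/etcd_backup
```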
|
||||
@ -262,7 +262,9 @@ Once you have verified that etcd has started successfully, shut it down and move
|
||||
|
||||
Now that the node is running successfully, [change its advertised peer URLs][update-a-member], as the `--force-new-cluster` option has set the peer URL to the default listening on localhost.
|
||||
|
||||
You can then add more nodes to the cluster and restore resiliency. See the [add a new member][add-a-member] guide for more details. **NB:** If you are trying to restore your cluster using old failed etcd nodes, please make sure you have stopped old etcd instances and removed their old data directories specified by the data-dir configuration parameter.
|
||||
You can then add more nodes to the cluster and restore resiliency. See the [add a new member][add-a-member] guide for more details.
|
||||
|
||||
**Note:** If you are trying to restore your cluster using old failed etcd nodes, please make sure you have stopped old etcd instances and removed their old data directories specified by the data-dir configuration parameter.
|
||||
|
||||
### Client Request Timeout
|
||||
|
||||
|
@ -18,7 +18,7 @@ A key’s lifetime spans a generation. Each key may have one or multiple generat
|
||||
|
||||
### Physical View
|
||||
|
||||
etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store’s state only contains the delta from its previous revision to be efficient. A single revision may correspond to multiple keys in the tree.
|
||||
etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store’s state only contains the delta from its previous revision to be efficient. A single revision may correspond to multiple keys in the tree.
|
||||
|
||||
The key of a key-value pair is a 3-tuple (major, sub, type). Major is the store revision holding the key. Sub differentiates among keys within the same revision. Type is an optional suffix for special values (e.g., `t` if the value contains a tombstone). The value of the key-value pair contains the modification from the previous revision, i.e., one delta from the previous revision. The b+tree is ordered by key in lexical byte order. Ranged lookups over revision deltas are fast; this enables quickly finding modifications from one specific revision to another. Compaction removes out-of-date key-value pairs.
|
||||
|
||||
@ -73,7 +73,7 @@ Any completed operations are durable. All accessible data is also durable data.
|
||||
|
||||
#### Linearizability
|
||||
|
||||
Linearizability (also known as Atomic Consistency or External Consistency) is a consistency level between strict consistency and sequential consistency.
|
||||
Linearizability (also known as Atomic Consistency or External Consistency) is a consistency level between strict consistency and sequential consistency.
|
||||
|
||||
For linearizability, suppose each operation receives a timestamp from a loosely synchronized global clock. Operations are linearized if and only if they always complete as though they were executed in a sequential order and each operation appears to complete in the order specified by the program. Likewise, if an operation’s timestamp precedes another, that operation must also precede the other operation in the sequence.
|
||||
|
||||
@ -83,10 +83,10 @@ etcd does not ensure linearizability for watch operations. Users are expected to
|
||||
|
||||
etcd ensures linearizability for all other operations by default. Linearizability comes with a cost, however, because linearized requests must go through the Raft consensus process. To obtain lower latencies and higher throughput for read requests, clients can configure a request’s consistency mode to `serializable`, which may access stale data with respect to quorum, but removes the performance penalty of linearized accesses' reliance on live consensus.
|
||||
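As an illustrative sketch with the v3 `etcdctl` client (the key name is hypothetical):

```bash
# serializable read: served from the local member without a round of consensus
ETCDCTL_API=3 etcdctl get foo --consistency="s"
```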
|
||||
[persistent-ds]: [https://en.wikipedia.org/wiki/Persistent_data_structure]
|
||||
[btree]: [https://en.wikipedia.org/wiki/B-tree]
|
||||
[b+tree]: [https://en.wikipedia.org/wiki/B%2B_tree]
|
||||
[seq_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency]
|
||||
[strict_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency]
|
||||
[serializable_isolation]: [https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable]
|
||||
[Linearizability]: [#Linearizability]
|
||||
[persistent-ds]: https://en.wikipedia.org/wiki/Persistent_data_structure
|
||||
[btree]: https://en.wikipedia.org/wiki/B-tree
|
||||
[b+tree]: https://en.wikipedia.org/wiki/B%2B_tree
|
||||
[seq_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency
|
||||
[strict_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency
|
||||
[serializable_isolation]: https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable
|
||||
[Linearizability]: #linearizability
|
||||
|
@ -32,7 +32,7 @@ The consistent flag for read operations is removed in etcd 2.0.0. The normal rea
|
||||
|
||||
The read consistency guarantees are:
|
||||
|
||||
The consistent read guarantees sequential consistency within one client that talks to one etcd server. Reads and writes from one client to one etcd member should be observed in order. If one client writes a value to an etcd server successfully, it should be able to get the value out of the server immediately.
|
||||
The consistent read guarantees sequential consistency within one client that talks to one etcd server. Reads and writes from one client to one etcd member should be observed in order. If one client writes a value to an etcd server successfully, it should be able to get the value out of the server immediately.
|
||||
|
||||
Each etcd member will proxy the request to the leader and only return the result to the user after the result is applied on the local member. Thus, after the write succeeds, the user is guaranteed to see the value on the member it sent the request to.
|
||||
|
||||
@ -56,6 +56,7 @@ Proxy mode in 2.0 will provide similar functionality, and with improved control
|
||||
## Discovery Service
|
||||
|
||||
A size key needs to be provided inside a [discovery token][discoverytoken].
|
||||
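As a hedged sketch for a custom discovery service (the URL and token are hypothetical):

```bash
# declare an expected cluster size of 3 under the discovery token
curl -X PUT http://discovery.example.com/v2/keys/discovery/6c007a14875d53d9/_config/size -d value=3
```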
|
||||
[discoverytoken]: clustering.md#custom-etcd-discovery-service
|
||||
|
||||
## HTTP Admin API
|
||||
|
@ -49,4 +49,4 @@ Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send r
|
||||
| 256 | 256 | all servers | 3061 | 119.3 |
|
||||
|
||||
[boom]: https://github.com/rakyll/boom
|
||||
[hack-benchmark]: /hack/benchmark/
|
||||
[hack-benchmark]: ../../../hack/benchmark/
|
||||
|
@ -24,7 +24,7 @@ Go OS/Arch: linux/amd64
|
||||
|
||||
## Testing
|
||||
|
||||
Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool](https://github.com/rakyll/boom) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
|
||||
Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool][boom] with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions][hack] for the patch and the steps to reproduce our procedures.
|
||||
|
||||
The performance is calculated from the results of 100 benchmark rounds.
|
||||
|
||||
@ -66,4 +66,7 @@ The performance is calulated through results of 100 benchmark rounds.
|
||||
|
||||
- Write QPS to cluster leaders seems to be increased by a small margin. This is because the main loop and entry apply loops were decoupled in the etcd raft logic, eliminating several blocks between them.
|
||||
|
||||
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.
|
||||
- Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.
|
||||
|
||||
[boom]: https://github.com/rakyll/boom
|
||||
[hack]: ../../../hack/benchmark/
|
||||
|
@ -69,4 +69,4 @@ Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send r
|
||||
[boom]: https://github.com/rakyll/boom
|
||||
[c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
|
||||
[etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
|
||||
[hack-benchmark]: /hack/benchmark/
|
||||
[hack-benchmark]: ../../../hack/benchmark/
|
||||
|
@ -39,4 +39,4 @@ The performance is nearly the same as the one with empty server handler.
|
||||
The performance with empty server handler is not affected by one put. So the
|
||||
performance downgrade should be caused by storage package.
|
||||
|
||||
[etcd-v3-benchmark]: /tools/benchmark/
|
||||
[etcd-v3-benchmark]: ../../../tools/benchmark/
|
||||
|
@ -423,7 +423,7 @@ To make understanding this feature easier, we changed the naming of some flags,
|
||||
|-peers |none |Deprecated. The --initial-cluster flag provides a similar concept with different semantics. Please read this guide on cluster startup.|
|
||||
|-peers-file |none |Deprecated. The --initial-cluster flag provides a similar concept with different semantics. Please read this guide on cluster startup.|
|
||||
|
||||
[client]: /client
|
||||
[client]: ../../client
|
||||
[client-discoverer]: https://godoc.org/github.com/coreos/etcd/client#Discoverer
|
||||
[conf-adv-client]: configuration.md#-advertise-client-urls
|
||||
[conf-listen-client]: configuration.md#-listen-client-urls
|
||||
|
@ -234,7 +234,7 @@ The security flags help to [build a secure etcd cluster][security].
|
||||
+ env variable: ETCD_DEBUG
|
||||
|
||||
### --log-package-levels
|
||||
+ Set individual etcd subpackages to specific log levels. For example, `etcdserver=WARNING,security=DEBUG`
|
||||
+ Set individual etcd subpackages to specific log levels. For example, `etcdserver=WARNING,security=DEBUG`
|
||||
+ default: none (INFO for all packages)
|
||||
+ env variable: ETCD_LOG_PACKAGE_LEVELS
|
||||
|
||||
@ -272,7 +272,7 @@ Follow the instructions when using these flags.
|
||||
[build-cluster]: clustering.md#static
|
||||
[reconfig]: runtime-configuration.md
|
||||
[discovery]: clustering.md#discovery
|
||||
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
|
||||
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
|
||||
[proxy]: proxy.md
|
||||
[reconfig]: runtime-configuration.md
|
||||
[restore]: admin_guide.md#restoring-a-backup
|
||||
|
@ -112,7 +112,6 @@
|
||||
- [mattn/etcdenv](https://github.com/mattn/etcdenv) - "env" shebang with etcd integration
|
||||
- [kelseyhightower/confd](https://github.com/kelseyhightower/confd) - Manage local app config files using templates and data from etcd
|
||||
- [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
|
||||
- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
|
||||
- [fleet](https://github.com/coreos/fleet) - Distributed init system
|
||||
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - Container cluster manager introduced by Google.
|
||||
- [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Reporting Bugs
|
||||
|
||||
If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
|
||||
If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
|
||||
|
||||
To make your bug report accurate and easy to understand, please try to create bug reports that are:
|
||||
|
||||
|
@ -7,25 +7,25 @@ To prove out the design of the v3 API the team has also built [a number of examp
|
||||
# Design
|
||||
|
||||
1. Flatten binary key-value space
|
||||
|
||||
|
||||
2. Keep the event history until compaction
|
||||
- access to old version of keys
|
||||
- user controlled history compaction
|
||||
|
||||
|
||||
3. Support range query
|
||||
- Pagination support with limit argument
|
||||
- Support consistency guarantee across multiple range queries
|
||||
|
||||
|
||||
4. Replace TTL key with Lease
|
||||
- more efficient, low-cost keep-alive
|
||||
- a logical group of TTL keys
|
||||
|
||||
|
||||
5. Replace CAS/CAD with multi-object Txn
|
||||
- much more powerful and flexible (see the clientv3 sketch after this list)
|
||||
|
||||
|
||||
6. Support efficient watching with multiple ranges
|
||||
|
||||
7. RPC API supports the complete set of APIs.
|
||||
7. RPC API supports the complete set of APIs.
|
||||
- more efficient than JSON/HTTP
|
||||
- additional txn/lease support
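The sketch below is a rough `clientv3` illustration of point 5 (multi-object Txn replacing CAS/CAD); it assumes a member at `127.0.0.1:2379` and loosely mirrors the `TxnRequest` example later in this document: compare the mod_revision of `foo0`, then conditionally put `foo2`.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// If mod_revision(foo0) == 1, put foo2=bar2; otherwise read foo0 back.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.ModRevision("foo0"), "=", 1)).
		Then(clientv3.OpPut("foo2", "bar2")).
		Else(clientv3.OpGet("foo0")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("txn succeeded:", resp.Succeeded)
}
```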
|
||||
|
||||
@ -56,7 +56,7 @@ the size in the future a little bit or make it configurable.
|
||||
// A put is always successful
|
||||
Put( PutRequest { key = foo, value = bar } )
|
||||
|
||||
PutResponse {
|
||||
PutResponse {
|
||||
cluster_id = 0x1000,
|
||||
member_id = 0x1,
|
||||
revision = 1,
|
||||
@ -119,7 +119,7 @@ RangeResponse {
|
||||
Txn(TxnRequest {
|
||||
// mod_revision of foo0 is equal to 1, mod_revision of foo1 is greater than 1
|
||||
compare = {
|
||||
{compareType = equal, key = foo0, mod_revision = 1},
|
||||
{compareType = equal, key = foo0, mod_revision = 1},
|
||||
{compareType = greater, key = foo1, mod_revision = 1}}
|
||||
},
|
||||
// if the comparison succeeds, put foo2 = bar2
|
||||
@ -156,7 +156,7 @@ Watch( WatchRequest{
|
||||
end_revision = 10000,
|
||||
// server decided notification frequency
|
||||
progress_notification = true,
|
||||
}
|
||||
}
|
||||
… // this can be a watch request stream
|
||||
)
|
||||
|
||||
@ -176,7 +176,7 @@ WatchResponse {
|
||||
},
|
||||
}
|
||||
…
|
||||
|
||||
|
||||
// a notification at 2000
|
||||
WatchResponse {
|
||||
cluster_id = 0x1000,
|
||||
@ -185,9 +185,9 @@ WatchResponse {
|
||||
raft_term = 0x1,
|
||||
// nil event as notification
|
||||
}
|
||||
|
||||
…
|
||||
|
||||
|
||||
…
|
||||
|
||||
// put (foo0=bar3000) event at 3000
|
||||
WatchResponse {
|
||||
cluster_id = 0x1000,
|
||||
@ -204,8 +204,8 @@ WatchResponse {
|
||||
},
|
||||
}
|
||||
…
|
||||
|
||||
|
||||
```
|
||||
|
||||
[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
|
||||
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto
|
||||
[api-protobuf]: https://github.com/coreos/etcd/blob/release-2.3/etcdserver/etcdserverpb/rpc.proto
|
||||
[kv-protobuf]: https://github.com/coreos/etcd/blob/release-2.3/storage/storagepb/kv.proto
|
||||
|
@ -188,6 +188,6 @@ Make sure that you sign your certificates with a Subject Name your member's publ
|
||||
If you need your certificate to be signed for your member's FQDN in its Subject Name, you can use Subject Alternative Names (IP SANs for short) to add your IP address as well. The `etcd-ca` tool provides a `--domain=` option for its `new-cert` command, and openssl can do [it][alt-name] too.
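For illustration only, the following Go sketch (standard library only; `infra1.example.com` and `10.0.1.10` are hypothetical names) issues a self-signed certificate whose Subject Alternative Names carry both the member's FQDN and its IP address, which is the effect described above.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"log"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "infra1.example.com"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		// DNS and IP SANs, as discussed above.
		DNSNames:    []string{"infra1.example.com"},
		IPAddresses: []net.IP{net.ParseIP("10.0.1.10")},
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}
```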
|
||||
|
||||
[cfssl]: https://github.com/cloudflare/cfssl
|
||||
[tls-setup]: /hack/tls-setup
|
||||
[tls-setup]: ../../hack/tls-setup
|
||||
[tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
|
||||
[alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
|
||||
|
NEWS
@ -1,54 +1,81 @@
|
||||
etcd v3.0.11 (2016-10-07)
|
||||
- server returns previous key-value (optional)
|
||||
- clientv3 WithPrevKV option
|
||||
- v3 etcdctl prev-kv flag
|
||||
etcd v3.1.0 (2017-01-20)
|
||||
- faster linearizable reads (implements Raft read-index)
|
||||
- automatic leadership transfer when leader steps down
|
||||
- etcd uses default route IP if advertise URL is not given
|
||||
- cluster rejects removing members if quorum will be lost
|
||||
- SRV records (e.g., infra1.example.com) must match the discovery domain
|
||||
(i.e., example.com) if no custom certificate authority is given
|
||||
- TLSConfig ServerName is ignored with user-provided certificates
|
||||
for backwards compatibility; to be deprecated in 3.2
|
||||
- discovery now has upper limit for waiting on retries
|
||||
- etcd flags
|
||||
- --strict-reconfig-check flag is set by default
|
||||
- add --log-output flag
|
||||
- add --metrics flag
|
||||
- v3 authentication API is now stable
|
||||
- v3 client
|
||||
- add SetEndpoints method; update endpoints at runtime
|
||||
- add Sync method; auto-update endpoints at runtime
|
||||
- add Lease TimeToLive API; fetch lease information
|
||||
- replace Config.Logger field with global logger
|
||||
- Get API responses are sorted in ascending order by default
|
||||
- v3 etcdctl
|
||||
- add lease timetolive command
|
||||
- add --print-value-only flag to get command
|
||||
- add --dest-prefix flag to make-mirror command
|
||||
- command get responses are sorted in ascending order by default
|
||||
- recipes now conform to sessions defined in clientv3/concurrency
|
||||
- ACI has symlinks to /usr/local/bin/etcd*
|
||||
- warn on binding listeners through domain names; to be deprecated in 3.2
|
||||
- experimental gRPC proxy feature
|
||||
|
||||
etcd v3.0.16 (2017-01-13)
|
||||
|
||||
etcd v3.0.15 (2016-11-11)
|
||||
- fix cancel watch request with wrong range end
|
||||
|
||||
etcd v3.0.14 (2016-11-04)
|
||||
- v3 etcdctl migrate command now supports --no-ttl flag to discard keys on transform
|
||||
|
||||
etcd v3.0.13 (2016-10-24)
|
||||
|
||||
etcd v3.0.12 (2016-10-07)
|
||||
|
||||
etcd v3.0.11 (2016-10-07)
|
||||
- server returns previous key-value (optional)
|
||||
- clientv3 WithPrevKV option
|
||||
- v3 etcdctl put,watch,del --prev-kv flag
|
||||
|
||||
etcd v3.0.10 (2016-09-23)
|
||||
|
||||
|
||||
etcd v3.0.9 (2016-09-15)
|
||||
|
||||
- warn on domain names on listen URLs (v3.2 will reject domain names)
|
||||
|
||||
- warn on domain names on listen URLs (v3.2 will reject domain names)
|
||||
|
||||
etcd v3.0.8 (2016-09-09)
|
||||
|
||||
- allow only IP addresses in listen URLs (domain names are rejected)
|
||||
|
||||
- allow only IP addresses in listen URLs (domain names are rejected)
|
||||
|
||||
etcd v3.0.7 (2016-08-31)
|
||||
|
||||
- SRV records only allow A records (RFC 2052)
|
||||
|
||||
- SRV records only allow A records (RFC 2052)
|
||||
|
||||
etcd v3.0.6 (2016-08-19)
|
||||
|
||||
|
||||
etcd v3.0.5 (2016-08-19)
|
||||
|
||||
- SRV records (e.g., infra1.example.com) must match the discovery domain
|
||||
(i.e., example.com) when using the default certificate authority.
|
||||
|
||||
- SRV records (e.g., infra1.example.com) must match the discovery domain
|
||||
(i.e., example.com) if no custom certificate authority is given
|
||||
|
||||
etcd v3.0.4 (2016-07-27)
|
||||
|
||||
- v2 auth can now use common name from TLS certificate when --client-cert-auth is enabled
|
||||
- v2 etcdctl ls command now supports --output=json
|
||||
- Add /var/lib/etcd directory to etcd official Docker image
|
||||
|
||||
- v2 auth can now use common name from TLS certificate when --client-cert-auth is enabled
|
||||
- v2 etcdctl ls command now supports --output=json
|
||||
- Add /var/lib/etcd directory to etcd official Docker image
|
||||
|
||||
etcd v3.0.3 (2016-07-15)
|
||||
|
||||
- Revert Dockerfile to use CMD, instead of ENTRYPOINT, to support etcdctl run
|
||||
- Docker commands for v3.0.2 won't work without specifying executable binary paths
|
||||
- v3 etcdctl default endpoints are now 127.0.0.1:2379
|
||||
|
||||
- Revert Dockerfile to use CMD, instead of ENTRYPOINT, to support etcdctl run
|
||||
- Docker commands for v3.0.2 won't work without specifying executable binary paths
|
||||
- v3 etcdctl default endpoints are now 127.0.0.1:2379
|
||||
|
||||
etcd v3.0.2 (2016-07-08)
|
||||
|
||||
- Dockerfile uses ENTRYPOINT, instead of CMD, to run etcd without binary path specified
|
||||
|
||||
- Dockerfile uses ENTRYPOINT, instead of CMD, to run etcd without binary path specified
|
||||
|
||||
etcd v3.0.1 (2016-07-01)
|
||||
|
||||
etcd v3.0.0 (2016-06-30)
|
||||
|
README.md
@ -37,13 +37,14 @@ See [etcdctl][etcdctl] for a simple command line client.
|
||||
|
||||
### Getting etcd
|
||||
|
||||
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
|
||||
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
|
||||
|
||||
For those wanting to try the very latest version, you can [build the latest version of etcd][dl-build] from the `master` branch.
|
||||
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.6+ is required).
|
||||
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.7+ is required).
|
||||
All development occurs on `master`, including new features and bug fixes.
|
||||
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
|
||||
|
||||
[rkt]: https://github.com/coreos/rkt/releases/
|
||||
[github-release]: https://github.com/coreos/etcd/releases/
|
||||
[branch-management]: ./Documentation/branch_management.md
|
||||
[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
|
||||
@ -77,7 +78,7 @@ That's it! etcd is now running and serving client requests. For more
|
||||
|
||||
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
|
||||
|
||||
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
|
||||
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
|
||||
|
||||
### Running a local etcd cluster
|
||||
|
||||
@ -93,6 +94,10 @@ This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd proxy
|
||||
|
||||
Every cluster member and proxy accepts key value reads and key value writes.
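As a quick check, a short `clientv3` sketch (assuming one of the local members exposes a client endpoint on `127.0.0.1:2379`) can write through any member or proxy and read the value back:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Write through whichever member serves this endpoint.
	if _, err := cli.Put(context.Background(), "foo", "bar"); err != nil {
		log.Fatal(err)
	}
	// Read the value back.
	resp, err := cli.Get(context.Background(), "foo")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}
```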
|
||||
|
||||
### Running etcd on Kubernetes
|
||||
|
||||
If you want to run etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
|
||||
|
||||
### Next steps
|
||||
|
||||
Now it's time to dig into the full etcd API and other guides.
|
||||
@ -131,4 +136,3 @@ See [reporting bugs](Documentation/reporting_bugs.md) for details about reportin
|
||||
### License
|
||||
|
||||
etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
|
||||
|
||||
|
ROADMAP.md
@ -6,19 +6,18 @@ This document defines a high level roadmap for etcd development.
|
||||
|
||||
The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
|
||||
|
||||
etcd 3.0 is our current stable branch. The roadmap below outlines new features that will be added to etcd and, while subject to change, defines what the future stable release will look like.
|
||||
etcd 3.1 is our current stable branch. The roadmap below outlines new features that will be added to etcd and, while subject to change, defines what the future stable release will look like.
|
||||
|
||||
### etcd 3.1 (2016-Oct)
|
||||
- Stable L4 gateway
|
||||
- Experimental support for scalable proxy
|
||||
- Automatic leadership transfer for the rolling upgrade
|
||||
- V3 API improvements
|
||||
- Get previous key-value pair
|
||||
- Get only keys (ignore values)
|
||||
- Get only key count
|
||||
|
||||
### etcd 3.2 (2017-Feb)
|
||||
### etcd 3.2 (2017-May)
|
||||
- Stable scalable proxy
|
||||
- JWT token based auth
|
||||
- Proxy-as-client interface passthrough
|
||||
- Lock service
|
||||
- Namespacing proxy
|
||||
- TLS Command Name and JWT token based authentication
|
||||
- Read-modify-write V3 Put
|
||||
- Improved watch performance
|
||||
- ...
|
||||
- Support non-blocking concurrent read
|
||||
|
||||
### etcd 3.3 (?)
|
||||
- TBD
|
||||
|
||||
|
@ -32,7 +32,9 @@ var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.ProtoPackageIsVersion1
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Permission_Type int32
|
||||
|
||||
@ -99,113 +101,113 @@ func init() {
|
||||
proto.RegisterType((*Role)(nil), "authpb.Role")
|
||||
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
|
||||
}
|
||||
func (m *User) Marshal() (data []byte, err error) {
|
||||
func (m *User) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *User) MarshalTo(data []byte) (int, error) {
|
||||
func (m *User) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
data[i] = 0xa
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
|
||||
i += copy(data[i:], m.Name)
|
||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if len(m.Password) > 0 {
|
||||
data[i] = 0x12
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(len(m.Password)))
|
||||
i += copy(data[i:], m.Password)
|
||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
|
||||
i += copy(dAtA[i:], m.Password)
|
||||
}
|
||||
if len(m.Roles) > 0 {
|
||||
for _, s := range m.Roles {
|
||||
data[i] = 0x1a
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
data[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
data[i] = uint8(l)
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(data[i:], s)
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *Permission) Marshal() (data []byte, err error) {
|
||||
func (m *Permission) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Permission) MarshalTo(data []byte) (int, error) {
|
||||
func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.PermType != 0 {
|
||||
data[i] = 0x8
|
||||
dAtA[i] = 0x8
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(m.PermType))
|
||||
i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
data[i] = 0x12
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(len(m.Key)))
|
||||
i += copy(data[i:], m.Key)
|
||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
|
||||
i += copy(dAtA[i:], m.Key)
|
||||
}
|
||||
if len(m.RangeEnd) > 0 {
|
||||
data[i] = 0x1a
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
|
||||
i += copy(data[i:], m.RangeEnd)
|
||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
|
||||
i += copy(dAtA[i:], m.RangeEnd)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *Role) Marshal() (data []byte, err error) {
|
||||
func (m *Role) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Role) MarshalTo(data []byte) (int, error) {
|
||||
func (m *Role) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
data[i] = 0xa
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
|
||||
i += copy(data[i:], m.Name)
|
||||
i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if len(m.KeyPermission) > 0 {
|
||||
for _, msg := range m.KeyPermission {
|
||||
data[i] = 0x12
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintAuth(data, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(data[i:])
|
||||
i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -215,31 +217,31 @@ func (m *Role) MarshalTo(data []byte) (int, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Auth(data []byte, offset int, v uint64) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
data[offset+4] = uint8(v >> 32)
|
||||
data[offset+5] = uint8(v >> 40)
|
||||
data[offset+6] = uint8(v >> 48)
|
||||
data[offset+7] = uint8(v >> 56)
|
||||
func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Auth(data []byte, offset int, v uint32) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintAuth(data []byte, offset int, v uint64) int {
|
||||
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *User) Size() (n int) {
|
||||
@ -308,8 +310,8 @@ func sovAuth(x uint64) (n int) {
|
||||
func sozAuth(x uint64) (n int) {
|
||||
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *User) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *User) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@ -321,7 +323,7 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -349,7 +351,7 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -363,7 +365,7 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
|
||||
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Name == nil {
|
||||
m.Name = []byte{}
|
||||
}
|
||||
@ -380,7 +382,7 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -394,7 +396,7 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
|
||||
m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Password == nil {
|
||||
m.Password = []byte{}
|
||||
}
|
||||
@ -411,7 +413,7 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -426,11 +428,11 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
|
||||
m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipAuth(data[iNdEx:])
|
||||
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -449,8 +451,8 @@ func (m *User) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Permission) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *Permission) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@ -462,7 +464,7 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -490,7 +492,7 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.PermType |= (Permission_Type(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -509,7 +511,7 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -523,7 +525,7 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
|
||||
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Key == nil {
|
||||
m.Key = []byte{}
|
||||
}
|
||||
@ -540,7 +542,7 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -554,14 +556,14 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
|
||||
m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.RangeEnd == nil {
|
||||
m.RangeEnd = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipAuth(data[iNdEx:])
|
||||
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -580,8 +582,8 @@ func (m *Permission) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Role) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
func (m *Role) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
@ -593,7 +595,7 @@ func (m *Role) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -621,7 +623,7 @@ func (m *Role) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -635,7 +637,7 @@ func (m *Role) Unmarshal(data []byte) error {
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
|
||||
m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Name == nil {
|
||||
m.Name = []byte{}
|
||||
}
|
||||
@ -652,7 +654,7 @@ func (m *Role) Unmarshal(data []byte) error {
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -667,13 +669,13 @@ func (m *Role) Unmarshal(data []byte) error {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.KeyPermission = append(m.KeyPermission, &Permission{})
|
||||
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
|
||||
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipAuth(data[iNdEx:])
|
||||
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -692,8 +694,8 @@ func (m *Role) Unmarshal(data []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipAuth(data []byte) (n int, err error) {
|
||||
l := len(data)
|
||||
func skipAuth(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
@ -704,7 +706,7 @@ func skipAuth(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -722,7 +724,7 @@ func skipAuth(data []byte) (n int, err error) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if data[iNdEx-1] < 0x80 {
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -739,7 +741,7 @@ func skipAuth(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -762,7 +764,7 @@ func skipAuth(data []byte) (n int, err error) {
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
@ -773,7 +775,7 @@ func skipAuth(data []byte) (n int, err error) {
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipAuth(data[start:])
|
||||
next, err := skipAuth(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -797,6 +799,8 @@ var (
|
||||
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
|
||||
|
||||
var fileDescriptorAuth = []byte{
|
||||
// 288 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
|
||||
|
@ -49,38 +49,30 @@ func isRangeEqual(a, b *rangePerm) bool {
|
||||
|
||||
// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
|
||||
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
|
||||
func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
|
||||
// TODO(mitake): currently it is O(n^2), we need a better algorithm
|
||||
var newp []*rangePerm
|
||||
|
||||
// It returns a sorted rangePerm slice.
|
||||
func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
|
||||
sort.Sort(RangePermSliceByBegin(perms))
|
||||
var prev *rangePerm
|
||||
for i := range perms {
|
||||
skip := false
|
||||
|
||||
for j := range perms {
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
|
||||
if isRangeEqual(perms[i], perms[j]) {
|
||||
// if ranges are equal, we only keep the first range.
|
||||
if i > j {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
} else if isSubset(perms[i], perms[j]) {
|
||||
// if a range is a strict subset of the other one, we skip the subset.
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if skip {
|
||||
if i == 0 {
|
||||
prev = perms[i]
|
||||
newp = append(newp, perms[i])
|
||||
continue
|
||||
}
|
||||
|
||||
if isRangeEqual(perms[i], prev) {
|
||||
continue
|
||||
}
|
||||
if isSubset(perms[i], prev) {
|
||||
continue
|
||||
}
|
||||
if isSubset(prev, perms[i]) {
|
||||
prev = perms[i]
|
||||
newp[len(newp)-1] = perms[i]
|
||||
continue
|
||||
}
|
||||
prev = perms[i]
|
||||
newp = append(newp, perms[i])
|
||||
}
|
||||
|
||||
return newp
|
||||
}
|
||||
|
||||
@ -88,7 +80,6 @@ func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
|
||||
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
|
||||
var merged []*rangePerm
|
||||
perms = removeSubsetRangePerms(perms)
|
||||
sort.Sort(RangePermSliceByBegin(perms))
|
||||
|
||||
i := 0
|
||||
for i < len(perms) {
|
||||
|
@ -16,6 +16,8 @@ package auth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@ -131,3 +133,47 @@ func TestGetMergedPerms(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveSubsetRangePerms(t *testing.T) {
|
||||
tests := []struct {
|
||||
perms []*rangePerm
|
||||
expect []*rangePerm
|
||||
}{
|
||||
{ // subsets converge
|
||||
[]*rangePerm{{[]byte{2}, []byte{3}}, {[]byte{2}, []byte{5}}, {[]byte{1}, []byte{4}}},
|
||||
[]*rangePerm{{[]byte{1}, []byte{4}}, {[]byte{2}, []byte{5}}},
|
||||
},
|
||||
{ // subsets converge
|
||||
[]*rangePerm{{[]byte{0}, []byte{3}}, {[]byte{0}, []byte{1}}, {[]byte{2}, []byte{4}}, {[]byte{0}, []byte{2}}},
|
||||
[]*rangePerm{{[]byte{0}, []byte{3}}, {[]byte{2}, []byte{4}}},
|
||||
},
|
||||
{ // biggest range at the end
|
||||
[]*rangePerm{{[]byte{2}, []byte{3}}, {[]byte{0}, []byte{2}}, {[]byte{1}, []byte{4}}, {[]byte{0}, []byte{5}}},
|
||||
[]*rangePerm{{[]byte{0}, []byte{5}}},
|
||||
},
|
||||
{ // biggest range at the beginning
|
||||
[]*rangePerm{{[]byte{0}, []byte{5}}, {[]byte{2}, []byte{3}}, {[]byte{0}, []byte{2}}, {[]byte{1}, []byte{4}}},
|
||||
[]*rangePerm{{[]byte{0}, []byte{5}}},
|
||||
},
|
||||
{ // no overlapping ranges
|
||||
[]*rangePerm{{[]byte{2}, []byte{3}}, {[]byte{0}, []byte{1}}, {[]byte{4}, []byte{7}}, {[]byte{8}, []byte{15}}},
|
||||
[]*rangePerm{{[]byte{0}, []byte{1}}, {[]byte{2}, []byte{3}}, {[]byte{4}, []byte{7}}, {[]byte{8}, []byte{15}}},
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
rs := removeSubsetRangePerms(tt.perms)
|
||||
if !reflect.DeepEqual(rs, tt.expect) {
|
||||
t.Fatalf("#%d: unexpected rangePerms %q, got %q", i, printPerms(rs), printPerms(tt.expect))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printPerms(rs []*rangePerm) (txt string) {
|
||||
for i, p := range rs {
|
||||
if i != 0 {
|
||||
txt += ","
|
||||
}
|
||||
txt += fmt.Sprintf("%+v", *p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -21,6 +21,8 @@ import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -28,6 +30,83 @@ const (
|
||||
defaultSimpleTokenLength = 16
|
||||
)
|
||||
|
||||
// var for testing purposes
|
||||
var (
|
||||
simpleTokenTTL = 5 * time.Minute
|
||||
simpleTokenTTLResolution = 1 * time.Second
|
||||
)
|
||||
|
||||
type simpleTokenTTLKeeper struct {
|
||||
tokens map[string]time.Time
|
||||
donec chan struct{}
|
||||
stopc chan struct{}
|
||||
deleteTokenFunc func(string)
|
||||
mu *sync.Mutex
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) stop() {
|
||||
select {
|
||||
case tm.stopc <- struct{}{}:
|
||||
case <-tm.donec:
|
||||
}
|
||||
<-tm.donec
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
|
||||
tm.tokens[token] = time.Now().Add(simpleTokenTTL)
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
|
||||
if _, ok := tm.tokens[token]; ok {
|
||||
tm.tokens[token] = time.Now().Add(simpleTokenTTL)
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
|
||||
delete(tm.tokens, token)
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) run() {
|
||||
tokenTicker := time.NewTicker(simpleTokenTTLResolution)
|
||||
defer func() {
|
||||
tokenTicker.Stop()
|
||||
close(tm.donec)
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-tokenTicker.C:
|
||||
nowtime := time.Now()
|
||||
tm.mu.Lock()
|
||||
for t, tokenendtime := range tm.tokens {
|
||||
if nowtime.After(tokenendtime) {
|
||||
tm.deleteTokenFunc(t)
|
||||
delete(tm.tokens, t)
|
||||
}
|
||||
}
|
||||
tm.mu.Unlock()
|
||||
case <-tm.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (as *authStore) enable() {
|
||||
delf := func(tk string) {
|
||||
if username, ok := as.simpleTokens[tk]; ok {
|
||||
plog.Infof("deleting token %s for user %s", tk, username)
|
||||
delete(as.simpleTokens, tk)
|
||||
}
|
||||
}
|
||||
as.simpleTokenKeeper = &simpleTokenTTLKeeper{
|
||||
tokens: make(map[string]time.Time),
|
||||
donec: make(chan struct{}),
|
||||
stopc: make(chan struct{}),
|
||||
deleteTokenFunc: delf,
|
||||
mu: &as.simpleTokensMu,
|
||||
}
|
||||
go as.simpleTokenKeeper.run()
|
||||
}
|
||||
|
||||
func (as *authStore) GenSimpleToken() (string, error) {
|
||||
ret := make([]byte, defaultSimpleTokenLength)
|
||||
|
||||
@ -45,23 +124,26 @@ func (as *authStore) GenSimpleToken() (string, error) {
|
||||
|
||||
func (as *authStore) assignSimpleTokenToUser(username, token string) {
|
||||
as.simpleTokensMu.Lock()
|
||||
|
||||
_, ok := as.simpleTokens[token]
|
||||
if ok {
|
||||
plog.Panicf("token %s is alredy used", token)
|
||||
}
|
||||
|
||||
as.simpleTokens[token] = username
|
||||
as.simpleTokenKeeper.addSimpleToken(token)
|
||||
as.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (as *authStore) invalidateUser(username string) {
|
||||
if as.simpleTokenKeeper == nil {
|
||||
return
|
||||
}
|
||||
as.simpleTokensMu.Lock()
|
||||
defer as.simpleTokensMu.Unlock()
|
||||
|
||||
for token, name := range as.simpleTokens {
|
||||
if strings.Compare(name, username) == 0 {
|
||||
delete(as.simpleTokens, token)
|
||||
as.simpleTokenKeeper.deleteSimpleToken(token)
|
||||
}
|
||||
}
|
||||
as.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
auth/store.go
@ -20,6 +20,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
@ -29,6 +30,7 @@ import (
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -47,6 +49,7 @@ var (
|
||||
ErrRootUserNotExist = errors.New("auth: root user does not exist")
|
||||
ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
|
||||
ErrUserAlreadyExist = errors.New("auth: user already exists")
|
||||
ErrUserEmpty = errors.New("auth: user name is empty")
|
||||
ErrUserNotFound = errors.New("auth: user not found")
|
||||
ErrRoleAlreadyExist = errors.New("auth: role already exists")
|
||||
ErrRoleNotFound = errors.New("auth: role not found")
|
||||
@ -56,6 +59,7 @@ var (
|
||||
ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
|
||||
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
|
||||
ErrAuthOldRevision = errors.New("auth: revision in header is old")
|
||||
ErrInvalidAuthToken = errors.New("auth: invalid auth token")
|
||||
|
||||
// BcryptCost is the algorithm cost / strength for hashing auth passwords
|
||||
BcryptCost = bcrypt.DefaultCost
|
||||
@ -146,6 +150,15 @@ type AuthStore interface {
|
||||
|
||||
// Revision gets current revision of authStore
|
||||
Revision() uint64
|
||||
|
||||
// CheckPassword checks whether a given pair of username and password is correct
|
||||
CheckPassword(username, password string) (uint64, error)
|
||||
|
||||
// Close does cleanup of AuthStore
|
||||
Close() error
|
||||
|
||||
// AuthInfoFromCtx gets AuthInfo from gRPC's context
|
||||
AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
|
||||
}
|
||||
|
||||
type authStore struct {
|
||||
@ -155,13 +168,33 @@ type authStore struct {
|
||||
|
||||
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
|
||||
|
||||
simpleTokensMu sync.RWMutex
|
||||
simpleTokens map[string]string // token -> username
|
||||
|
||||
revision uint64
|
||||
|
||||
// tokenSimple in v3.2+
|
||||
indexWaiter func(uint64) <-chan struct{}
|
||||
simpleTokenKeeper *simpleTokenTTLKeeper
|
||||
simpleTokensMu sync.Mutex
|
||||
simpleTokens map[string]string // token -> username
|
||||
}
|
||||
|
||||
func newDeleterFunc(as *authStore) func(string) {
|
||||
return func(t string) {
|
||||
as.simpleTokensMu.Lock()
|
||||
defer as.simpleTokensMu.Unlock()
|
||||
if username, ok := as.simpleTokens[t]; ok {
|
||||
plog.Infof("deleting token %s for user %s", t, username)
|
||||
delete(as.simpleTokens, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (as *authStore) AuthEnable() error {
|
||||
as.enabledMu.Lock()
|
||||
defer as.enabledMu.Unlock()
|
||||
if as.enabled {
|
||||
plog.Noticef("Authentication already enabled")
|
||||
return nil
|
||||
}
|
||||
b := as.be
|
||||
tx := b.BatchTx()
|
||||
tx.Lock()
|
||||
@ -181,9 +214,8 @@ func (as *authStore) AuthEnable() error {
|
||||
|
||||
tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
|
||||
|
||||
as.enabledMu.Lock()
|
||||
as.enabled = true
|
||||
as.enabledMu.Unlock()
|
||||
as.enable()
|
||||
|
||||
as.rangePermCache = make(map[string]*unifiedRangePermissions)
|
||||
|
||||
@ -195,6 +227,11 @@ func (as *authStore) AuthEnable() error {
|
||||
}
|
||||
|
||||
func (as *authStore) AuthDisable() {
|
||||
as.enabledMu.Lock()
|
||||
defer as.enabledMu.Unlock()
|
||||
if !as.enabled {
|
||||
return
|
||||
}
|
||||
b := as.be
|
||||
tx := b.BatchTx()
|
||||
tx.Lock()
|
||||
@ -203,17 +240,33 @@ func (as *authStore) AuthDisable() {
|
||||
tx.Unlock()
|
||||
b.ForceCommit()
|
||||
|
||||
as.enabledMu.Lock()
|
||||
as.enabled = false
|
||||
as.enabledMu.Unlock()
|
||||
|
||||
as.simpleTokensMu.Lock()
|
||||
tk := as.simpleTokenKeeper
|
||||
as.simpleTokenKeeper = nil
|
||||
as.simpleTokens = make(map[string]string) // invalidate all tokens
|
||||
as.simpleTokensMu.Unlock()
|
||||
if tk != nil {
|
||||
tk.stop()
|
||||
}
|
||||
|
||||
plog.Noticef("Authentication disabled")
|
||||
}
|
||||
|
||||
func (as *authStore) Close() error {
|
||||
as.enabledMu.Lock()
|
||||
defer as.enabledMu.Unlock()
|
||||
if !as.enabled {
|
||||
return nil
|
||||
}
|
||||
if as.simpleTokenKeeper != nil {
|
||||
as.simpleTokenKeeper.stop()
|
||||
as.simpleTokenKeeper = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
|
||||
if !as.isAuthEnabled() {
|
||||
return nil, ErrAuthNotEnabled
|
||||
@ -232,11 +285,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
|
||||
return nil, ErrAuthFailed
|
||||
}
|
||||
|
||||
if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
|
||||
plog.Noticef("authentication failed, invalid password for user %s", username)
|
||||
return &pb.AuthenticateResponse{}, ErrAuthFailed
|
||||
}
|
||||
|
||||
token := fmt.Sprintf("%s.%d", simpleToken, index)
|
||||
as.assignSimpleTokenToUser(username, token)
|
||||
|
||||
@ -244,6 +292,24 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
|
||||
return &pb.AuthenticateResponse{Token: token}, nil
|
||||
}
|
||||
|
||||
func (as *authStore) CheckPassword(username, password string) (uint64, error) {
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
defer tx.Unlock()
|
||||
|
||||
user := getUser(tx, username)
|
||||
if user == nil {
|
||||
return 0, ErrAuthFailed
|
||||
}
|
||||
|
||||
if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
|
||||
plog.Noticef("authentication failed, invalid password for user %s", username)
|
||||
return 0, ErrAuthFailed
|
||||
}
|
||||
|
||||
return getRevision(tx), nil
|
||||
}
|
||||
|
||||
func (as *authStore) Recover(be backend.Backend) {
|
||||
enabled := false
|
||||
as.be = be
|
||||
@ -266,6 +332,10 @@ func (as *authStore) Recover(be backend.Backend) {
|
||||
}
|
||||
|
||||
func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
||||
if len(r.Name) == 0 {
|
||||
return nil, ErrUserEmpty
|
||||
}
|
||||
|
||||
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to hash password: %s", err)
|
||||
@ -400,11 +470,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
|
||||
if user == nil {
|
||||
return nil, ErrUserNotFound
|
||||
}
|
||||
|
||||
for _, role := range user.Roles {
|
||||
resp.Roles = append(resp.Roles, role)
|
||||
}
|
||||
|
||||
resp.Roles = append(resp.Roles, user.Roles...)
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
@ -470,11 +536,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
|
||||
if role == nil {
|
||||
return nil, ErrRoleNotFound
|
||||
}
|
||||
|
||||
for _, perm := range role.KeyPermission {
|
||||
resp.Perm = append(resp.Perm, perm)
|
||||
}
|
||||
|
||||
resp.Perm = append(resp.Perm, role.KeyPermission...)
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
@ -584,10 +646,14 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
|
||||
}
|
||||
|
||||
func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
|
||||
as.simpleTokensMu.RLock()
|
||||
defer as.simpleTokensMu.RUnlock()
|
||||
t, ok := as.simpleTokens[token]
|
||||
return &AuthInfo{Username: t, Revision: as.revision}, ok
|
||||
// same as '(t *tokenSimple) info' in v3.2+
|
||||
as.simpleTokensMu.Lock()
|
||||
username, ok := as.simpleTokens[token]
|
||||
if ok && as.simpleTokenKeeper != nil {
|
||||
as.simpleTokenKeeper.resetSimpleToken(token)
|
||||
}
|
||||
as.simpleTokensMu.Unlock()
|
||||
return &AuthInfo{Username: username, Revision: as.revision}, ok
|
||||
}
|
||||
|
||||
type permSlice []*authpb.Permission
|
||||
@ -652,6 +718,11 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
|
||||
return nil
|
||||
}
|
||||
|
||||
// only gets rev == 0 when passed AuthInfo{}; no user given
|
||||
if revision == 0 {
|
||||
return ErrUserEmpty
|
||||
}
|
||||
|
||||
if revision < as.revision {
|
||||
return ErrAuthOldRevision
|
||||
}
|
||||
@ -694,6 +765,9 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
|
||||
if !as.isAuthEnabled() {
|
||||
return nil
|
||||
}
|
||||
if authInfo == nil {
|
||||
return ErrUserEmpty
|
||||
}
|
||||
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
@ -812,7 +886,7 @@ func (as *authStore) isAuthEnabled() bool {
|
||||
return as.enabled
|
||||
}
|
||||
|
||||
func NewAuthStore(be backend.Backend) *authStore {
|
||||
func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
|
||||
tx := be.BatchTx()
|
||||
tx.Lock()
|
||||
|
||||
@ -820,13 +894,30 @@ func NewAuthStore(be backend.Backend) *authStore {
|
||||
tx.UnsafeCreateBucket(authUsersBucketName)
|
||||
tx.UnsafeCreateBucket(authRolesBucketName)
|
||||
|
||||
as := &authStore{
|
||||
be: be,
|
||||
simpleTokens: make(map[string]string),
|
||||
revision: 0,
|
||||
enabled := false
|
||||
_, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
|
||||
if len(vs) == 1 {
|
||||
if bytes.Equal(vs[0], authEnabled) {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
as.commitRevision(tx)
|
||||
as := &authStore{
|
||||
be: be,
|
||||
simpleTokens: make(map[string]string),
|
||||
revision: getRevision(tx),
|
||||
indexWaiter: indexWaiter,
|
||||
enabled: enabled,
|
||||
rangePermCache: make(map[string]*unifiedRangePermissions),
|
||||
}
|
||||
|
||||
if enabled {
|
||||
as.enable()
|
||||
}
|
||||
|
||||
if as.revision == 0 {
|
||||
as.commitRevision(tx)
|
||||
}
|
||||
|
||||
tx.Unlock()
|
||||
be.ForceCommit()
|
||||
@ -853,7 +944,8 @@ func (as *authStore) commitRevision(tx backend.BatchTx) {
|
||||
func getRevision(tx backend.BatchTx) uint64 {
|
||||
_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
|
||||
if len(vs) != 1 {
|
||||
plog.Panicf("failed to get the key of auth store revision")
|
||||
// this can happen in the initialization phase
|
||||
return 0
|
||||
}
|
||||
|
||||
return binary.BigEndian.Uint64(vs[0])
|
||||
@ -862,3 +954,46 @@ func getRevision(tx backend.BatchTx) uint64 {
|
||||
func (as *authStore) Revision() uint64 {
|
||||
return as.revision
|
||||
}
|
||||
|
||||
func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
|
||||
splitted := strings.Split(token, ".")
|
||||
if len(splitted) != 2 {
|
||||
return false
|
||||
}
|
||||
index, err := strconv.Atoi(splitted[1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-as.indexWaiter(uint64(index)):
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
|
||||
md, ok := metadata.FromContext(ctx)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ts, tok := md["token"]
|
||||
if !tok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
token := ts[0]
|
||||
if !as.isValidSimpleToken(token, ctx) {
|
||||
return nil, ErrInvalidAuthToken
|
||||
}
|
||||
|
||||
authInfo, uok := as.AuthInfoFromToken(token)
|
||||
if !uok {
|
||||
plog.Warningf("invalid auth token: %s", token)
|
||||
return nil, ErrInvalidAuthToken
|
||||
}
|
||||
return authInfo, nil
|
||||
}
|
||||
|
@ -26,25 +26,38 @@ import (
|
||||
|
||||
func init() { BcryptCost = bcrypt.MinCost }
|
||||
|
||||
func TestUserAdd(t *testing.T) {
|
||||
b, tPath := backend.NewDefaultTmpBackend()
|
||||
defer func() {
|
||||
b.Close()
|
||||
os.Remove(tPath)
|
||||
func dummyIndexWaiter(index uint64) <-chan struct{} {
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
ch <- struct{}{}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
as := NewAuthStore(b)
|
||||
ua := &pb.AuthUserAddRequest{Name: "foo"}
|
||||
_, err := as.UserAdd(ua) // add a non-existing user
|
||||
// TestNewAuthStoreRevision ensures a newly created auth store
|
||||
// keeps the old revision when there are no changes.
|
||||
func TestNewAuthStoreRevision(t *testing.T) {
|
||||
b, tPath := backend.NewDefaultTmpBackend()
|
||||
defer os.Remove(tPath)
|
||||
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = as.UserAdd(ua) // add an existing user
|
||||
if err == nil {
|
||||
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
|
||||
}
|
||||
if err != ErrUserAlreadyExist {
|
||||
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
|
||||
old := as.Revision()
|
||||
b.Close()
|
||||
as.Close()
|
||||
|
||||
// no changes to commit
|
||||
b2 := backend.NewDefaultBackend(tPath)
|
||||
as = NewAuthStore(b2, dummyIndexWaiter)
|
||||
new := as.Revision()
|
||||
b2.Close()
|
||||
as.Close()
|
||||
|
||||
if old != new {
|
||||
t.Fatalf("expected revision %d, got %d", old, new)
|
||||
}
|
||||
}
|
||||
|
||||
@ -67,14 +80,15 @@ func enableAuthAndCreateRoot(as *authStore) error {
|
||||
return as.AuthEnable()
|
||||
}
|
||||
|
||||
func TestAuthenticate(t *testing.T) {
|
||||
func TestCheckPassword(t *testing.T) {
|
||||
b, tPath := backend.NewDefaultTmpBackend()
|
||||
defer func() {
|
||||
b.Close()
|
||||
os.Remove(tPath)
|
||||
}()
|
||||
|
||||
as := NewAuthStore(b)
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
defer as.Close()
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -87,8 +101,7 @@ func TestAuthenticate(t *testing.T) {
|
||||
}
|
||||
|
||||
// auth a non-existing user
|
||||
ctx1 := context.WithValue(context.WithValue(context.TODO(), "index", uint64(1)), "simpleToken", "dummy")
|
||||
_, err = as.Authenticate(ctx1, "foo-test", "bar")
|
||||
_, err = as.CheckPassword("foo-test", "bar")
|
||||
if err == nil {
|
||||
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
|
||||
}
|
||||
@ -97,15 +110,13 @@ func TestAuthenticate(t *testing.T) {
|
||||
}
|
||||
|
||||
// auth an existing user with correct password
|
||||
ctx2 := context.WithValue(context.WithValue(context.TODO(), "index", uint64(2)), "simpleToken", "dummy")
|
||||
_, err = as.Authenticate(ctx2, "foo", "bar")
|
||||
_, err = as.CheckPassword("foo", "bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// auth an existing user but with wrong password
|
||||
ctx3 := context.WithValue(context.WithValue(context.TODO(), "index", uint64(3)), "simpleToken", "dummy")
|
||||
_, err = as.Authenticate(ctx3, "foo", "")
|
||||
_, err = as.CheckPassword("foo", "")
|
||||
if err == nil {
|
||||
t.Fatalf("expected %v, got %v", ErrAuthFailed, err)
|
||||
}
|
||||
@ -121,7 +132,8 @@ func TestUserDelete(t *testing.T) {
|
||||
os.Remove(tPath)
|
||||
}()
|
||||
|
||||
as := NewAuthStore(b)
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
defer as.Close()
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -157,7 +169,8 @@ func TestUserChangePassword(t *testing.T) {
|
||||
os.Remove(tPath)
|
||||
}()
|
||||
|
||||
as := NewAuthStore(b)
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
defer as.Close()
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -202,7 +215,8 @@ func TestRoleAdd(t *testing.T) {
|
||||
os.Remove(tPath)
|
||||
}()
|
||||
|
||||
as := NewAuthStore(b)
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
defer as.Close()
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -222,7 +236,8 @@ func TestUserGrant(t *testing.T) {
|
||||
os.Remove(tPath)
|
||||
}()
|
||||
|
||||
as := NewAuthStore(b)
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
defer as.Close()
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -253,4 +268,93 @@ func TestUserGrant(t *testing.T) {
|
||||
if err != ErrUserNotFound {
|
||||
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
|
||||
}
|
||||
|
||||
// non-admin user
|
||||
err = as.IsAdminPermitted(&AuthInfo{Username: "foo", Revision: 1})
|
||||
if err != ErrPermissionDenied {
|
||||
t.Errorf("expected %v, got %v", ErrPermissionDenied, err)
|
||||
}
|
||||
|
||||
// disabled auth should return nil
|
||||
as.AuthDisable()
|
||||
err = as.IsAdminPermitted(&AuthInfo{Username: "root", Revision: 1})
|
||||
if err != nil {
|
||||
t.Errorf("expected nil, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRecoverFromSnapshot(t *testing.T) {
|
||||
as, _ := setupAuthStore(t)
|
||||
|
||||
ua := &pb.AuthUserAddRequest{Name: "foo"}
|
||||
_, err := as.UserAdd(ua) // add an existing user
|
||||
if err == nil {
|
||||
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
|
||||
}
|
||||
if err != ErrUserAlreadyExist {
|
||||
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
|
||||
}
|
||||
|
||||
ua = &pb.AuthUserAddRequest{Name: ""}
|
||||
_, err = as.UserAdd(ua) // add a user with empty name
|
||||
if err != ErrUserEmpty {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
as.Close()
|
||||
|
||||
as2 := NewAuthStore(as.be, dummyIndexWaiter)
|
||||
defer func(a *authStore) {
|
||||
a.Close()
|
||||
}(as2)
|
||||
|
||||
if !as2.isAuthEnabled() {
|
||||
t.Fatal("recovering authStore from existing backend failed")
|
||||
}
|
||||
|
||||
ul, err := as.UserList(&pb.AuthUserListRequest{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !contains(ul.Users, "root") {
|
||||
t.Errorf("expected %v in %v", "root", ul.Users)
|
||||
}
|
||||
}
|
||||
|
||||
func contains(array []string, str string) bool {
|
||||
for _, s := range array {
|
||||
if s == str {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) {
|
||||
b, tPath := backend.NewDefaultTmpBackend()
|
||||
|
||||
as := NewAuthStore(b, dummyIndexWaiter)
|
||||
err := enableAuthAndCreateRoot(as)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// adds a new role
|
||||
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ua := &pb.AuthUserAddRequest{Name: "foo", Password: "bar"}
|
||||
_, err = as.UserAdd(ua) // add a non-existing user
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tearDown := func(t *testing.T) {
|
||||
b.Close()
|
||||
os.Remove(tPath)
|
||||
as.Close()
|
||||
}
|
||||
return as, tearDown
|
||||
}
|
||||
|
5 build
@ -11,8 +11,7 @@ if [ ! -z "$FAILPOINTS" ]; then
|
||||
GIT_SHA="$GIT_SHA"-FAILPOINTS
|
||||
fi
|
||||
|
||||
# Set GO_LDFLAGS="" for building with all symbols for debugging.
|
||||
if [ -z "${GO_LDFLAGS+x}" ]; then GO_LDFLAGS="-s"; fi
|
||||
# Set GO_LDFLAGS="-s" for building without symbols for debugging.
|
||||
GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}"
|
||||
|
||||
# enable/disable failpoints
|
||||
@ -49,7 +48,7 @@ etcd_setup_gopath() {
|
||||
GOPATH=":$GOPATH"
|
||||
fi
|
||||
export GOPATH=${etcdGOPATH}$GOPATH
|
||||
rm -f ${etcdGOPATH}/src
|
||||
rm -rf ${etcdGOPATH}/src
|
||||
mkdir -p ${etcdGOPATH}
|
||||
ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src
|
||||
}
|
||||
|
63 build.ps1
@ -1,16 +1,18 @@
|
||||
$ORG_PATH="github.com/coreos"
|
||||
$REPO_PATH="$ORG_PATH/etcd"
|
||||
$PWD = $((Get-Item -Path ".\" -Verbose).FullName)
|
||||
$GO_LDFLAGS="-s"
|
||||
$FSROOT = $((Get-Location).Drive.Name+":")
|
||||
$FSYS = $((Get-WMIObject win32_logicaldisk -filter "DeviceID = '$FSROOT'").filesystem)
|
||||
|
||||
# Set $Env:GO_LDFLAGS=" "(space) for building with all symbols for debugging.
|
||||
if ($Env:GO_LDFLAGS.length -gt 0) {
|
||||
$GO_LDFLAGS=$Env:GO_LDFLAGS
|
||||
if ($FSYS.StartsWith("FAT","CurrentCultureIgnoreCase")) {
|
||||
echo "Error: Cannot build etcd using the $FSYS filesystem (use NTFS instead)"
|
||||
exit 1
|
||||
}
|
||||
$GO_LDFLAGS="$GO_LDFLAGS -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA"
|
||||
|
||||
# Set $Env:GO_LDFLAGS="-s" for building without symbols.
|
||||
$GO_LDFLAGS="$Env:GO_LDFLAGS -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA"
|
||||
|
||||
# rebuild symlinks
|
||||
echo "Rebuilding symlinks"
|
||||
git ls-files -s cmd | select-string -pattern 120000 | ForEach {
|
||||
$l = $_.ToString()
|
||||
$lnkname = $l.Split(' ')[1]
|
||||
@ -20,27 +22,54 @@ git ls-files -s cmd | select-string -pattern 120000 | ForEach {
|
||||
|
||||
$terms = $lnkname.Split("\")
|
||||
$dirname = $terms[0..($terms.length-2)] -join "\"
|
||||
|
||||
$lnkname = "$PWD\$lnkname"
|
||||
$targetAbs = "$((Get-Item -Path "$dirname\$target").FullName)"
|
||||
$targetAbs = $targetAbs.Replace("/", "\")
|
||||
|
||||
if (test-path -pathtype container "$targetAbs") {
|
||||
# rd so deleting junction doesn't take files with it
|
||||
cmd /c rd "$lnkname"
|
||||
cmd /c del /A /F "$lnkname"
|
||||
cmd /c mklink /J "$lnkname" "$targetAbs"
|
||||
if (Test-Path "$lnkname") {
|
||||
if ((Get-Item "$lnkname") -is [System.IO.DirectoryInfo]) {
|
||||
# rd so deleting junction doesn't take files with it
|
||||
cmd /c rd "$lnkname"
|
||||
}
|
||||
}
|
||||
if (Test-Path "$lnkname") {
|
||||
if (!((Get-Item "$lnkname") -is [System.IO.DirectoryInfo])) {
|
||||
cmd /c del /A /F "$lnkname"
|
||||
}
|
||||
}
|
||||
cmd /c mklink /J "$lnkname" "$targetAbs" ">NUL"
|
||||
} else {
|
||||
cmd /c del /A /F "$lnkname"
|
||||
cmd /c mklink /H "$lnkname" "$targetAbs"
|
||||
# Remove file with symlink data (first run)
|
||||
if (Test-Path "$lnkname") {
|
||||
cmd /c del /A /F "$lnkname"
|
||||
}
|
||||
cmd /c mklink /H "$lnkname" "$targetAbs" ">NUL"
|
||||
}
|
||||
}
|
||||
|
||||
if (-not $env:GOPATH) {
|
||||
$orgpath="$PWD\gopath\src\" + $ORG_PATH.Replace("/", "\")
|
||||
cmd /c rd "$orgpath\etcd"
|
||||
cmd /c del "$orgpath"
|
||||
cmd /c mkdir "$orgpath"
|
||||
cmd /c mklink /J "$orgpath\etcd" "$PWD"
|
||||
if (Test-Path "$orgpath\etcd") {
|
||||
if ((Get-Item "$orgpath\etcd") -is [System.IO.DirectoryInfo]) {
|
||||
# rd so deleting junction doesn't take files with it
|
||||
cmd /c rd "$orgpath\etcd"
|
||||
}
|
||||
}
|
||||
if (Test-Path "$orgpath") {
|
||||
if ((Get-Item "$orgpath") -is [System.IO.DirectoryInfo]) {
|
||||
# rd so deleting junction doesn't take files with it
|
||||
cmd /c rd "$orgpath"
|
||||
}
|
||||
}
|
||||
if (Test-Path "$orgpath") {
|
||||
if (!((Get-Item "$orgpath") -is [System.IO.DirectoryInfo])) {
|
||||
# Remove file with symlink data (first run)
|
||||
cmd /c del /A /F "$orgpath"
|
||||
}
|
||||
}
|
||||
cmd /c mkdir "$orgpath"
|
||||
cmd /c mklink /J "$orgpath\etcd" "$PWD" ">NUL"
|
||||
$env:GOPATH = "$PWD\gopath"
|
||||
}
|
||||
|
||||
|
@ -114,4 +114,4 @@ if err != nil {

3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.

4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265).
4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
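The quorum-read workaround in the updated point 4 maps to the v2 client's GetOptions. Below is a minimal sketch, not taken from this diff, assuming a single local endpoint at http://127.0.0.1:2379 and a placeholder key /foo:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Quorum: true routes the read through the leader, so an isolated
	// member cannot serve stale data for this request.
	resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s = %s", resp.Node.Key, resp.Node.Value)
}
```

The trade-off is the one the limitation describes: a quorum read costs an extra round through consensus, while a plain read stays local to the contacted member.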
145 client/client.go
@ -22,7 +22,6 @@ import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
@ -261,53 +260,67 @@ type httpClusterClient struct {
|
||||
selectionMode EndpointSelectionMode
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
|
||||
mAPI := NewMembersAPI(c)
|
||||
leader, err := mAPI.Leader(context.Background())
|
||||
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
|
||||
ceps := make([]url.URL, len(eps))
|
||||
copy(ceps, eps)
|
||||
|
||||
// To perform a lookup on the new endpoint list without using the current
|
||||
// client, we'll copy it
|
||||
clientCopy := &httpClusterClient{
|
||||
clientFactory: c.clientFactory,
|
||||
credentials: c.credentials,
|
||||
rand: c.rand,
|
||||
|
||||
pinned: 0,
|
||||
endpoints: ceps,
|
||||
}
|
||||
|
||||
mAPI := NewMembersAPI(clientCopy)
|
||||
leader, err := mAPI.Leader(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(leader.ClientURLs) == 0 {
|
||||
return "", ErrNoLeaderEndpoint
|
||||
}
|
||||
|
||||
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
||||
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
|
||||
if len(eps) == 0 {
|
||||
return ErrNoEndpoints
|
||||
return []url.URL{}, ErrNoEndpoints
|
||||
}
|
||||
|
||||
neps := make([]url.URL, len(eps))
|
||||
for i, ep := range eps {
|
||||
u, err := url.Parse(ep)
|
||||
if err != nil {
|
||||
return err
|
||||
return []url.URL{}, err
|
||||
}
|
||||
neps[i] = *u
|
||||
}
|
||||
return neps, nil
|
||||
}
|
||||
|
||||
switch c.selectionMode {
|
||||
case EndpointSelectionRandom:
|
||||
c.endpoints = shuffleEndpoints(c.rand, neps)
|
||||
c.pinned = 0
|
||||
case EndpointSelectionPrioritizeLeader:
|
||||
c.endpoints = neps
|
||||
lep, err := c.getLeaderEndpoint()
|
||||
if err != nil {
|
||||
return ErrNoLeaderEndpoint
|
||||
}
|
||||
|
||||
for i := range c.endpoints {
|
||||
if c.endpoints[i].String() == lep {
|
||||
c.pinned = i
|
||||
break
|
||||
}
|
||||
}
|
||||
// If endpoints doesn't have the lu, just keep c.pinned = 0.
|
||||
// Forwarding between follower and leader would be required but it works.
|
||||
default:
|
||||
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
||||
neps, err := c.parseEndpoints(eps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
c.endpoints = shuffleEndpoints(c.rand, neps)
|
||||
// We're not doing anything for PrioritizeLeader here. This is
|
||||
// due to not having a context meaning we can't call getLeaderEndpoint
|
||||
// However, if you're using PrioritizeLeader, you've already been told
|
||||
// to regularly call sync, where we do have a ctx, and can figure the
|
||||
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
|
||||
// with it
|
||||
c.pinned = 0
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
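The comment above leans on callers of EndpointSelectionPrioritizeLeader running Sync regularly. A hedged sketch, not from this diff, of what that usage might look like with AutoSync; the endpoint addresses and interval are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:     []string{"http://127.0.0.1:2379", "http://127.0.0.1:22379"},
		SelectionMode: client.EndpointSelectionPrioritizeLeader,
	})
	if err != nil {
		log.Fatal(err)
	}

	// AutoSync calls Sync on an interval; with PrioritizeLeader, Sync is
	// where the leader endpoint is looked up and pinned, since SetEndpoints
	// alone only resets the pin to 0.
	go func() {
		if err := c.AutoSync(context.Background(), 10*time.Second); err != nil {
			log.Println("auto sync stopped:", err)
		}
	}()

	kapi := client.NewKeysAPI(c)
	if _, err := kapi.Set(context.Background(), "/ping", "pong", nil); err != nil {
		log.Fatal(err)
	}
}
```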
@ -401,27 +414,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
var eps []string
|
||||
for _, m := range ms {
|
||||
eps = append(eps, m.ClientURLs...)
|
||||
}
|
||||
sort.Sort(sort.StringSlice(eps))
|
||||
|
||||
ceps := make([]string, len(c.endpoints))
|
||||
for i, cep := range c.endpoints {
|
||||
ceps[i] = cep.String()
|
||||
}
|
||||
sort.Sort(sort.StringSlice(ceps))
|
||||
// fast path if no change happens
|
||||
// this helps client to pin the endpoint when no cluster change
|
||||
if reflect.DeepEqual(eps, ceps) {
|
||||
return nil
|
||||
neps, err := c.parseEndpoints(eps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.SetEndpoints(eps)
|
||||
npin := 0
|
||||
|
||||
switch c.selectionMode {
|
||||
case EndpointSelectionRandom:
|
||||
c.RLock()
|
||||
eq := endpointsEqual(c.endpoints, neps)
|
||||
c.RUnlock()
|
||||
|
||||
if eq {
|
||||
return nil
|
||||
}
|
||||
// When items in the endpoint list changes, we choose a new pin
|
||||
neps = shuffleEndpoints(c.rand, neps)
|
||||
case EndpointSelectionPrioritizeLeader:
|
||||
nle, err := c.getLeaderEndpoint(ctx, neps)
|
||||
if err != nil {
|
||||
return ErrNoLeaderEndpoint
|
||||
}
|
||||
|
||||
for i, n := range neps {
|
||||
if n.String() == nle {
|
||||
npin = i
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.endpoints = neps
|
||||
c.pinned = npin
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
||||
@ -607,3 +644,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
|
||||
}
|
||||
return neps
|
||||
}
|
||||
|
||||
func endpointsEqual(left, right []url.URL) bool {
|
||||
if len(left) != len(right) {
|
||||
return false
|
||||
}
|
||||
|
||||
sLeft := make([]string, len(left))
|
||||
sRight := make([]string, len(right))
|
||||
for i, l := range left {
|
||||
sLeft[i] = l.String()
|
||||
}
|
||||
for i, r := range right {
|
||||
sRight[i] = r.String()
|
||||
}
|
||||
|
||||
sort.Strings(sLeft)
|
||||
sort.Strings(sRight)
|
||||
for i := range sLeft {
|
||||
if sLeft[i] != sRight[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
@ -900,6 +900,90 @@ func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestHTTPClusterClientSyncUnpinEndpoint tests that Sync() unpins the endpoint when
|
||||
// it gets a different member list than before.
|
||||
func TestHTTPClusterClientSyncUnpinEndpoint(t *testing.T) {
|
||||
cf := newStaticHTTPClientFactory([]staticHTTPResponse{
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
|
||||
},
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"members":[{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
|
||||
},
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
|
||||
},
|
||||
})
|
||||
|
||||
hc := &httpClusterClient{
|
||||
clientFactory: cf,
|
||||
rand: rand.New(rand.NewSource(0)),
|
||||
}
|
||||
err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error during setup: %#v", err)
|
||||
}
|
||||
wants := []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
err = hc.Sync(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
|
||||
}
|
||||
|
||||
if g := hc.endpoints[hc.pinned]; g.String() != wants[i] {
|
||||
t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, wants[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestHTTPClusterClientSyncPinLeaderEndpoint tests that Sync() pins the leader
|
||||
// when the selection mode is EndpointSelectionPrioritizeLeader
|
||||
func TestHTTPClusterClientSyncPinLeaderEndpoint(t *testing.T) {
|
||||
cf := newStaticHTTPClientFactory([]staticHTTPResponse{
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
|
||||
},
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]}`),
|
||||
},
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
|
||||
},
|
||||
{
|
||||
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
|
||||
body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}`),
|
||||
},
|
||||
})
|
||||
|
||||
hc := &httpClusterClient{
|
||||
clientFactory: cf,
|
||||
rand: rand.New(rand.NewSource(0)),
|
||||
selectionMode: EndpointSelectionPrioritizeLeader,
|
||||
endpoints: []url.URL{{}}, // Need somewhere to pretend to send to initially
|
||||
}
|
||||
|
||||
wants := []string{"http://127.0.0.1:4003", "http://127.0.0.1:4002"}
|
||||
|
||||
for i, want := range wants {
|
||||
err := hc.Sync(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
|
||||
}
|
||||
|
||||
pinned := hc.endpoints[hc.pinned].String()
|
||||
if pinned != want {
|
||||
t.Errorf("#%d: pinned endpoint = %v, want %v", i, pinned, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPClusterClientResetFail(t *testing.T) {
|
||||
tests := [][]string{
|
||||
// need at least one endpoint
|
||||
|
@ -34,7 +34,7 @@ import (
|
||||
func TestV2NoRetryEOF(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
// generate an EOF response; specify address so appears first in sorted ep list
|
||||
lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("eof:123.%d.sock", os.Getpid()))
|
||||
lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
|
||||
defer lEOF.Close()
|
||||
tries := uint32(0)
|
||||
go func() {
|
||||
@ -65,8 +65,7 @@ func TestV2NoRetryEOF(t *testing.T) {
|
||||
// TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
|
||||
func TestV2NoRetryNoLeader(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("errHttp:123.%d.sock", os.Getpid()))
|
||||
lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
|
||||
eh := &errHandler{errCode: http.StatusServiceUnavailable}
|
||||
srv := httptest.NewUnstartedServer(eh)
|
||||
defer lHttp.Close()
|
||||
|
File diff suppressed because it is too large
@ -272,6 +272,10 @@ type Response struct {
|
||||
// Index holds the cluster-level index at the time the Response was generated.
|
||||
// This index is not tied to the Node(s) contained in this Response.
|
||||
Index uint64 `json:"-"`
|
||||
|
||||
// ClusterID holds the cluster-level ID reported by the server. This
|
||||
// should be different for different etcd clusters.
|
||||
ClusterID string `json:"-"`
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
@ -665,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
|
@ -673,23 +673,24 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
|
||||
expiration.UnmarshalText([]byte("2015-04-07T04:40:23.044979686Z"))
|
||||
|
||||
tests := []struct {
|
||||
hdr string
|
||||
body string
|
||||
wantRes *Response
|
||||
wantErr bool
|
||||
indexHdr string
|
||||
clusterIDHdr string
|
||||
body string
|
||||
wantRes *Response
|
||||
wantErr bool
|
||||
}{
|
||||
// Neither PrevNode or Node
|
||||
{
|
||||
hdr: "1",
|
||||
body: `{"action":"delete"}`,
|
||||
wantRes: &Response{Action: "delete", Index: 1},
|
||||
wantErr: false,
|
||||
indexHdr: "1",
|
||||
body: `{"action":"delete"}`,
|
||||
wantRes: &Response{Action: "delete", Index: 1},
|
||||
wantErr: false,
|
||||
},
|
||||
|
||||
// PrevNode
|
||||
{
|
||||
hdr: "15",
|
||||
body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
|
||||
indexHdr: "15",
|
||||
body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
|
||||
wantRes: &Response{
|
||||
Action: "delete",
|
||||
Index: 15,
|
||||
@ -706,8 +707,8 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
|
||||
|
||||
// Node
|
||||
{
|
||||
hdr: "15",
|
||||
body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`,
|
||||
indexHdr: "15",
|
||||
body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`,
|
||||
wantRes: &Response{
|
||||
Action: "get",
|
||||
Index: 15,
|
||||
@ -726,8 +727,9 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
|
||||
|
||||
// Node Dir
|
||||
{
|
||||
hdr: "15",
|
||||
body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`,
|
||||
indexHdr: "15",
|
||||
clusterIDHdr: "abcdef",
|
||||
body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`,
|
||||
wantRes: &Response{
|
||||
Action: "get",
|
||||
Index: 15,
|
||||
@ -737,15 +739,16 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
|
||||
ModifiedIndex: 12,
|
||||
CreatedIndex: 10,
|
||||
},
|
||||
PrevNode: nil,
|
||||
PrevNode: nil,
|
||||
ClusterID: "abcdef",
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
|
||||
// PrevNode and Node
|
||||
{
|
||||
hdr: "15",
|
||||
body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
|
||||
indexHdr: "15",
|
||||
body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
|
||||
wantRes: &Response{
|
||||
Action: "update",
|
||||
Index: 15,
|
||||
@ -767,24 +770,24 @@ func TestUnmarshalSuccessfulResponse(t *testing.T) {
|
||||
|
||||
// Garbage in body
|
||||
{
|
||||
hdr: "",
|
||||
body: `garbage`,
|
||||
wantRes: nil,
|
||||
wantErr: true,
|
||||
indexHdr: "",
|
||||
body: `garbage`,
|
||||
wantRes: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
|
||||
// non-integer index
|
||||
{
|
||||
hdr: "poo",
|
||||
body: `{}`,
|
||||
wantRes: nil,
|
||||
wantErr: true,
|
||||
indexHdr: "poo",
|
||||
body: `{}`,
|
||||
wantRes: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
h := make(http.Header)
|
||||
h.Add("X-Etcd-Index", tt.hdr)
|
||||
h.Add("X-Etcd-Index", tt.indexHdr)
|
||||
res, err := unmarshalSuccessfulKeysResponse(h, []byte(tt.body))
|
||||
if tt.wantErr != (err != nil) {
|
||||
t.Errorf("#%d: wantErr=%t, err=%v", i, tt.wantErr, err)
|
||||
|
@ -72,6 +72,10 @@ if err != nil {
}
```

## Metrics

The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
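As a companion to the Metrics note above, here is a minimal sketch (not part of this diff) of scraping client RPC metrics. It assumes a reachable etcd at 127.0.0.1:2379, and the :2112 metrics address is arbitrary. Because the client registers the go-grpc-prometheus interceptors itself on dial, exposing the default Prometheus handler is enough:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Issue a request so the RPC counters have something to show.
	if _, err := cli.Put(context.Background(), "foo", "bar"); err != nil {
		log.Fatal(err)
	}

	// Scrape http://localhost:2112/metrics for grpc_client_* series.
	http.Handle("/metrics", prometheus.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```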

## Examples

More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
@ -116,12 +116,12 @@ func NewAuth(c *Client) Auth {
|
||||
}
|
||||
|
||||
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
|
||||
return (*AuthEnableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
|
||||
return (*AuthDisableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
|
@ -21,8 +21,14 @@ import (
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||
// any active connection to endpoints at the time.
|
||||
// This error is returned only when opts.BlockingWait is true.
|
||||
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
|
||||
|
||||
// simpleBalancer does the bare minimum to expose multiple eps
|
||||
// to the grpc reconnection code path
|
||||
type simpleBalancer struct {
|
||||
@ -162,6 +168,25 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||
|
||||
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||
var addr string
|
||||
|
||||
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||
// an address it has notified via Notify immediately instead of blocking.
|
||||
if !opts.BlockingWait {
|
||||
b.mu.RLock()
|
||||
closed := b.closed
|
||||
addr = b.pinAddr
|
||||
upEps := len(b.upEps)
|
||||
b.mu.RUnlock()
|
||||
if closed {
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
}
|
||||
|
||||
if upEps == 0 {
|
||||
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||
}
|
||||
return grpc.Address{Addr: addr}, func() {}, nil
|
||||
}
|
||||
|
||||
for {
|
||||
b.mu.RLock()
|
||||
ch := b.upc
|
||||
|
106
clientv3/balancer_test.go
Normal file
106
clientv3/balancer_test.go
Normal file
@ -0,0 +1,106 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
|
||||
)
|
||||
|
||||
func TestBalancerGetUnblocking(t *testing.T) {
|
||||
sb := newSimpleBalancer(endpoints)
|
||||
unblockingOpts := grpc.BalancerGetOptions{BlockingWait: false}
|
||||
|
||||
_, _, err := sb.Get(context.Background(), unblockingOpts)
|
||||
if err != ErrNoAddrAvilable {
|
||||
t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
|
||||
}
|
||||
|
||||
down1 := sb.Up(grpc.Address{Addr: endpoints[1]})
|
||||
down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
|
||||
addrFirst, putFun, err := sb.Get(context.Background(), unblockingOpts)
|
||||
if err != nil {
|
||||
t.Errorf("Get() with up endpoints should success, got %v", err)
|
||||
}
|
||||
if addrFirst.Addr != endpoints[1] {
|
||||
t.Errorf("Get() didn't return expected address, got %v", addrFirst)
|
||||
}
|
||||
if putFun == nil {
|
||||
t.Errorf("Get() returned unexpected nil put function")
|
||||
}
|
||||
addrSecond, _, _ := sb.Get(context.Background(), unblockingOpts)
|
||||
if addrFirst.Addr != addrSecond.Addr {
|
||||
t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
|
||||
}
|
||||
|
||||
down1(errors.New("error"))
|
||||
down2(errors.New("error"))
|
||||
_, _, err = sb.Get(context.Background(), unblockingOpts)
|
||||
if err != ErrNoAddrAvilable {
|
||||
t.Errorf("Get() with no up endpoints should return ErrNoAddrAvailable, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBalancerGetBlocking(t *testing.T) {
|
||||
sb := newSimpleBalancer(endpoints)
|
||||
blockingOpts := grpc.BalancerGetOptions{BlockingWait: true}
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
|
||||
_, _, err := sb.Get(ctx, blockingOpts)
|
||||
if err != context.DeadlineExceeded {
|
||||
t.Errorf("Get() with no up endpoints should timeout, got %v", err)
|
||||
}
|
||||
|
||||
downC := make(chan func(error), 1)
|
||||
|
||||
go func() {
|
||||
// ensure sb.Up() will be called after sb.Get() to see if Up() releases blocking Get()
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
downC <- sb.Up(grpc.Address{Addr: endpoints[1]})
|
||||
}()
|
||||
addrFirst, putFun, err := sb.Get(context.Background(), blockingOpts)
|
||||
if err != nil {
|
||||
t.Errorf("Get() with up endpoints should success, got %v", err)
|
||||
}
|
||||
if addrFirst.Addr != endpoints[1] {
|
||||
t.Errorf("Get() didn't return expected address, got %v", addrFirst)
|
||||
}
|
||||
if putFun == nil {
|
||||
t.Errorf("Get() returned unexpected nil put function")
|
||||
}
|
||||
down1 := <-downC
|
||||
|
||||
down2 := sb.Up(grpc.Address{Addr: endpoints[2]})
|
||||
addrSecond, _, _ := sb.Get(context.Background(), blockingOpts)
|
||||
if addrFirst.Addr != addrSecond.Addr {
|
||||
t.Errorf("Get() didn't return the same address as previous call, got %v and %v", addrFirst, addrSecond)
|
||||
}
|
||||
|
||||
down1(errors.New("error"))
|
||||
down2(errors.New("error"))
|
||||
ctx, _ = context.WithTimeout(context.Background(), time.Millisecond*100)
|
||||
_, _, err = sb.Get(ctx, blockingOpts)
|
||||
if err != context.DeadlineExceeded {
|
||||
t.Errorf("Get() with no up endpoints should timeout, got %v", err)
|
||||
}
|
||||
}
|
@ -21,9 +21,11 @@ import (
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
@ -45,11 +47,12 @@ type Client struct {
|
||||
Auth
|
||||
Maintenance
|
||||
|
||||
conn *grpc.ClientConn
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *simpleBalancer
|
||||
retryWrapper retryRpcFunc
|
||||
conn *grpc.ClientConn
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *simpleBalancer
|
||||
retryWrapper retryRpcFunc
|
||||
retryAuthWrapper retryRpcFunc
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
@ -58,6 +61,8 @@ type Client struct {
|
||||
Username string
|
||||
// Password is a password for authentication
|
||||
Password string
|
||||
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||
tokenCred *authTokenCredential
|
||||
}
|
||||
|
||||
// New creates a new etcdv3 client from a given configuration.
|
||||
@ -86,6 +91,8 @@ func NewFromConfigFile(path string) (*Client, error) {
|
||||
// Close shuts down the client's etcd connections.
|
||||
func (c *Client) Close() error {
|
||||
c.cancel()
|
||||
c.Watcher.Close()
|
||||
c.Lease.Close()
|
||||
return toErr(c.ctx, c.conn.Close())
|
||||
}
|
||||
|
||||
@ -95,7 +102,12 @@ func (c *Client) Close() error {
|
||||
func (c *Client) Ctx() context.Context { return c.ctx }
|
||||
|
||||
// Endpoints lists the registered endpoints for the client.
|
||||
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
|
||||
func (c *Client) Endpoints() (eps []string) {
|
||||
// copy the slice; protect original endpoints from being changed
|
||||
eps = make([]string, len(c.cfg.Endpoints))
|
||||
copy(eps, c.cfg.Endpoints)
|
||||
return
|
||||
}
|
||||
|
||||
// SetEndpoints updates client's endpoints.
|
||||
func (c *Client) SetEndpoints(eps ...string) {
|
||||
@ -136,7 +148,8 @@ func (c *Client) autoSync() {
|
||||
}
|
||||
|
||||
type authTokenCredential struct {
|
||||
token string
|
||||
token string
|
||||
tokenMu *sync.RWMutex
|
||||
}
|
||||
|
||||
func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||
@ -144,6 +157,8 @@ func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||
}
|
||||
|
||||
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
||||
cred.tokenMu.RLock()
|
||||
defer cred.tokenMu.RUnlock()
|
||||
return map[string]string{
|
||||
"token": cred.token,
|
||||
}, nil
|
||||
@ -206,7 +221,8 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
|
||||
return nil, c.ctx.Err()
|
||||
default:
|
||||
}
|
||||
return net.DialTimeout(proto, host, t)
|
||||
dialer := &net.Dialer{Timeout: t}
|
||||
return dialer.DialContext(c.ctx, proto, host)
|
||||
}
|
||||
opts = append(opts, grpc.WithDialer(f))
|
||||
|
||||
@ -228,24 +244,64 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
|
||||
return c.dial(endpoint)
|
||||
}
|
||||
|
||||
func (c *Client) getToken(ctx context.Context) error {
|
||||
var err error // return last error in a case of fail
|
||||
var auth *authenticator
|
||||
|
||||
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||
endpoint := c.cfg.Endpoints[i]
|
||||
host := getHost(endpoint)
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer auth.close()
|
||||
|
||||
var resp *AuthenticateResponse
|
||||
resp, err = auth.authenticate(ctx, c.Username, c.Password)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
c.tokenCred.tokenMu.Lock()
|
||||
c.tokenCred.token = resp.Token
|
||||
c.tokenCred.tokenMu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
opts := c.dialSetupOpts(endpoint, dopts...)
|
||||
host := getHost(endpoint)
|
||||
if c.Username != "" && c.Password != "" {
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
c.tokenCred = &authTokenCredential{
|
||||
tokenMu: &sync.RWMutex{},
|
||||
}
|
||||
defer auth.close()
|
||||
|
||||
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
|
||||
if err != nil {
|
||||
ctx := c.ctx
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
if err := c.getToken(ctx); err != nil {
|
||||
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||
err = grpc.ErrClientConnTimeout
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
|
||||
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||
}
|
||||
|
||||
// add metrics options
|
||||
opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
|
||||
opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
|
||||
|
||||
conn, err := grpc.Dial(host, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -287,10 +343,13 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
client.balancer = newSimpleBalancer(cfg.Endpoints)
|
||||
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
|
||||
if err != nil {
|
||||
client.cancel()
|
||||
client.balancer.Close()
|
||||
return nil, err
|
||||
}
|
||||
client.conn = conn
|
||||
client.retryWrapper = client.newRetryWrapper()
|
||||
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||
|
||||
// wait for a connection
|
||||
if cfg.DialTimeout > 0 {
|
||||
@ -304,6 +363,7 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
}
|
||||
if !hasConn {
|
||||
client.cancel()
|
||||
client.balancer.Close()
|
||||
conn.Close()
|
||||
return nil, grpc.ErrClientConnTimeout
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ package clientv3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -25,36 +26,89 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestDialTimeout(t *testing.T) {
|
||||
func TestDialCancel(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
donec := make(chan error)
|
||||
go func() {
|
||||
// without timeout, grpc keeps redialing if connection refused
|
||||
cfg := Config{
|
||||
Endpoints: []string{"localhost:12345"},
|
||||
DialTimeout: 2 * time.Second}
|
||||
c, err := New(cfg)
|
||||
if c != nil || err == nil {
|
||||
t.Errorf("new client should fail")
|
||||
}
|
||||
donec <- err
|
||||
}()
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
select {
|
||||
case err := <-donec:
|
||||
t.Errorf("dial didn't wait (%v)", err)
|
||||
default:
|
||||
// accept first connection so client is created with dial timeout
|
||||
ln, err := net.Listen("unix", "dialcancel:12345")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer ln.Close()
|
||||
|
||||
ep := "unix://dialcancel:12345"
|
||||
cfg := Config{
|
||||
Endpoints: []string{ep},
|
||||
DialTimeout: 30 * time.Second}
|
||||
c, err := New(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// connect to ipv4 blackhole so dial blocks
|
||||
c.SetEndpoints("http://254.0.0.1:12345")
|
||||
|
||||
// issue Get to force redial attempts
|
||||
go c.Get(context.TODO(), "abc")
|
||||
|
||||
// wait a little bit so client close is after dial starts
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
c.Close()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("failed to timeout dial on time")
|
||||
case err := <-donec:
|
||||
if err != grpc.ErrClientConnTimeout {
|
||||
t.Errorf("unexpected error %v, want %v", err, grpc.ErrClientConnTimeout)
|
||||
t.Fatalf("failed to close")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialTimeout(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
testCfgs := []Config{
|
||||
{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: 2 * time.Second,
|
||||
},
|
||||
{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: time.Second,
|
||||
Username: "abc",
|
||||
Password: "def",
|
||||
},
|
||||
}
|
||||
|
||||
for i, cfg := range testCfgs {
|
||||
donec := make(chan error)
|
||||
go func() {
|
||||
// without timeout, dial continues forever on ipv4 blackhole
|
||||
c, err := New(cfg)
|
||||
if c != nil || err == nil {
|
||||
t.Errorf("#%d: new client should fail", i)
|
||||
}
|
||||
donec <- err
|
||||
}()
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
select {
|
||||
case err := <-donec:
|
||||
t.Errorf("#%d: dial didn't wait (%v)", i, err)
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("#%d: failed to timeout dial on time", i)
|
||||
case err := <-donec:
|
||||
if err != grpc.ErrClientConnTimeout {
|
||||
t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
|
||||
// it is safe to retry on update.
|
||||
for {
|
||||
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberUpdate(ctx, r)
|
||||
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberUpdateResponse)(resp), nil
|
||||
}
|
||||
|
@ -36,6 +36,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
|
||||
switch result {
|
||||
case "=":
|
||||
r = pb.Compare_EQUAL
|
||||
case "!=":
|
||||
r = pb.Compare_NOT_EQUAL
|
||||
case ">":
|
||||
r = pb.Compare_GREATER
|
||||
case "<":
|
||||
|
@ -15,6 +15,8 @@
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
@ -25,6 +27,7 @@ const defaultSessionTTL = 60
|
||||
// Fault-tolerant applications may use sessions to reason about liveness.
|
||||
type Session struct {
|
||||
client *v3.Client
|
||||
opts *sessionOptions
|
||||
id v3.LeaseID
|
||||
|
||||
cancel context.CancelFunc
|
||||
@ -33,25 +36,25 @@ type Session struct {
|
||||
|
||||
// NewSession gets the leased session for a client.
|
||||
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
|
||||
ops := &sessionOptions{ttl: defaultSessionTTL}
|
||||
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
|
||||
for _, opt := range opts {
|
||||
opt(ops)
|
||||
}
|
||||
|
||||
resp, err := client.Grant(client.Ctx(), int64(ops.ttl))
|
||||
resp, err := client.Grant(ops.ctx, int64(ops.ttl))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id := v3.LeaseID(resp.ID)
|
||||
|
||||
ctx, cancel := context.WithCancel(client.Ctx())
|
||||
ctx, cancel := context.WithCancel(ops.ctx)
|
||||
keepAlive, err := client.KeepAlive(ctx, id)
|
||||
if err != nil || keepAlive == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
s := &Session{client: client, id: id, cancel: cancel, donec: donec}
|
||||
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
|
||||
|
||||
// keep the lease alive until client error or cancelled context
|
||||
go func() {
|
||||
@ -87,12 +90,16 @@ func (s *Session) Orphan() {
|
||||
// Close orphans the session and revokes the session lease.
|
||||
func (s *Session) Close() error {
|
||||
s.Orphan()
|
||||
_, err := s.client.Revoke(s.client.Ctx(), s.id)
|
||||
// if revoke takes longer than the ttl, lease is expired anyway
|
||||
ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
|
||||
_, err := s.client.Revoke(ctx, s.id)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
type sessionOptions struct {
|
||||
ttl int
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// SessionOption configures Session.
|
||||
@ -107,3 +114,14 @@ func WithTTL(ttl int) SessionOption {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext assigns a context to the session instead of defaulting to
|
||||
// using the client context. This is useful for canceling NewSession and
|
||||
// Close operations immediately without having to close the client. If the
|
||||
// context is canceled before Close() completes, the session's lease will be
|
||||
// abandoned and left to expire instead of being revoked.
|
||||
func WithContext(ctx context.Context) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
so.ctx = ctx
|
||||
}
|
||||
}
|
||||
|
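A short usage sketch (not part of this diff) of the new WithContext option; the endpoint, TTL, and timeout values are illustrative only:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Give the session its own context so NewSession/Close can be canceled
	// without closing the whole client.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	sess, err := concurrency.NewSession(cli, concurrency.WithContext(ctx), concurrency.WithTTL(30))
	if err != nil {
		log.Fatal(err)
	}
	// If ctx is canceled before Close finishes, the lease is abandoned and
	// left to expire instead of being revoked.
	defer sess.Close()
}
```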
@ -249,11 +249,10 @@ func (s *stmReadCommitted) commit() *v3.TxnResponse {
|
||||
}
|
||||
|
||||
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
|
||||
rev := r.Header.Revision + 1
|
||||
if len(r.Kvs) != 0 {
|
||||
rev = r.Kvs[0].ModRevision + 1
|
||||
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
|
||||
}
|
||||
return v3.Compare(v3.ModRevision(k), "<", rev)
|
||||
return v3.Compare(v3.ModRevision(k), "=", 0)
|
||||
}
|
||||
|
||||
func respToValue(resp *v3.GetResponse) string {
|
||||
|
@ -44,7 +44,7 @@
|
||||
// etcd client returns 2 types of errors:
|
||||
//
|
||||
// 1. context error: canceled or deadline exceeded.
|
||||
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
|
||||
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
|
||||
//
|
||||
// Here is the example code to handle client errors:
|
||||
//
|
||||
|
46 clientv3/example_metrics_test.go Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func ExampleMetrics_All() {
|
||||
// listen for all prometheus metrics
|
||||
go func() {
|
||||
http.Handle("/metrics", prometheus.Handler())
|
||||
log.Fatal(http.ListenAndServe(":47989", nil))
|
||||
}()
|
||||
|
||||
url := "http://localhost:47989/metrics"
|
||||
|
||||
// make an http request to fetch all prometheus metrics
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Fatalf("fetch error: %v", err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("fetch error: reading %s: %v", url, err)
|
||||
}
|
||||
fmt.Printf("%s", b)
|
||||
}
|
@ -226,6 +226,21 @@ func TestKVRange(t *testing.T) {
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByKey, missing sorting order (ASCEND by default)
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortNone)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByCreateRevision, SortDescend
|
||||
{
|
||||
"a", "x",
|
||||
@ -241,6 +256,21 @@ func TestKVRange(t *testing.T) {
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByCreateRevision, missing sorting order (ASCEND by default)
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortNone)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByModRevision, SortDescend
|
||||
{
|
||||
"a", "x",
|
||||
|
@ -17,10 +17,12 @@ package integration
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/clientv3/concurrency"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
@ -154,6 +156,30 @@ func TestLeaseKeepAlive(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseKeepAliveOneSecond(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
|
||||
resp, err := cli.Grant(context.Background(), 1)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
if kerr != nil {
|
||||
t.Errorf("failed to keepalive lease %v", kerr)
|
||||
}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
if _, ok := <-rc; !ok {
|
||||
t.Errorf("chan is closed, want not closed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add a client that can connect to all the members of cluster via unix sock.
|
||||
// TODO: test handle more complicated failures.
|
||||
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
|
||||
@ -510,3 +536,121 @@ func TestLeaseTimeToLive(t *testing.T) {
|
||||
t.Fatalf("unexpected keys %+v", lresp.Keys)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
|
||||
// for a while.
|
||||
func TestLeaseRenewLostQuorum(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
r, err := cli.Grant(context.TODO(), 4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
kctx, kcancel := context.WithCancel(context.Background())
|
||||
defer kcancel()
|
||||
ka, err := cli.KeepAlive(kctx, r.ID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// consume first keepalive so next message sends when cluster is down
|
||||
<-ka
|
||||
|
||||
// force keepalive stream message to timeout
|
||||
clus.Members[1].Stop(t)
|
||||
clus.Members[2].Stop(t)
|
||||
// Use TTL-1 since the client closes the keepalive channel if no
|
||||
// keepalive arrives before the lease deadline.
|
||||
// The cluster has 1 second to recover and reply to the keepalive.
|
||||
time.Sleep(time.Duration(r.TTL-1) * time.Second)
|
||||
clus.Members[1].Restart(t)
|
||||
clus.Members[2].Restart(t)
|
||||
|
||||
select {
|
||||
case _, ok := <-ka:
|
||||
if !ok {
|
||||
t.Fatalf("keepalive closed")
|
||||
}
|
||||
case <-time.After(time.Duration(r.TTL) * time.Second):
|
||||
t.Fatalf("timed out waiting for keepalive")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseKeepAliveLoopExit(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
ctx := context.Background()
|
||||
cli := clus.Client(0)
|
||||
|
||||
resp, err := cli.Grant(ctx, 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cli.Lease.Close()
|
||||
|
||||
_, err = cli.KeepAlive(ctx, resp.ID)
|
||||
if _, ok := err.(clientv3.ErrKeepAliveHalted); !ok {
|
||||
t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster
|
||||
// before, during, and after quorum loss to confirm Grant/Keepalive tolerates
|
||||
// transient cluster failure.
|
||||
func TestV3LeaseFailureOverlap(t *testing.T) {
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
numReqs := 5
|
||||
cli := clus.Client(0)
|
||||
|
||||
// bring up a session, tear it down
|
||||
updown := func(i int) error {
|
||||
sess, err := concurrency.NewSession(cli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
defer close(ch)
|
||||
sess.Close()
|
||||
}()
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(time.Minute / 4):
|
||||
t.Fatalf("timeout %d", i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
mkReqs := func(n int) {
|
||||
wg.Add(numReqs)
|
||||
for i := 0; i < numReqs; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := updown(n)
|
||||
if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost {
|
||||
return
|
||||
}
|
||||
t.Fatal(err)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
mkReqs(1)
|
||||
clus.Members[1].Stop(t)
|
||||
mkReqs(2)
|
||||
time.Sleep(time.Second)
|
||||
mkReqs(3)
|
||||
clus.Members[1].Restart(t)
|
||||
mkReqs(4)
|
||||
wg.Wait()
|
||||
}
|
||||
|
169 clientv3/integration/metrics_test.go Normal file
@ -0,0 +1,169 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestV3ClientMetrics(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
var (
|
||||
addr string = "localhost:27989"
|
||||
ln net.Listener
|
||||
err error
|
||||
)
|
||||
|
||||
// listen for all prometheus metrics
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
|
||||
srv := &http.Server{Handler: prometheus.Handler()}
|
||||
srv.SetKeepAlivesEnabled(false)
|
||||
|
||||
ln, err = transport.NewUnixListener(addr)
|
||||
if err != nil {
|
||||
t.Fatalf("Error: %v occurred while listening on addr: %v", err, addr)
|
||||
}
|
||||
|
||||
err = srv.Serve(ln)
|
||||
if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
|
||||
t.Fatalf("Err serving http requests: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
url := "unix://" + addr + "/metrics"
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
client := clus.Client(0)
|
||||
|
||||
w := clientv3.NewWatcher(client)
|
||||
defer w.Close()
|
||||
|
||||
kv := clientv3.NewKV(client)
|
||||
|
||||
wc := w.Watch(context.Background(), "foo")
|
||||
|
||||
wBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
|
||||
|
||||
pBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
|
||||
|
||||
_, err = kv.Put(context.Background(), "foo", "bar")
|
||||
if err != nil {
|
||||
t.Errorf("Error putting value in key store")
|
||||
}
|
||||
|
||||
pAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
|
||||
if pBefore+1 != pAfter {
|
||||
t.Errorf("grpc_client_started_total expected %d, got %d", 1, pAfter-pBefore)
|
||||
}
|
||||
|
||||
// consume watch response
|
||||
select {
|
||||
case <-wc:
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Error("Timeout occurred for getting watch response")
|
||||
}
|
||||
|
||||
wAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
|
||||
if wBefore+1 != wAfter {
|
||||
t.Errorf("grpc_client_msg_received_total expected %d, got %d", 1, wAfter-wBefore)
|
||||
}
|
||||
|
||||
ln.Close()
|
||||
<-donec
|
||||
}
|
||||
|
||||
func sumCountersForMetricAndLabels(t *testing.T, url string, metricName string, matchingLabelValues ...string) int {
|
||||
count := 0
|
||||
for _, line := range getHTTPBodyAsLines(t, url) {
|
||||
ok := true
|
||||
if !strings.HasPrefix(line, metricName) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, labelValue := range matchingLabelValues {
|
||||
if !strings.Contains(line, `"`+labelValue+`"`) {
|
||||
ok = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
valueString := line[strings.LastIndex(line, " ")+1 : len(line)-1]
|
||||
valueFloat, err := strconv.ParseFloat(valueString, 32)
|
||||
if err != nil {
|
||||
t.Fatalf("failed parsing value for line: %v and matchingLabelValues: %v", line, matchingLabelValues)
|
||||
}
|
||||
count += int(valueFloat)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func getHTTPBodyAsLines(t *testing.T, url string) []string {
|
||||
cfgtls := transport.TLSInfo{}
|
||||
tr, err := transport.NewTransport(cfgtls, time.Second)
|
||||
if err != nil {
|
||||
t.Fatalf("Error getting transport: %v", err)
|
||||
}
|
||||
|
||||
tr.MaxIdleConns = -1
|
||||
tr.DisableKeepAlives = true
|
||||
|
||||
cli := &http.Client{Transport: tr}
|
||||
|
||||
resp, err := cli.Get(url)
|
||||
if err != nil {
|
||||
t.Fatalf("Error fetching: %v", err)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(resp.Body)
|
||||
lines := []string{}
|
||||
for {
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else {
|
||||
t.Fatalf("error reading: %v", err)
|
||||
}
|
||||
}
|
||||
lines = append(lines, line)
|
||||
}
|
||||
resp.Body.Close()
|
||||
return lines
|
||||
}
|
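sumCountersForMetricAndLabels above matches counter lines from the Prometheus text exposition format by metric-name prefix and quoted label values, then sums the trailing sample values. A minimal, self-contained sketch of the same parsing approach; the sample lines below are illustrative, not captured from a real /metrics endpoint:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical lines as they might appear on a metrics endpoint.
	lines := []string{
		`grpc_client_started_total{grpc_method="Put",grpc_service="etcdserverpb.KV",grpc_type="unary"} 3`,
		`grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 7`,
	}
	sum := 0
	for _, line := range lines {
		// keep only lines for the wanted metric with the wanted label values
		if !strings.HasPrefix(line, "grpc_client_started_total") ||
			!strings.Contains(line, `"Put"`) || !strings.Contains(line, `"unary"`) {
			continue
		}
		// the sample value is the last space-separated field
		v, err := strconv.ParseFloat(line[strings.LastIndex(line, " ")+1:], 64)
		if err == nil {
			sum += int(v)
		}
	}
	fmt.Println(sum) // 3
}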
@ -347,8 +347,61 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestWatchResumeComapcted checks that the watcher gracefully closes in case
|
||||
func TestWatchResumeInitRev(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
if _, err := cli.Put(context.TODO(), "b", "2"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := cli.Put(context.TODO(), "a", "3"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// if resume is broken, it'll pick up this key first instead of a=3
|
||||
if _, err := cli.Put(context.TODO(), "a", "4"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify())
|
||||
if resp, ok := <-wch; !ok || resp.Header.Revision != 4 {
|
||||
t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok)
|
||||
}
|
||||
// pause wch
|
||||
clus.Members[0].DropConnections()
|
||||
clus.Members[0].PauseConnections()
|
||||
|
||||
select {
|
||||
case resp, ok := <-wch:
|
||||
t.Skipf("wch should block, got (%+v, %v); drop not fast enough", resp, ok)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
}
|
||||
|
||||
// resume wch
|
||||
clus.Members[0].UnpauseConnections()
|
||||
|
||||
select {
|
||||
case resp, ok := <-wch:
|
||||
if !ok {
|
||||
t.Fatal("unexpected watch close")
|
||||
}
|
||||
if len(resp.Events) == 0 {
|
||||
t.Fatal("expected event on watch")
|
||||
}
|
||||
if string(resp.Events[0].Kv.Value) != "3" {
|
||||
t.Fatalf("expected value=3, got event %+v", resp.Events[0])
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("watch timed out")
|
||||
}
|
||||
}
|
||||
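The revision arithmetic behind the assertions above: a fresh cluster is at revision 1, so the three puts land at revisions 2 (b=2), 3 (a=3) and 4 (a=4). A watch opened with WithRev(1) must replay "a" from the oldest matching event, so value 3 arrives before value 4 even across a dropped and resumed stream. A hedged client-side sketch of that usage; the helper name and printing are illustrative, not part of the test:

package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// printFromRev watches key from revision 1 and prints every event in replay order.
func printFromRev(cli *clientv3.Client, key string) {
	wch := cli.Watch(context.Background(), key, clientv3.WithRev(1), clientv3.WithCreatedNotify())
	created := <-wch
	// With the three puts above, the created notification reports the current store revision, 4.
	fmt.Println("store revision:", created.Header.Revision)
	for resp := range wch {
		for _, ev := range resp.Events {
			// Expected order for "a": value 3 at ModRevision 3, then 4 at ModRevision 4,
			// even if the stream is dropped and resumed in between.
			fmt.Printf("%s=%s @ rev %d\n", ev.Kv.Key, ev.Kv.Value, ev.Kv.ModRevision)
		}
	}
}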
|
||||
// TestWatchResumeCompacted checks that the watcher gracefully closes in case
|
||||
// that it tries to resume to a revision that's been compacted out of the store.
|
||||
// Since the watcher's server restarts with stale data, the watcher will receive
|
||||
// either a compaction error or all keys by staying in sync before the compaction
|
||||
// is finally applied.
|
||||
func TestWatchResumeCompacted(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
@ -377,8 +430,9 @@ func TestWatchResumeCompacted(t *testing.T) {
|
||||
}
|
||||
|
||||
// put some data and compact away
|
||||
numPuts := 5
|
||||
kv := clientv3.NewKV(clus.Client(1))
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := 0; i < numPuts; i++ {
|
||||
if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -389,17 +443,48 @@ func TestWatchResumeCompacted(t *testing.T) {
|
||||
|
||||
clus.Members[0].Restart(t)
|
||||
|
||||
// get compacted error message
|
||||
wresp, ok := <-wch
|
||||
if !ok {
|
||||
t.Fatalf("expected wresp, but got closed channel")
|
||||
// since watch's server isn't guaranteed to be synced with the cluster when
|
||||
// the watch resumes, there is a window where the watch can stay synced and
|
||||
// read off all events; if the watcher misses the window, it will go out of
|
||||
// sync and get a compaction error.
|
||||
wRev := int64(2)
|
||||
for int(wRev) <= numPuts+1 {
|
||||
var wresp clientv3.WatchResponse
|
||||
var ok bool
|
||||
select {
|
||||
case wresp, ok = <-wch:
|
||||
if !ok {
|
||||
t.Fatalf("expected wresp, but got closed channel")
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("compacted watch timed out")
|
||||
}
|
||||
for _, ev := range wresp.Events {
|
||||
if ev.Kv.ModRevision != wRev {
|
||||
t.Fatalf("expected modRev %v, got %+v", wRev, ev)
|
||||
}
|
||||
wRev++
|
||||
}
|
||||
if wresp.Err() == nil {
|
||||
continue
|
||||
}
|
||||
if wresp.Err() != rpctypes.ErrCompacted {
|
||||
t.Fatalf("wresp.Err() expected %v, but got %v %+v", rpctypes.ErrCompacted, wresp.Err())
|
||||
}
|
||||
break
|
||||
}
|
||||
if wresp.Err() != rpctypes.ErrCompacted {
|
||||
t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
|
||||
if int(wRev) > numPuts+1 {
|
||||
// got data faster than the compaction
|
||||
return
|
||||
}
|
||||
// ensure the channel is closed
|
||||
if wresp, ok = <-wch; ok {
|
||||
t.Fatalf("expected closed channel, but got %v", wresp)
|
||||
// received compaction error; ensure the channel closes
|
||||
select {
|
||||
case wresp, ok := <-wch:
|
||||
if ok {
|
||||
t.Fatalf("expected closed channel, but got %v", wresp)
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timed out waiting for channel close")
|
||||
}
|
||||
}
|
||||
|
||||
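When an application watcher does receive ErrCompacted, the usual recovery is to restart the watch from the compact revision reported in the response. A hedged sketch of that pattern, separate from the test above; the helper and its key handling are assumptions:

package example

import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

// watchWithCompactRecovery restarts a watch whenever the requested revision
// has been compacted away, resuming from the reported compact revision.
func watchWithCompactRecovery(ctx context.Context, cli *clientv3.Client, key string, rev int64) {
	for {
		wch := cli.Watch(ctx, key, clientv3.WithRev(rev))
		for resp := range wch {
			if resp.Err() == rpctypes.ErrCompacted {
				// resume from the oldest revision still available
				rev = resp.CompactRevision
				break
			}
			for _, ev := range resp.Events {
				// handle ev, then remember where to resume from
				rev = ev.Kv.ModRevision + 1
			}
		}
		if ctx.Err() != nil {
			return
		}
	}
}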
@ -579,18 +664,19 @@ func TestWatchErrConnClosed(t *testing.T) {
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
defer cli.Close()
|
||||
wc := clientv3.NewWatcher(cli)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
wc.Watch(context.TODO(), "foo")
|
||||
if err := wc.Close(); err != nil && err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
ch := wc.Watch(context.TODO(), "foo")
|
||||
if wr := <-ch; grpc.ErrorDesc(wr.Err()) != grpc.ErrClientConnClosing.Error() {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, grpc.ErrorDesc(wr.Err()))
|
||||
}
|
||||
}()
|
||||
|
||||
if err := cli.Close(); err != nil {
|
||||
if err := cli.ActiveConnection().Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clus.TakeClient(0)
|
||||
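With this change, tearing down the underlying connection no longer surfaces through Watcher.Close; the pending watch channel instead delivers a response whose Err() carries the gRPC client-conn-closing description. A hedged sketch of checking for that condition in application code; the helper name is an assumption:

package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// watchUntilConnClosed reports whether a watch ended because the client
// connection was closed out from under it.
func watchUntilConnClosed(cli *clientv3.Client, key string) bool {
	wch := clientv3.NewWatcher(cli).Watch(context.TODO(), key)
	for wr := range wch {
		if grpc.ErrorDesc(wr.Err()) == grpc.ErrClientConnClosing.Error() {
			fmt.Println("watch ended: client connection closing")
			return true
		}
		// otherwise process wr.Events as usual
	}
	return false
}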
@ -637,8 +723,12 @@ func TestWatchWithRequireLeader(t *testing.T) {
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// something for the non-require leader watch to read as an event
|
||||
if _, err := clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
|
||||
// Put a key for the non-require leader watch to read as an event.
|
||||
// The watchers will be on member[0]; put key through member[0] to
|
||||
// ensure that it receives the update so watching after killing quorum
|
||||
// is guaranteed to have the key.
|
||||
liveClient := clus.Client(0)
|
||||
if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -653,8 +743,8 @@ func TestWatchWithRequireLeader(t *testing.T) {
|
||||
tickDuration := 10 * time.Millisecond
|
||||
time.Sleep(time.Duration(3*clus.Members[0].ElectionTicks) * tickDuration)
|
||||
|
||||
chLeader := clus.Client(0).Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
|
||||
chNoLeader := clus.Client(0).Watch(context.TODO(), "foo", clientv3.WithRev(1))
|
||||
chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
|
||||
chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))
|
||||
|
||||
select {
|
||||
case resp, ok := <-chLeader:
|
||||
@ -736,6 +826,36 @@ func TestWatchWithCreatedNotification(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestWatchWithCreatedNotificationDropConn ensures that
|
||||
// a watcher with created notify does not post duplicate
|
||||
// created events from disconnect.
|
||||
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
|
||||
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer cluster.Terminate(t)
|
||||
|
||||
client := cluster.RandClient()
|
||||
|
||||
wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify())
|
||||
|
||||
resp := <-wch
|
||||
|
||||
if !resp.Created {
|
||||
t.Fatalf("expected created event, got %v", resp)
|
||||
}
|
||||
|
||||
cluster.Members[0].DropConnections()
|
||||
|
||||
// try to receive from watch channel again
|
||||
// ensure it doesn't post another createNotify
|
||||
select {
|
||||
case wresp := <-wch:
|
||||
t.Fatalf("got unexpected watch response: %+v\n", wresp)
|
||||
case <-time.After(time.Second):
|
||||
// watcher may not reconnect by the time it hits the select,
|
||||
// so it wouldn't have a chance to filter out the second create event
|
||||
}
|
||||
}
|
||||
|
||||
// TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
|
||||
func TestWatchCancelOnServer(t *testing.T) {
|
||||
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
@ -868,3 +988,45 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
|
||||
<-donec
|
||||
clus.TakeClient(0)
|
||||
}
|
||||
|
||||
// TestWatchStressResumeClose establishes a bunch of watchers, disconnects
|
||||
// to put them in resuming mode, cancels them so some resumes by cancel fail,
|
||||
// then closes the watcher interface to ensure correct clean up.
|
||||
func TestWatchStressResumeClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
cli := clus.Client(0)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// add more watches than can be resumed before the cancel
|
||||
wchs := make([]clientv3.WatchChan, 2000)
|
||||
for i := range wchs {
|
||||
wchs[i] = cli.Watch(ctx, "abc")
|
||||
}
|
||||
clus.Members[0].DropConnections()
|
||||
cancel()
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clus.TakeClient(0)
|
||||
}
|
||||
|
||||
// TestWatchCancelDisconnected ensures canceling a watcher works when
|
||||
// its grpc stream is disconnected / reconnecting.
|
||||
func TestWatchCancelDisconnected(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
cli := clus.Client(0)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// add more watches than can be resumed before the cancel
|
||||
wch := cli.Watch(ctx, "abc")
|
||||
clus.Members[0].Stop(t)
|
||||
cancel()
|
||||
select {
|
||||
case <-wch:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("took too long to cancel disconnected watcher")
|
||||
}
|
||||
}
|
||||
|
@ -105,7 +105,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
|
||||
}
|
||||
|
||||
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
|
||||
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
|
||||
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
@ -125,6 +125,7 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
|
||||
if err == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
if isHaltErr(ctx, err) {
|
||||
return resp, toErr(ctx, err)
|
||||
}
|
||||
|
@ -69,6 +69,21 @@ const (
|
||||
NoLease LeaseID = 0
|
||||
)
|
||||
|
||||
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
|
||||
//
|
||||
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
|
||||
type ErrKeepAliveHalted struct {
|
||||
Reason error
|
||||
}
|
||||
|
||||
func (e ErrKeepAliveHalted) Error() string {
|
||||
s := "etcdclient: leases keep alive halted"
|
||||
if e.Reason != nil {
|
||||
s += ": " + e.Reason.Error()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
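A hedged sketch of how a caller might react to the new error type: if KeepAlive reports that the receive loop already halted, fall back to periodic KeepAliveOnce calls, which the comment above notes still work. The renewal interval and helper are assumptions:

package example

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// keepLeaseAlive prefers the streaming KeepAlive, but falls back to periodic
// KeepAliveOnce calls if the keep-alive loop has already halted.
func keepLeaseAlive(cli *clientv3.Client, id clientv3.LeaseID) {
	ch, err := cli.KeepAlive(context.Background(), id)
	if _, halted := err.(clientv3.ErrKeepAliveHalted); halted {
		log.Printf("keep alive loop halted (%v); falling back to KeepAliveOnce", err)
		for range time.Tick(time.Second) {
			if _, kerr := cli.KeepAliveOnce(context.Background(), id); kerr != nil {
				return
			}
		}
	}
	if err != nil {
		return
	}
	for range ch {
		// drain keep-alive responses
	}
}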
type Lease interface {
|
||||
// Grant creates a new lease.
|
||||
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
|
||||
@ -94,8 +109,9 @@ type Lease interface {
|
||||
type lessor struct {
|
||||
mu sync.Mutex // guards all fields
|
||||
|
||||
// donec is closed when recvKeepAliveLoop stops
|
||||
donec chan struct{}
|
||||
// donec is closed and loopErr is set when recvKeepAliveLoop stops
|
||||
donec chan struct{}
|
||||
loopErr error
|
||||
|
||||
remote pb.LeaseClient
|
||||
|
||||
@ -161,9 +177,6 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
|
||||
if isHaltErr(cctx, err) {
|
||||
return nil, toErr(cctx, err)
|
||||
}
|
||||
if nerr := l.newStream(); nerr != nil {
|
||||
return nil, nerr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -182,9 +195,6 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
if nerr := l.newStream(); nerr != nil {
|
||||
return nil, nerr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -195,7 +205,7 @@ func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption
|
||||
|
||||
for {
|
||||
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||
resp, err := l.remote.LeaseTimeToLive(cctx, r)
|
||||
resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
gresp := &LeaseTimeToLiveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
@ -216,6 +226,15 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
|
||||
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
|
||||
|
||||
l.mu.Lock()
|
||||
// ensure that recvKeepAliveLoop is still running
|
||||
select {
|
||||
case <-l.donec:
|
||||
err := l.loopErr
|
||||
l.mu.Unlock()
|
||||
close(ch)
|
||||
return ch, ErrKeepAliveHalted{Reason: err}
|
||||
default:
|
||||
}
|
||||
ka, ok := l.keepAlives[id]
|
||||
if !ok {
|
||||
// create fresh keep alive
|
||||
@ -255,10 +274,6 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
if nerr := l.newStream(); nerr != nil {
|
||||
return nil, nerr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -327,10 +342,11 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
|
||||
return karesp, nil
|
||||
}
|
||||
|
||||
func (l *lessor) recvKeepAliveLoop() {
|
||||
func (l *lessor) recvKeepAliveLoop() (gerr error) {
|
||||
defer func() {
|
||||
l.mu.Lock()
|
||||
close(l.donec)
|
||||
l.loopErr = gerr
|
||||
for _, ka := range l.keepAlives {
|
||||
ka.Close()
|
||||
}
|
||||
@ -343,21 +359,35 @@ func (l *lessor) recvKeepAliveLoop() {
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
if isHaltErr(l.stopCtx, err) {
|
||||
return
|
||||
return err
|
||||
}
|
||||
stream, serr = l.resetRecv()
|
||||
continue
|
||||
}
|
||||
l.recvKeepAlive(resp)
|
||||
}
|
||||
return serr
|
||||
}
|
||||
|
||||
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
|
||||
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
|
||||
if err := l.newStream(); err != nil {
|
||||
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
|
||||
if err = toErr(sctx, err); err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
stream := l.getKeepAliveStream()
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
if l.stream != nil && l.streamCancel != nil {
|
||||
l.stream.CloseSend()
|
||||
l.streamCancel()
|
||||
}
|
||||
|
||||
l.streamCancel = cancel
|
||||
l.stream = stream
|
||||
|
||||
go l.sendKeepAliveLoop(stream)
|
||||
return stream, nil
|
||||
}
|
||||
@ -386,7 +416,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
|
||||
}
|
||||
|
||||
// send update to all channels
|
||||
nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
|
||||
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
|
||||
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
|
||||
for _, ch := range ka.chs {
|
||||
select {
|
||||
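The reworked schedule divides the whole TTL duration by three instead of truncating the integer TTL first, which matters for small TTLs. A quick worked comparison of the two formulas (plain arithmetic, not etcd code):

package main

import (
	"fmt"
	"time"
)

func main() {
	for _, ttl := range []int64{60, 2} {
		before := 1 + time.Duration(ttl/3)*time.Second    // old: integer division, then scale
		after := (time.Duration(ttl) * time.Second) / 3.0 // new: scale, then divide the duration
		fmt.Printf("TTL=%ds: old delay %v, new delay %v\n", ttl, before, after)
	}
	// TTL=60s: both wait about 20s. TTL=2s: the old form waits 1ns because 2/3
	// truncates to 0, while the new form waits about 666ms, a third of the TTL
	// as intended.
}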
@ -453,32 +483,6 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
return l.stream
|
||||
}
|
||||
|
||||
func (l *lessor) newStream() error {
|
||||
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
|
||||
if err != nil {
|
||||
cancel()
|
||||
return toErr(sctx, err)
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
if l.stream != nil && l.streamCancel != nil {
|
||||
l.stream.CloseSend()
|
||||
l.streamCancel()
|
||||
}
|
||||
|
||||
l.streamCancel = cancel
|
||||
l.stream = stream
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ka *keepAlive) Close() {
|
||||
close(ka.donec)
|
||||
for _, ch := range ka.chs {
|
||||
|
@ -31,16 +31,16 @@ type GRPCResolver struct {
|
||||
Client *etcd.Client
|
||||
}
|
||||
|
||||
func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update) (err error) {
|
||||
func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {
|
||||
switch nm.Op {
|
||||
case naming.Add:
|
||||
var v []byte
|
||||
if v, err = json.Marshal(nm); err != nil {
|
||||
return grpc.Errorf(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v))
|
||||
_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
|
||||
case naming.Delete:
|
||||
_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr)
|
||||
_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
|
||||
default:
|
||||
return grpc.Errorf(codes.InvalidArgument, "naming: bad naming op")
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
|
||||
// If order != SortNone, server fetches the entire key-space,
|
||||
// and then applies the sort and limit, if provided.
|
||||
// Since current mvcc.Range implementation returns results
|
||||
// sorted by keys in lexiographically ascending order,
|
||||
// sorted by keys in lexicographically ascending order,
|
||||
// client should ignore SortOrder if the target is SortByKey.
|
||||
order = SortNone
|
||||
}
|
||||
@ -261,14 +261,15 @@ func WithPrefix() OpOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithRange specifies the range of 'Get' or 'Delete' requests.
|
||||
// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
|
||||
// For example, 'Get' requests with 'WithRange(end)' returns
|
||||
// the keys in the range [key, end).
|
||||
// endKey must be lexicographically greater than start key.
|
||||
func WithRange(endKey string) OpOption {
|
||||
return func(op *Op) { op.end = []byte(endKey) }
|
||||
}
|
||||
|
||||
// WithFromKey specifies the range of 'Get' or 'Delete' requests
|
||||
// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
|
||||
// to be equal or greater than the key in the argument.
|
||||
func WithFromKey() OpOption { return WithRange("\x00") }
|
||||
|
||||
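Since these options now also apply to Watch, the same range forms work uniformly across Get, Delete and Watch. A brief hedged sketch of the range semantics; the key names are illustrative:

package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func rangeExamples(cli *clientv3.Client) error {
	ctx := context.Background()
	// Keys in the half-open interval ["foo1", "foo4"): foo1, foo2, foo3 but not foo4.
	if _, err := cli.Get(ctx, "foo1", clientv3.WithRange("foo4")); err != nil {
		return err
	}
	// All keys greater than or equal to "svc": WithFromKey is WithRange("\x00").
	if _, err := cli.Get(ctx, "svc", clientv3.WithFromKey()); err != nil {
		return err
	}
	// Watch accepts the same option, e.g. watch the whole ["foo1", "foo4") range.
	_ = cli.Watch(ctx, "foo1", clientv3.WithRange("foo4"))
	return nil
}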
|
@ -23,70 +23,109 @@ import (
|
||||
)
|
||||
|
||||
type rpcFunc func(ctx context.Context) error
|
||||
type retryRpcFunc func(context.Context, rpcFunc)
|
||||
type retryRpcFunc func(context.Context, rpcFunc) error
|
||||
|
||||
func (c *Client) newRetryWrapper() retryRpcFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
for {
|
||||
err := f(rpcCtx)
|
||||
if err == nil {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
eErr := rpctypes.Error(err)
|
||||
// always stop retry on etcd errors
|
||||
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
// only retry if unavailable
|
||||
if grpc.Code(err) != codes.Unavailable {
|
||||
return
|
||||
}
|
||||
// always stop retry on etcd errors
|
||||
eErr := rpctypes.Error(err)
|
||||
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c.balancer.ConnectNotify():
|
||||
case <-rpcCtx.Done():
|
||||
return rpcCtx.Err()
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
return c.ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type retryKVClient struct {
|
||||
pb.KVClient
|
||||
retryf retryRpcFunc
|
||||
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
for {
|
||||
err := f(rpcCtx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// always stop retry on etcd errors other than invalid auth token
|
||||
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
|
||||
gterr := c.getToken(rpcCtx)
|
||||
if gterr != nil {
|
||||
return err // return the original error for simplicity
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
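The two wrappers compose: the inner one retries transport-level Unavailable errors after the balancer reports a reconnect, while the outer one refreshes the auth token on ErrInvalidAuthToken and replays the call. A simplified standalone sketch of the retry-until-reconnected idea; the reconnect-notification channel is an assumption standing in for the client's balancer:

package example

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

type rpcFunc func(ctx context.Context) error

// retryUnavailable re-invokes f while it fails with codes.Unavailable,
// waiting on reconnectc (e.g. a balancer's connect notification) between
// attempts; any other error, or context cancellation, stops the retries.
func retryUnavailable(ctx context.Context, reconnectc <-chan struct{}, f rpcFunc) error {
	for {
		err := f(ctx)
		if err == nil {
			return nil
		}
		if grpc.Code(err) != codes.Unavailable {
			return err
		}
		select {
		case <-reconnectc:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}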
||||
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
|
||||
func RetryKVClient(c *Client) pb.KVClient {
|
||||
return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
type retryKVClient struct {
|
||||
*retryWriteKVClient
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryWriteKVClient struct {
|
||||
pb.KVClient
|
||||
retryf retryRpcFunc
|
||||
}
|
||||
|
||||
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Put(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -100,11 +139,12 @@ type retryLeaseClient struct {
|
||||
|
||||
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
|
||||
func RetryLeaseClient(c *Client) pb.LeaseClient {
|
||||
return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
|
||||
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
|
||||
return &retryLeaseClient{retry, c.retryAuthWrapper}
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
|
||||
rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -113,7 +153,7 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
|
||||
rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -131,7 +171,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient {
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||
rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -139,7 +179,7 @@ func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRe
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||
rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -147,7 +187,7 @@ func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRe
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||
rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -165,7 +205,7 @@ func RetryAuthClient(c *Client) pb.AuthClient {
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -173,7 +213,7 @@ func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableReq
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -181,7 +221,7 @@ func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableR
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -189,7 +229,7 @@ func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddReque
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -197,7 +237,7 @@ func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDelet
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -205,7 +245,7 @@ func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthU
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -213,7 +253,7 @@ func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGr
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -221,7 +261,7 @@ func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserR
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -229,7 +269,7 @@ func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddReque
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -237,7 +277,7 @@ func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDelet
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
@ -245,7 +285,7 @@ func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.Auth
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
|
||||
rac.retryf(ctx, func(rctx context.Context) error {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
|
@ -126,14 +126,14 @@ type watchGrpcStream struct {
|
||||
reqc chan *watchRequest
|
||||
// respc receives data from the watch client
|
||||
respc chan *pb.WatchResponse
|
||||
// stopc is sent to the main goroutine to stop all processing
|
||||
stopc chan struct{}
|
||||
// donec closes to broadcast shutdown
|
||||
donec chan struct{}
|
||||
// errc transmits errors from grpc Recv to the watch stream reconn logic
|
||||
errc chan error
|
||||
// closingc gets the watcherStream of closing watchers
|
||||
closingc chan *watcherStream
|
||||
// wg is Done when all substream goroutines have exited
|
||||
wg sync.WaitGroup
|
||||
|
||||
// resumec closes to signal that all substreams should begin resuming
|
||||
resumec chan struct{}
|
||||
@ -213,7 +213,6 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
|
||||
|
||||
respc: make(chan *pb.WatchResponse),
|
||||
reqc: make(chan *watchRequest),
|
||||
stopc: make(chan struct{}),
|
||||
donec: make(chan struct{}),
|
||||
errc: make(chan error, 1),
|
||||
closingc: make(chan *watcherStream),
|
||||
@ -319,7 +318,7 @@ func (w *watcher) Close() (err error) {
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) Close() (err error) {
|
||||
close(w.stopc)
|
||||
w.cancel()
|
||||
<-w.donec
|
||||
select {
|
||||
case err = <-w.errc:
|
||||
@ -366,7 +365,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
|
||||
// close subscriber's channel
|
||||
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
|
||||
go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
|
||||
} else {
|
||||
} else if ws.outc != nil {
|
||||
close(ws.outc)
|
||||
}
|
||||
if ws.id != -1 {
|
||||
@ -396,18 +395,20 @@ func (w *watchGrpcStream) run() {
|
||||
for _, ws := range w.substreams {
|
||||
if _, ok := closing[ws]; !ok {
|
||||
close(ws.recvc)
|
||||
closing[ws] = struct{}{}
|
||||
}
|
||||
}
|
||||
for _, ws := range w.resuming {
|
||||
if _, ok := closing[ws]; ws != nil && !ok {
|
||||
close(ws.recvc)
|
||||
closing[ws] = struct{}{}
|
||||
}
|
||||
}
|
||||
w.joinSubstreams()
|
||||
for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- {
|
||||
for range closing {
|
||||
w.closeSubstream(<-w.closingc)
|
||||
}
|
||||
|
||||
w.wg.Wait()
|
||||
w.owner.closeStream(w)
|
||||
}()
|
||||
|
||||
@ -432,6 +433,7 @@ func (w *watchGrpcStream) run() {
|
||||
}
|
||||
|
||||
ws.donec = make(chan struct{})
|
||||
w.wg.Add(1)
|
||||
go w.serveSubstream(ws, w.resumec)
|
||||
|
||||
// queue up for watcher creation/resume
|
||||
@ -491,7 +493,7 @@ func (w *watchGrpcStream) run() {
|
||||
wc.Send(ws.initReq.toPB())
|
||||
}
|
||||
cancelSet = make(map[int64]struct{})
|
||||
case <-w.stopc:
|
||||
case <-w.ctx.Done():
|
||||
return
|
||||
case ws := <-w.closingc:
|
||||
w.closeSubstream(ws)
|
||||
@ -577,6 +579,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
|
||||
if !resuming {
|
||||
w.closingc <- ws
|
||||
}
|
||||
w.wg.Done()
|
||||
}()
|
||||
|
||||
emptyWr := &WatchResponse{}
|
||||
@ -584,17 +587,6 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
|
||||
curWr := emptyWr
|
||||
outc := ws.outc
|
||||
|
||||
if len(ws.buf) > 0 && ws.buf[0].Created {
|
||||
select {
|
||||
case ws.initReq.retc <- ws.outc:
|
||||
// send first creation event and only if requested
|
||||
if !ws.initReq.createdNotify {
|
||||
ws.buf = ws.buf[1:]
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
if len(ws.buf) > 0 {
|
||||
curWr = ws.buf[0]
|
||||
} else {
|
||||
@ -612,13 +604,51 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
|
||||
// shutdown from closeSubstream
|
||||
return
|
||||
}
|
||||
// TODO pause channel if buffer gets too large
|
||||
ws.buf = append(ws.buf, wr)
|
||||
nextRev = wr.Header.Revision
|
||||
|
||||
if wr.Created {
|
||||
if ws.initReq.retc != nil {
|
||||
ws.initReq.retc <- ws.outc
|
||||
// to prevent next write from taking the slot in buffered channel
|
||||
// and posting duplicate create events
|
||||
ws.initReq.retc = nil
|
||||
|
||||
// send first creation event only if requested
|
||||
if ws.initReq.createdNotify {
|
||||
ws.outc <- *wr
|
||||
}
|
||||
// once the watch channel is returned, a current revision
|
||||
// watch must resume at the store revision. This is necessary
|
||||
// for the following case to work as expected:
|
||||
// wch := m1.Watch("a")
|
||||
// m2.Put("a", "b")
|
||||
// <-wch
|
||||
// If the revision is only bound on the first observed event,
|
||||
// if wch is disconnected before the Put is issued, then reconnects
|
||||
// after it is committed, it'll miss the Put.
|
||||
if ws.initReq.rev == 0 {
|
||||
nextRev = wr.Header.Revision
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// current progress of watch; <= store revision
|
||||
nextRev = wr.Header.Revision
|
||||
}
|
||||
|
||||
if len(wr.Events) > 0 {
|
||||
nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
|
||||
}
|
||||
ws.initReq.rev = nextRev
|
||||
|
||||
// created event is already sent above,
|
||||
// watcher should not post duplicate events
|
||||
if wr.Created {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO pause channel if buffer gets too large
|
||||
ws.buf = append(ws.buf, wr)
|
||||
case <-w.ctx.Done():
|
||||
return
|
||||
case <-ws.initReq.ctx.Done():
|
||||
return
|
||||
case <-resumec:
|
||||
@ -630,34 +660,83 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
|
||||
// connect to grpc stream
|
||||
// mark all substreams as resuming
|
||||
close(w.resumec)
|
||||
w.resumec = make(chan struct{})
|
||||
w.joinSubstreams()
|
||||
for _, ws := range w.substreams {
|
||||
ws.id = -1
|
||||
w.resuming = append(w.resuming, ws)
|
||||
}
|
||||
// strip out nils, if any
|
||||
var resuming []*watcherStream
|
||||
for _, ws := range w.resuming {
|
||||
if ws != nil {
|
||||
resuming = append(resuming, ws)
|
||||
}
|
||||
}
|
||||
w.resuming = resuming
|
||||
w.substreams = make(map[int64]*watcherStream)
|
||||
|
||||
// connect to grpc stream while accepting watcher cancelation
|
||||
stopc := make(chan struct{})
|
||||
donec := w.waitCancelSubstreams(stopc)
|
||||
wc, err := w.openWatchClient()
|
||||
close(stopc)
|
||||
<-donec
|
||||
|
||||
// serve all non-closing streams, even if there's a client error
|
||||
// so that the teardown path can shutdown the streams as expected.
|
||||
for _, ws := range w.resuming {
|
||||
if ws.closing {
|
||||
continue
|
||||
}
|
||||
ws.donec = make(chan struct{})
|
||||
w.wg.Add(1)
|
||||
go w.serveSubstream(ws, w.resumec)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, v3rpc.Error(err)
|
||||
}
|
||||
// mark all substreams as resuming
|
||||
if len(w.substreams)+len(w.resuming) > 0 {
|
||||
close(w.resumec)
|
||||
w.resumec = make(chan struct{})
|
||||
w.joinSubstreams()
|
||||
for _, ws := range w.substreams {
|
||||
ws.id = -1
|
||||
w.resuming = append(w.resuming, ws)
|
||||
}
|
||||
for _, ws := range w.resuming {
|
||||
if ws == nil || ws.closing {
|
||||
continue
|
||||
}
|
||||
ws.donec = make(chan struct{})
|
||||
go w.serveSubstream(ws, w.resumec)
|
||||
}
|
||||
}
|
||||
w.substreams = make(map[int64]*watcherStream)
|
||||
|
||||
// receive data from new grpc stream
|
||||
go w.serveWatchClient(wc)
|
||||
return wc, nil
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(w.resuming))
|
||||
donec := make(chan struct{})
|
||||
for i := range w.resuming {
|
||||
go func(ws *watcherStream) {
|
||||
defer wg.Done()
|
||||
if ws.closing {
|
||||
if ws.initReq.ctx.Err() != nil && ws.outc != nil {
|
||||
close(ws.outc)
|
||||
ws.outc = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ws.initReq.ctx.Done():
|
||||
// closed ws will be removed from resuming
|
||||
ws.closing = true
|
||||
close(ws.outc)
|
||||
ws.outc = nil
|
||||
go func() { w.closingc <- ws }()
|
||||
case <-stopc:
|
||||
}
|
||||
}(w.resuming[i])
|
||||
}
|
||||
go func() {
|
||||
defer close(donec)
|
||||
wg.Wait()
|
||||
}()
|
||||
return donec
|
||||
}
|
||||
|
||||
// joinSubstream waits for all substream goroutines to complete
|
||||
func (w *watchGrpcStream) joinSubstreams() {
|
||||
for _, ws := range w.substreams {
|
||||
@ -674,9 +753,9 @@ func (w *watchGrpcStream) joinSubstreams() {
|
||||
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
|
||||
for {
|
||||
select {
|
||||
case <-w.stopc:
|
||||
case <-w.ctx.Done():
|
||||
if err == nil {
|
||||
return nil, context.Canceled
|
||||
return nil, w.ctx.Err()
|
||||
}
|
||||
return nil, err
|
||||
default:
|
||||
|
cmd/vendor/github.com/akrennmair/gopcap/LICENSE (generated, vendored; 27 lines removed)
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Andreas Krennmair nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
Some files were not shown because too many files have changed in this diff.