Compare commits
794 Commits
v3.0.1
...
v3.1.0-alp
Author | SHA1 | Date | |
---|---|---|---|
5c2053109b | |||
8827619f5b | |||
143e2f27fc | |||
d6904ce415 | |||
c6feb695dc | |||
37fa6ac45c | |||
2724c3946e | |||
c658fa62c5 | |||
624eb609fa | |||
1b1e54a281 | |||
9913e0073c | |||
7cd7b5d539 | |||
a12b317552 | |||
bb337c87d0 | |||
fb760b4c53 | |||
d814804fa1 | |||
cd3a7fb833 | |||
64e1a327ee | |||
b3a083d336 | |||
5cfa9e2384 | |||
e77baa3dcb | |||
059f419ac5 | |||
82af0c4a7d | |||
9b1fe45853 | |||
004a5f0dbc | |||
aa7a35798d | |||
5bd251a6fa | |||
c0981a90f7 | |||
c74ac99871 | |||
3730802fef | |||
8eac9fb93d | |||
4211c0b7af | |||
eeca614cd3 | |||
672472f85e | |||
4e2b09a7ca | |||
c350cd7679 | |||
9b91e96510 | |||
9f829fdab7 | |||
c6bfdb909b | |||
afef9cc312 | |||
6f4e3696d2 | |||
c7212b438d | |||
0d35ba9b94 | |||
e6a7f25065 | |||
cfe717e926 | |||
8c492c70ef | |||
56084a7cc8 | |||
fa2e9c2449 | |||
17e7f83212 | |||
b0481ba858 | |||
3df8838501 | |||
af0264d2e6 | |||
ce01fb3cdf | |||
8a63071463 | |||
ef1ef0ba16 | |||
710b14ce56 | |||
840f4d48c8 | |||
bfb9d837d9 | |||
caaa8a48aa | |||
03b9d6f24c | |||
9a67d71e6c | |||
8f47468a40 | |||
a571655983 | |||
0250f0c984 | |||
92f141d670 | |||
d5edb62bd0 | |||
b22b405465 | |||
20fc9dc463 | |||
ccb46d2024 | |||
0b675845f6 | |||
aa6b1e6a10 | |||
b7dc6cc604 | |||
04a4cea630 | |||
4c08f6767c | |||
55ba3d95fb | |||
78cfc8db95 | |||
63b0cd470d | |||
0712ebc9b5 | |||
2e25a772a5 | |||
617d2d5b98 | |||
3132e36bf3 | |||
33b3fdc627 | |||
758f0d9017 | |||
17377f5642 | |||
8b764aac71 | |||
bb3ba1ee1c | |||
28d80ad709 | |||
e9f841627c | |||
4563efd766 | |||
68f2fdc1ff | |||
bd7107bd4b | |||
c449da6ff9 | |||
0cc2f82e7e | |||
1aec483e42 | |||
1defeda792 | |||
0b6350227c | |||
656167d760 | |||
a6c905ad96 | |||
f411583ed1 | |||
534cb0b749 | |||
7b7b29ad1e | |||
5ea6990a73 | |||
ce49fb6ec4 | |||
7e182fa24a | |||
b24527f2f0 | |||
ad318ee891 | |||
af5ab7b351 | |||
7644a8ad76 | |||
2752169d6a | |||
c1948f2940 | |||
da6a0f0594 | |||
96ed856bca | |||
e508ce36ef | |||
0b9c65c82f | |||
fd0539c8cc | |||
d36c0a1444 | |||
bc5d7bbe03 | |||
271df0dd71 | |||
b17b482268 | |||
65fb1ad362 | |||
4a33aa3917 | |||
1ebeef5cbf | |||
1b40fe7709 | |||
da26e230a0 | |||
f36267bf74 | |||
a66b1e7c60 | |||
5c8ba23767 | |||
ec9e77db96 | |||
2e0dc8467d | |||
cccbf302f2 | |||
56cfe40184 | |||
b56ee178d5 | |||
0d07154926 | |||
81bd381048 | |||
805d4cbd93 | |||
eded62e60c | |||
5b14b834c9 | |||
8cd47c4348 | |||
f7293125cf | |||
51b4d6b7a8 | |||
acc270edbf | |||
ed2b3314b8 | |||
e93ee6179c | |||
666e7bd120 | |||
b1740f5fe4 | |||
c59e0aa83e | |||
7b2f769643 | |||
3489fa82fb | |||
d3ecebd14e | |||
26999db927 | |||
9ef0f5ef8a | |||
9e5bccd458 | |||
b982c80c14 | |||
48706a9cd6 | |||
5b60be9626 | |||
d016383740 | |||
44e710f76c | |||
a6d22b96c3 | |||
2d552927e0 | |||
a1598d767b | |||
54ab9a1aba | |||
3aa2d1b40e | |||
c8ad147c0a | |||
e29c79c54c | |||
28277b5a65 | |||
2943bf9086 | |||
48941cea95 | |||
ff7458508f | |||
b9cd329c61 | |||
771ee43169 | |||
5c06fc9093 | |||
2da7b63809 | |||
fb39e96862 | |||
572bfd99ff | |||
82053f04b2 | |||
7873c25abd | |||
e7314a2460 | |||
9e9bbb829e | |||
547bf1a92d | |||
9aee3f01cd | |||
9497e9678c | |||
48f4a7d037 | |||
a7a867c1e6 | |||
f4c30425c0 | |||
452dedf8ab | |||
f6cda8ac0b | |||
396fac416e | |||
db7e38b0ed | |||
69ed560fae | |||
754b9025c4 | |||
1c59708c51 | |||
524a5a1afb | |||
45079ec6c1 | |||
4f150b06e5 | |||
fa79d42b98 | |||
86bf2bc443 | |||
e53b99588a | |||
5e963608b7 | |||
3552420dfd | |||
64ac631863 | |||
f73258a51f | |||
0bf2ef3c1b | |||
a0759298c5 | |||
017aac88a8 | |||
0be190df4d | |||
1437388f77 | |||
c388b2f22f | |||
a50c707050 | |||
3a49cbb769 | |||
af4f82228c | |||
df54ad2208 | |||
267063efd0 | |||
417b9469aa | |||
254c0ea814 | |||
4f5cacc835 | |||
f1ead43482 | |||
58a36cb651 | |||
0d8d9a374c | |||
488ae52a51 | |||
f2b7c501cc | |||
bb110b0a2d | |||
159c8ee6e0 | |||
1c989edb47 | |||
3dc12e33f1 | |||
8e4fcaa6dc | |||
86dcfbf205 | |||
83e66d2962 | |||
c12104bd15 | |||
7f3d4bfae5 | |||
959f860a40 | |||
0c37df7265 | |||
e1789aa531 | |||
028b954052 | |||
49ef47a9a4 | |||
13f79affb6 | |||
aa89bc35fd | |||
722d66b03d | |||
be38c50567 | |||
1d58c7d3b2 | |||
3b92384394 | |||
c39b7205a6 | |||
3d5d3b90e9 | |||
0504b277b6 | |||
4c7bced34e | |||
8c88c1611e | |||
784c4446d9 | |||
262c98f327 | |||
83de13e4a8 | |||
940402a27d | |||
8db4f5b8e1 | |||
146bce3377 | |||
eaa5d9772f | |||
c8bbb8c53e | |||
5e6d2a23b7 | |||
01471481a9 | |||
f4b6ed2469 | |||
da1e022890 | |||
5e9fe0dc23 | |||
5630a76766 | |||
8021487b7a | |||
a8fc4396e2 | |||
9b3b1f80dd | |||
00f5a01378 | |||
cc4f4b47bc | |||
a20d4a2d31 | |||
14f6dd4ded | |||
10c9e238f0 | |||
f9d122066e | |||
57fde954b9 | |||
d0fa390048 | |||
5aa935f3b7 | |||
f2fedbae9b | |||
a5022c1cba | |||
e7a7fb2bb1 | |||
6655afda4b | |||
47b6449934 | |||
30cf8b7f0f | |||
83dd121bae | |||
38c370a7c5 | |||
fb00a32b86 | |||
f91f7dfb91 | |||
3f0f4bfee7 | |||
28b797b538 | |||
cf063ed475 | |||
b499f69181 | |||
e1519cf460 | |||
8d7703528a | |||
3eadf964f4 | |||
ee3797ddff | |||
46765ad79c | |||
462eb511c5 | |||
b125d590cf | |||
b9d01fb98b | |||
a4ef36c8bf | |||
d5d2370fc8 | |||
961b03420e | |||
16b2d9ca5e | |||
449923c98b | |||
7b84456366 | |||
c3f069c9fc | |||
0307382c1a | |||
db834301eb | |||
feaff17259 | |||
2cc245e8bf | |||
29372f9dd2 | |||
ddf65421e7 | |||
b207dd095c | |||
d5900e8b63 | |||
e810dec662 | |||
e8594b60b1 | |||
d23392ed8e | |||
bd450c1ba3 | |||
561c3b918a | |||
9eb6ea34bd | |||
d0d8e49e20 | |||
911c8442b7 | |||
96e018634a | |||
f14fd43548 | |||
0503676bde | |||
ae4b4109b2 | |||
1b5a129bbe | |||
19b35c939a | |||
4d3b281369 | |||
6b671b88dc | |||
d788eb8d92 | |||
a205242ca5 | |||
64a0e34602 | |||
7b11c288fe | |||
1fec4ba127 | |||
817de6d212 | |||
5eff6fb7db | |||
f975fe8068 | |||
0a00328a7c | |||
82a3d90763 | |||
92a0f08722 | |||
67b1c7cce5 | |||
429d5ab20b | |||
c6c6cfb502 | |||
c33ea20fef | |||
965b2901d5 | |||
aa9837e8ff | |||
e742ff331f | |||
6205a9a6cb | |||
de06dc1272 | |||
d3812ed664 | |||
f8ee322b08 | |||
8a32929d29 | |||
937ae658dd | |||
a1ce07a321 | |||
a56cb82180 | |||
e64ef3f261 | |||
f4141f0f51 | |||
d72cee1b0c | |||
1644679d00 | |||
7eb43ea75b | |||
f5549cba2a | |||
de864d3b58 | |||
2bb1f9c8a4 | |||
eb97aba581 | |||
6de993b468 | |||
06e2338108 | |||
d219e96359 | |||
b6f5b6b1c9 | |||
2b5a5c77cf | |||
a5e4fbd335 | |||
2ca87f6c03 | |||
81f5e31ed2 | |||
2d3eda4afa | |||
1c83a46c6d | |||
2b996b6038 | |||
88a77f30e1 | |||
8c1c291332 | |||
5e651a0d0d | |||
c3c41234f1 | |||
c7e4198742 | |||
8f3a11c73c | |||
f58a119b44 | |||
adbd936f22 | |||
39f39c185e | |||
918af500c3 | |||
311c19e494 | |||
5f0c122496 | |||
bb28c9ab00 | |||
c6cf015e26 | |||
fb7c4da361 | |||
978ae9de29 | |||
7678b84f2c | |||
619a40b22b | |||
f6a1585902 | |||
107a07563f | |||
69204397ee | |||
f505bcb91a | |||
f1f31f1015 | |||
c71f0ea174 | |||
9063ce5e3f | |||
9764652356 | |||
854a215329 | |||
4a7fabd219 | |||
6c3efde51b | |||
d69d438289 | |||
7ed8a133d2 | |||
c38f0290a7 | |||
c46955b60a | |||
e2a956c0c4 | |||
bd62b0a646 | |||
ddddecc3ab | |||
75c06cacae | |||
4d59b6f52c | |||
fd757756f5 | |||
29a077bdbe | |||
41dee84733 | |||
eb36d0dbba | |||
a752338d45 | |||
d1809830bb | |||
ab4ac828f3 | |||
e218834b58 | |||
cd781bf30c | |||
6e7baab32c | |||
cabd28516c | |||
c8cc87c3f5 | |||
bc9882f521 | |||
57c68ab1db | |||
c30a436829 | |||
33c3583b50 | |||
76e62c39b0 | |||
bf71497537 | |||
c0a8da7fd0 | |||
4db07dbc93 | |||
755eee0d30 | |||
b23045e34d | |||
fc4b30a1e0 | |||
9836990aa7 | |||
87498e0209 | |||
59ac42ff38 | |||
911dcc9386 | |||
a2715e3bda | |||
9311d7b77e | |||
5a83f05e96 | |||
a60387bab2 | |||
564bf8d17e | |||
4d309f0cb7 | |||
06da46c4ee | |||
b43722dd48 | |||
8d12017fe2 | |||
992f628e6e | |||
e2088b8073 | |||
86de0797e1 | |||
72eb2d8893 | |||
4c9a2a65c9 | |||
943fe70178 | |||
79d25a6884 | |||
3d8e4ace47 | |||
76a99fa1c3 | |||
1153350a95 | |||
cfe09d34b8 | |||
205f10aeb6 | |||
6136b26f38 | |||
273c6f6ba9 | |||
de99dfb134 | |||
982e18d80b | |||
6e95ce26fb | |||
13c2d32061 | |||
a75688bd17 | |||
3c3b33b00f | |||
0090573749 | |||
de2c3ec3db | |||
640d511684 | |||
914e9266cb | |||
0d6c028aa2 | |||
484f579905 | |||
864947a825 | |||
d6b22323a8 | |||
6079be7dae | |||
537057bd11 | |||
42fc36b4d6 | |||
7f0f9795bf | |||
2b4c37f54a | |||
418bb5e176 | |||
1cad722a6d | |||
ac96963003 | |||
4fa9363aca | |||
020a24f1c3 | |||
38b69a9301 | |||
fffa484a9f | |||
b4ce427d45 | |||
116a1b5855 | |||
abbefc9e25 | |||
5b288f6cd1 | |||
4ff6c72257 | |||
8f4a36fd32 | |||
ec5c5d9ddf | |||
c603b5e6a1 | |||
2bf55e3a15 | |||
fee9e2b183 | |||
de638a5e4d | |||
214c1e55b0 | |||
32553c5796 | |||
624187d25f | |||
00c9fe4753 | |||
f18d5433cc | |||
42db8f55b2 | |||
e001848270 | |||
5066981cc7 | |||
25aeeb35c3 | |||
68ece954fb | |||
be001c44e8 | |||
9510bd6036 | |||
0f0d32b073 | |||
ff5709bb41 | |||
ab17165352 | |||
768ccb8c10 | |||
becbd9f3d6 | |||
7b3d502b96 | |||
17e0164f57 | |||
54df540c2c | |||
15aa64eb3c | |||
65d7e7963a | |||
8c8742f43c | |||
a289bf58e6 | |||
299ebc6137 | |||
a7b098b26d | |||
82ddeb38b4 | |||
aba478fb8a | |||
edcfcae332 | |||
ef6b74411c | |||
8abae076d1 | |||
6e290abee2 | |||
99e0655c2f | |||
80c2e4098d | |||
1c5754f02d | |||
e5f0cdcc69 | |||
783675f91c | |||
d3d954d659 | |||
e177d9eda2 | |||
1bf78476cf | |||
c7c5cd324b | |||
fcc96c9ebd | |||
d914502090 | |||
27a30768e1 | |||
a1d823c2aa | |||
a61862acc7 | |||
5cccb49498 | |||
5271cf0160 | |||
8d897fd51f | |||
e177f391f2 | |||
32ed0aa0b3 | |||
969bcd282b | |||
7fbc1e39a6 | |||
7bfe75cbf3 | |||
3a5e418ff9 | |||
cae56f583e | |||
e1892e264d | |||
851d69181d | |||
b86e723107 | |||
c920ce0453 | |||
fd24340903 | |||
58aa3483c3 | |||
6dbdf6e55f | |||
3f74e9db0d | |||
b61f882635 | |||
1c8b30dbdb | |||
dc80ae86d9 | |||
8893ab0198 | |||
984badeb03 | |||
50be793f09 | |||
e7c1594c82 | |||
6e53f75092 | |||
cab2e45319 | |||
336e4f2f28 | |||
d9e939d5d1 | |||
52764f1e5a | |||
bdfbd26e94 | |||
2d761d64a4 | |||
884452c403 | |||
cb9ee7320b | |||
331ec82400 | |||
4a5795b55f | |||
04155423f5 | |||
4835322aa1 | |||
b26f1bb2b6 | |||
93e3112471 | |||
3839a55910 | |||
34602b87ec | |||
5f3aa43899 | |||
ecebe7b979 | |||
5b92e17e86 | |||
4a7b730e69 | |||
4ec94989cf | |||
b2b98399fb | |||
1ba7bb237f | |||
38d38f2635 | |||
1dfafd8fe0 | |||
b50d2395fd | |||
0419d3ecf7 | |||
3e21d9f023 | |||
bf0be0fe5e | |||
b3f8490660 | |||
d8f0ef0e80 | |||
d9a8a326df | |||
07ed4da2ff | |||
51c5c307fa | |||
ee78f590ba | |||
575682f593 | |||
14d7dc940d | |||
ba2725c2d0 | |||
ceb9fe4822 | |||
b0f2e5e64a | |||
8e59fb749c | |||
c0cc161ba8 | |||
27b03f0ed5 | |||
35d379b052 | |||
2f7da66d43 | |||
6b487fb199 | |||
3d109be3b4 | |||
071eac3838 | |||
c7881fddc2 | |||
9bcf5a83fb | |||
c32dd164fe | |||
8368e6a992 | |||
97ff1abb3e | |||
439b96f090 | |||
06fd46f835 | |||
41a98dbd66 | |||
f6ef6157cc | |||
c0299ca6f4 | |||
f4f33ea767 | |||
81d5ae3ce1 | |||
7114a27345 | |||
8273e1c07e | |||
a243064e76 | |||
6392ef5c44 | |||
7432e9fbe9 | |||
b9f6de9277 | |||
b2c1112288 | |||
c36a40ca15 | |||
eb08f2274e | |||
cc26f2c889 | |||
4bc29e2b9c | |||
8a21be721f | |||
7edb6bcbe1 | |||
6f3a40cb53 | |||
ea0a569c4d | |||
f65e75e4b3 | |||
c0f292e6b8 | |||
55ca788efe | |||
2b6f04a58e | |||
a3347e3e68 | |||
5b0d52f8c3 | |||
e8e561e8f5 | |||
e5b5cf02d3 | |||
0d9b6ba0ab | |||
da44e17b58 | |||
c396b6aaaa | |||
c47689d98f | |||
474eb1b44b | |||
f78d4713ea | |||
90889ebc0f | |||
df94f58462 | |||
eded9f5f84 | |||
a153448b84 | |||
abb20ec51f | |||
7c39f41e7c | |||
b970e03e19 | |||
ce8900e3b4 | |||
e6d15b966c | |||
6f0a67603a | |||
a2760c9f49 | |||
c30f89f1d0 | |||
946b3cce1d | |||
4f2da16d82 | |||
427496ebb8 | |||
dc2dced129 | |||
b6a497214e | |||
0b0cbaac09 | |||
d4e0e419dc | |||
16b0c1d1e1 | |||
88a9cf2cea | |||
c4a280e511 | |||
244b1d7d20 | |||
1c9e0a0e33 | |||
4db8f018cb | |||
3a080143a7 | |||
3451623c71 | |||
8c4df9a96f | |||
234c30c061 | |||
7ec822107a | |||
12bf1a3382 | |||
a78cdeae81 | |||
929d6ab62c | |||
c853704ac9 | |||
c642430fae | |||
066afd6abd | |||
f19cef960e | |||
beab76c7a9 | |||
ff5ddd0909 | |||
660f0fcc3d | |||
8c71eb71df | |||
c52bf1ac5d | |||
9e0de02fde | |||
c7dd74d8d3 | |||
881a120453 | |||
b566ca225c | |||
8d99a666f9 | |||
c76dcc5190 | |||
df61322e5b | |||
7cb61af245 | |||
70bf768005 | |||
8a8a8253fa | |||
9b5e99efe0 | |||
13a4056327 | |||
5991209c2d | |||
7cc4596ebd | |||
9405583745 | |||
1af7c400d1 | |||
a5f043c85b | |||
c6a3048e81 | |||
ba023e539a | |||
c8c5f41a01 | |||
8d4701bb1d | |||
40c4a7894d | |||
ab6f49dc67 | |||
a53f538f27 | |||
d163aefc1a | |||
bf0ab6a2df | |||
c7a0830a62 | |||
b3464a918b | |||
b7f5f8fc99 | |||
581f847e06 | |||
0d44947c11 | |||
78b143b800 | |||
a2f6ec3128 | |||
c68d60c99f | |||
4cd834910e | |||
cb1a1426b1 | |||
04a9141e45 | |||
548360b140 | |||
8ce7481a7f | |||
74d75a96eb | |||
0938c861f0 | |||
8c96d2573f | |||
ad556b7e7d | |||
ea0eab84a4 | |||
5f4d1c8891 | |||
dc49016987 | |||
0e137e21bc | |||
9b47ca5972 | |||
3b80df7f4e | |||
b7d0497c47 | |||
150321f5ac | |||
2cc2372165 | |||
6d8c647db8 | |||
5f459a64ce | |||
402df5bd03 | |||
63f78bf7c8 | |||
66d195ff75 | |||
ff908b4ba8 | |||
dc218fb41d | |||
fd5bc21522 | |||
7f3b2e23a4 | |||
e020b2a228 | |||
8e9097d0c0 | |||
3b91648070 | |||
a4667cb863 | |||
2e2f405b1e | |||
f28a87d835 | |||
83d9ce3d7c | |||
8f8ff4d519 | |||
745e1e2cf9 | |||
15f2fd0726 | |||
df31eab136 | |||
66107b8653 | |||
8e825de35f | |||
8216fdc59f | |||
4f57bb313f | |||
1b2f025414 | |||
1db4ee8c61 | |||
81322b498e | |||
ede0b584b8 | |||
da180e0790 | |||
bc6d7659af | |||
ae057ec508 | |||
dced92f8bd | |||
5f1c763993 | |||
ddffdc3e37 | |||
ec232ec9d8 | |||
38035c8c13 | |||
ef9754910e | |||
1c25aa6c48 | |||
0cd5c658aa | |||
ac68f70843 | |||
0faae33ace | |||
8df37d53d6 | |||
da85108ca2 | |||
7ba352d9ca |
1
.gitignore
vendored
1
.gitignore
vendored
@ -10,3 +10,4 @@
|
|||||||
/hack/insta-discovery/.env
|
/hack/insta-discovery/.env
|
||||||
*.test
|
*.test
|
||||||
tools/functional-tester/docker/bin
|
tools/functional-tester/docker/bin
|
||||||
|
hack/tls-setup/certs
|
||||||
|
30
.travis.yml
30
.travis.yml
@ -4,7 +4,6 @@ go_import_path: github.com/coreos/etcd
|
|||||||
sudo: false
|
sudo: false
|
||||||
|
|
||||||
go:
|
go:
|
||||||
- 1.5
|
|
||||||
- 1.6
|
- 1.6
|
||||||
- tip
|
- tip
|
||||||
|
|
||||||
@ -15,25 +14,19 @@ env:
|
|||||||
- TARGET=amd64
|
- TARGET=amd64
|
||||||
- TARGET=arm64
|
- TARGET=arm64
|
||||||
- TARGET=arm
|
- TARGET=arm
|
||||||
- TARGET=ppc64le
|
- TARGET=386
|
||||||
|
|
||||||
matrix:
|
matrix:
|
||||||
fast_finish: true
|
fast_finish: true
|
||||||
allow_failures:
|
allow_failures:
|
||||||
- go: tip
|
- go: tip
|
||||||
exclude:
|
exclude:
|
||||||
- go: 1.5
|
|
||||||
env: TARGET=arm
|
|
||||||
- go: 1.5
|
|
||||||
env: TARGET=ppc64le
|
|
||||||
- go: 1.6
|
|
||||||
env: TARGET=arm64
|
|
||||||
- go: tip
|
- go: tip
|
||||||
env: TARGET=arm
|
env: TARGET=arm
|
||||||
- go: tip
|
- go: tip
|
||||||
env: TARGET=arm64
|
env: TARGET=arm64
|
||||||
- go: tip
|
- go: tip
|
||||||
env: TARGET=ppc64le
|
env: TARGET=386
|
||||||
|
|
||||||
addons:
|
addons:
|
||||||
apt:
|
apt:
|
||||||
@ -49,12 +42,19 @@ before_install:
|
|||||||
|
|
||||||
# disable godep restore override
|
# disable godep restore override
|
||||||
install:
|
install:
|
||||||
- pushd cmd/ && go get -t -v ./... && popd
|
- pushd cmd/etcd && go get -t -v ./... && popd
|
||||||
|
|
||||||
script:
|
script:
|
||||||
- >
|
- >
|
||||||
if [ "${TARGET}" == "amd64" ]; then
|
case "${TARGET}" in
|
||||||
GOARCH="${TARGET}" ./test;
|
amd64)
|
||||||
else
|
GOARCH=amd64 ./test
|
||||||
GOARCH="${TARGET}" ./build;
|
;;
|
||||||
fi
|
386)
|
||||||
|
GOARCH=386 PASSES="build unit" ./test
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
# test building out of gopath
|
||||||
|
GO_BUILD_FLAGS="-a -v" GOPATH=/bad-go-path GOARCH="${TARGET}" ./build
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
@ -1,8 +1,9 @@
|
|||||||
FROM alpine:latest
|
FROM alpine:latest
|
||||||
|
|
||||||
ADD bin/etcd /usr/local/bin/
|
ADD etcd /usr/local/bin/
|
||||||
ADD bin/etcdctl /usr/local/bin/
|
ADD etcdctl /usr/local/bin/
|
||||||
RUN mkdir -p /var/etcd/
|
RUN mkdir -p /var/etcd/
|
||||||
|
RUN mkdir -p /var/lib/etcd/
|
||||||
|
|
||||||
EXPOSE 2379 2380
|
EXPOSE 2379 2380
|
||||||
|
|
||||||
|
@ -25,13 +25,13 @@ curl -L http://localhost:2379/v3alpha/kv/range \
|
|||||||
|
|
||||||
## Swagger
|
## Swagger
|
||||||
|
|
||||||
Generated [Swapper][swagger] API definitions can be found at [rpc.swagger.json][swagger-doc].
|
Generated [Swagger][swagger] API definitions can be found at [rpc.swagger.json][swagger-doc].
|
||||||
|
|
||||||
[api-ref]: ./api_reference_v3.md
|
[api-ref]: ./api_reference_v3.md
|
||||||
[go-client]: https://github.com/coreos/etcd/tree/master/clientv3
|
[go-client]: https://github.com/coreos/etcd/tree/master/clientv3
|
||||||
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
|
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
|
||||||
[grpc]: http://www.grpc.io/
|
[grpc]: http://www.grpc.io/
|
||||||
[grpc-gateway]: https://github.com/gengo/grpc-gateway
|
[grpc-gateway]: https://github.com/grpc-ecosystem/grpc-gateway
|
||||||
[json-mapping]: https://developers.google.com/protocol-buffers/docs/proto3#json
|
[json-mapping]: https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||||
[swagger]: http://swagger.io/
|
[swagger]: http://swagger.io/
|
||||||
[swagger-doc]: apispec/swagger/rpc.swagger.json
|
[swagger-doc]: apispec/swagger/rpc.swagger.json
|
||||||
|
@ -59,6 +59,7 @@ for grpc-gateway
|
|||||||
| LeaseGrant | LeaseGrantRequest | LeaseGrantResponse | LeaseGrant creates a lease which expires if the server does not receive a keepAlive within a given time to live period. All keys attached to the lease will be expired and deleted if the lease expires. Each expired key generates a delete event in the event history. |
|
| LeaseGrant | LeaseGrantRequest | LeaseGrantResponse | LeaseGrant creates a lease which expires if the server does not receive a keepAlive within a given time to live period. All keys attached to the lease will be expired and deleted if the lease expires. Each expired key generates a delete event in the event history. |
|
||||||
| LeaseRevoke | LeaseRevokeRequest | LeaseRevokeResponse | LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. |
|
| LeaseRevoke | LeaseRevokeRequest | LeaseRevokeResponse | LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. |
|
||||||
| LeaseKeepAlive | LeaseKeepAliveRequest | LeaseKeepAliveResponse | LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client to the server and streaming keep alive responses from the server to the client. |
|
| LeaseKeepAlive | LeaseKeepAliveRequest | LeaseKeepAliveResponse | LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client to the server and streaming keep alive responses from the server to the client. |
|
||||||
|
| LeaseTimeToLive | LeaseTimeToLiveRequest | LeaseTimeToLiveResponse | LeaseTimeToLive retrieves lease information. |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -427,6 +428,7 @@ Empty field.
|
|||||||
| ----- | ----------- | ---- |
|
| ----- | ----------- | ---- |
|
||||||
| key | key is the first key to delete in the range. | bytes |
|
| key | key is the first key to delete in the range. | bytes |
|
||||||
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
|
| range_end | range_end is the key following the last key to delete for the range [key, range_end). If range_end is not given, the range is defined to contain only the key argument. If range_end is '\0', the range is all keys greater than or equal to the key argument. | bytes |
|
||||||
|
| prev_kv | If prev_kv is set, etcd gets the previous key-value pairs before deleting it. The previous key-value pairs will be returned in the delte response. | bool |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -436,6 +438,7 @@ Empty field.
|
|||||||
| ----- | ----------- | ---- |
|
| ----- | ----------- | ---- |
|
||||||
| header | | ResponseHeader |
|
| header | | ResponseHeader |
|
||||||
| deleted | deleted is the number of keys deleted by the delete range request. | int64 |
|
| deleted | deleted is the number of keys deleted by the delete range request. | int64 |
|
||||||
|
| prev_kvs | if prev_kv is set in the request, the previous key-value pairs will be returned. | (slice of) mvccpb.KeyValue |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -508,6 +511,27 @@ Empty field.
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
##### message `LeaseTimeToLiveRequest` (etcdserver/etcdserverpb/rpc.proto)
|
||||||
|
|
||||||
|
| Field | Description | Type |
|
||||||
|
| ----- | ----------- | ---- |
|
||||||
|
| ID | ID is the lease ID for the lease. | int64 |
|
||||||
|
| keys | keys is true to query all the keys attached to this lease. | bool |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
##### message `LeaseTimeToLiveResponse` (etcdserver/etcdserverpb/rpc.proto)
|
||||||
|
|
||||||
|
| Field | Description | Type |
|
||||||
|
| ----- | ----------- | ---- |
|
||||||
|
| header | | ResponseHeader |
|
||||||
|
| ID | ID is the lease ID from the keep alive request. | int64 |
|
||||||
|
| TTL | TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. | int64 |
|
||||||
|
| grantedTTL | GrantedTTL is the initial granted time in seconds upon lease creation/renewal. | int64 |
|
||||||
|
| keys | Keys is the list of keys attached to this lease. | (slice of) bytes |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
##### message `Member` (etcdserver/etcdserverpb/rpc.proto)
|
##### message `Member` (etcdserver/etcdserverpb/rpc.proto)
|
||||||
|
|
||||||
| Field | Description | Type |
|
| Field | Description | Type |
|
||||||
@ -591,6 +615,7 @@ Empty field.
|
|||||||
| key | key is the key, in bytes, to put into the key-value store. | bytes |
|
| key | key is the key, in bytes, to put into the key-value store. | bytes |
|
||||||
| value | value is the value, in bytes, to associate with the key in the key-value store. | bytes |
|
| value | value is the value, in bytes, to associate with the key in the key-value store. | bytes |
|
||||||
| lease | lease is the lease ID to associate with the key in the key-value store. A lease value of 0 indicates no lease. | int64 |
|
| lease | lease is the lease ID to associate with the key in the key-value store. A lease value of 0 indicates no lease. | int64 |
|
||||||
|
| prev_kv | If prev_kv is set, etcd gets the previous key-value pair before changing it. The previous key-value pair will be returned in the put response. | bool |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -599,6 +624,7 @@ Empty field.
|
|||||||
| Field | Description | Type |
|
| Field | Description | Type |
|
||||||
| ----- | ----------- | ---- |
|
| ----- | ----------- | ---- |
|
||||||
| header | | ResponseHeader |
|
| header | | ResponseHeader |
|
||||||
|
| prev_kv | if prev_kv is set in the request, the previous key-value pair will be returned. | mvccpb.KeyValue |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -613,6 +639,12 @@ Empty field.
|
|||||||
| sort_order | sort_order is the order for returned sorted results. | SortOrder |
|
| sort_order | sort_order is the order for returned sorted results. | SortOrder |
|
||||||
| sort_target | sort_target is the key-value field to use for sorting. | SortTarget |
|
| sort_target | sort_target is the key-value field to use for sorting. | SortTarget |
|
||||||
| serializable | serializable sets the range request to use serializable member-local reads. Range requests are linearizable by default; linearizable requests have higher latency and lower throughput than serializable requests but reflect the current consensus of the cluster. For better performance, in exchange for possible stale reads, a serializable range request is served locally without needing to reach consensus with other nodes in the cluster. | bool |
|
| serializable | serializable sets the range request to use serializable member-local reads. Range requests are linearizable by default; linearizable requests have higher latency and lower throughput than serializable requests but reflect the current consensus of the cluster. For better performance, in exchange for possible stale reads, a serializable range request is served locally without needing to reach consensus with other nodes in the cluster. | bool |
|
||||||
|
| keys_only | keys_only when set returns only the keys and not the values. | bool |
|
||||||
|
| count_only | count_only when set returns only the count of the keys in the range. | bool |
|
||||||
|
| min_mod_revision | min_mod_revision is the lower bound for returned key mod revisions; all keys with lesser mod revisions will be filtered away. | int64 |
|
||||||
|
| max_mod_revision | max_mod_revision is the upper bound for returned key mod revisions; all keys with greater mod revisions will be filtered away. | int64 |
|
||||||
|
| min_create_revision | min_create_revision is the lower bound for returned key create revisions; all keys with lesser create trevisions will be filtered away. | int64 |
|
||||||
|
| max_create_revision | max_create_revision is the upper bound for returned key create revisions; all keys with greater create revisions will be filtered away. | int64 |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -621,8 +653,9 @@ Empty field.
|
|||||||
| Field | Description | Type |
|
| Field | Description | Type |
|
||||||
| ----- | ----------- | ---- |
|
| ----- | ----------- | ---- |
|
||||||
| header | | ResponseHeader |
|
| header | | ResponseHeader |
|
||||||
| kvs | kvs is the list of key-value pairs matched by the range request. | (slice of) mvccpb.KeyValue |
|
| kvs | kvs is the list of key-value pairs matched by the range request. kvs is empty when count is requested. | (slice of) mvccpb.KeyValue |
|
||||||
| more | more indicates if there are more keys to return in the requested range. | bool |
|
| more | more indicates if there are more keys to return in the requested range. | bool |
|
||||||
|
| count | count is set to the number of keys within the range when requested. | int64 |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -732,6 +765,8 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
|
|||||||
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. | bytes |
|
| range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. | bytes |
|
||||||
| start_revision | start_revision is an optional revision to watch from (inclusive). No start_revision is "now". | int64 |
|
| start_revision | start_revision is an optional revision to watch from (inclusive). No start_revision is "now". | int64 |
|
||||||
| progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. The etcd server may decide how often it will send notifications based on current load. | bool |
|
| progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. The etcd server may decide how often it will send notifications based on current load. | bool |
|
||||||
|
| filters | filter out put event. filter out delete event. filters filter the events at server side before it sends back to the watcher. | (slice of) FilterType |
|
||||||
|
| prev_kv | If prev_kv is set, created watcher gets the previous KV before the event happens. If the previous KV is already compacted, nothing will be returned. | bool |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -764,6 +799,7 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
|
|||||||
| ----- | ----------- | ---- |
|
| ----- | ----------- | ---- |
|
||||||
| type | type is the kind of event. If type is a PUT, it indicates new data has been stored to the key. If type is a DELETE, it indicates the key was deleted. | EventType |
|
| type | type is the kind of event. If type is a PUT, it indicates new data has been stored to the key. If type is a DELETE, it indicates the key was deleted. | EventType |
|
||||||
| kv | kv holds the KeyValue for the event. A PUT event contains current kv pair. A PUT event with kv.Version=1 indicates the creation of a key. A DELETE/EXPIRE event contains the deleted key with its modification revision set to the revision of deletion. | KeyValue |
|
| kv | kv holds the KeyValue for the event. A PUT event contains current kv pair. A PUT event with kv.Version=1 indicates the creation of a key. A DELETE/EXPIRE event contains the deleted key with its modification revision set to the revision of deletion. | KeyValue |
|
||||||
|
| prev_kv | prev_kv holds the key-value pair before the event happens. | KeyValue |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -789,6 +825,22 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
##### message `LeaseInternalRequest` (lease/leasepb/lease.proto)
|
||||||
|
|
||||||
|
| Field | Description | Type |
|
||||||
|
| ----- | ----------- | ---- |
|
||||||
|
| LeaseTimeToLiveRequest | | etcdserverpb.LeaseTimeToLiveRequest |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
##### message `LeaseInternalResponse` (lease/leasepb/lease.proto)
|
||||||
|
|
||||||
|
| Field | Description | Type |
|
||||||
|
| ----- | ----------- | ---- |
|
||||||
|
| LeaseTimeToLiveResponse | | etcdserverpb.LeaseTimeToLiveResponse |
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
##### message `Permission` (auth/authpb/auth.proto)
|
##### message `Permission` (auth/authpb/auth.proto)
|
||||||
|
|
||||||
Permission is a single entity
|
Permission is a single entity
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -4,5 +4,5 @@ For the most part, the etcd project is stable, but we are still moving fast! We
|
|||||||
|
|
||||||
## The current experimental API/features are:
|
## The current experimental API/features are:
|
||||||
|
|
||||||
- v3 auth API: expect to be stale in 3.1 release
|
- v3 auth API: expect to be stable in 3.1 release
|
||||||
- etcd gateway: expect to be stable in 3.1 release
|
- etcd gateway: expect to be stable in 3.1 release
|
||||||
|
@ -21,7 +21,7 @@ OK
|
|||||||
|
|
||||||
## Read keys
|
## Read keys
|
||||||
|
|
||||||
Applications can read values of keys from an etcd cluster. Queries may read a single key, or a range of keys.
|
Applications can read values of keys from an etcd cluster. Queries may read a single key, or a range of keys.
|
||||||
|
|
||||||
Suppose the etcd cluster has stored the following keys:
|
Suppose the etcd cluster has stored the following keys:
|
||||||
|
|
||||||
@ -61,7 +61,7 @@ Suppose an etcd cluster already has the following keys:
|
|||||||
``` bash
|
``` bash
|
||||||
$ etcdctl put foo bar # revision = 2
|
$ etcdctl put foo bar # revision = 2
|
||||||
$ etcdctl put foo1 bar1 # revision = 3
|
$ etcdctl put foo1 bar1 # revision = 3
|
||||||
$ etcdctl put foo bar_new # revision = 4
|
$ etcdctl put foo bar_new # revision = 4
|
||||||
$ etcdctl put foo1 bar1_new # revision = 5
|
$ etcdctl put foo1 bar1_new # revision = 5
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -118,7 +118,7 @@ Applications can watch on a key or a range of keys to monitor for any updates.
|
|||||||
Here is the command to watch on key `foo`:
|
Here is the command to watch on key `foo`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ etcdctl watch foo
|
$ etcdctl watch foo
|
||||||
# in another terminal: etcdctl put foo bar
|
# in another terminal: etcdctl put foo bar
|
||||||
foo
|
foo
|
||||||
bar
|
bar
|
||||||
@ -145,11 +145,12 @@ Suppose we finished the following sequence of operations:
|
|||||||
``` bash
|
``` bash
|
||||||
etcdctl put foo bar # revision = 2
|
etcdctl put foo bar # revision = 2
|
||||||
etcdctl put foo1 bar1 # revision = 3
|
etcdctl put foo1 bar1 # revision = 3
|
||||||
etcdctl put foo bar_new # revision = 4
|
etcdctl put foo bar_new # revision = 4
|
||||||
etcdctl put foo1 bar1_new # revision = 5
|
etcdctl put foo1 bar1_new # revision = 5
|
||||||
```
|
```
|
||||||
|
|
||||||
Here is an example to watch the historical changes:
|
Here is an example to watch the historical changes:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# watch for changes on key `foo` since revision 2
|
# watch for changes on key `foo` since revision 2
|
||||||
$ etcdctl watch --rev=2 foo
|
$ etcdctl watch --rev=2 foo
|
||||||
@ -188,7 +189,7 @@ Applications can grant leases for keys from an etcd cluster. When a key is attac
|
|||||||
|
|
||||||
Here is the command to grant a lease:
|
Here is the command to grant a lease:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
# grant a lease with 10 second TTL
|
# grant a lease with 10 second TTL
|
||||||
$ etcdctl lease grant 10
|
$ etcdctl lease grant 10
|
||||||
lease 32695410dcc0ca06 granted with TTL(10s)
|
lease 32695410dcc0ca06 granted with TTL(10s)
|
||||||
@ -204,7 +205,7 @@ Applications revoke leases by lease ID. Revoking a lease deletes all of its atta
|
|||||||
|
|
||||||
Suppose we finished the following sequence of operations:
|
Suppose we finished the following sequence of operations:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
$ etcdctl lease grant 10
|
$ etcdctl lease grant 10
|
||||||
lease 32695410dcc0ca06 granted with TTL(10s)
|
lease 32695410dcc0ca06 granted with TTL(10s)
|
||||||
$ etcdctl put --lease=32695410dcc0ca06 foo bar
|
$ etcdctl put --lease=32695410dcc0ca06 foo bar
|
||||||
@ -213,7 +214,7 @@ OK
|
|||||||
|
|
||||||
Here is the command to revoke the same lease:
|
Here is the command to revoke the same lease:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
$ etcdctl lease revoke 32695410dcc0ca06
|
$ etcdctl lease revoke 32695410dcc0ca06
|
||||||
lease 32695410dcc0ca06 revoked
|
lease 32695410dcc0ca06 revoked
|
||||||
|
|
||||||
@ -227,17 +228,17 @@ Applications can keep a lease alive by refreshing its TTL so it does not expire.
|
|||||||
|
|
||||||
Suppose we finished the following sequence of operations:
|
Suppose we finished the following sequence of operations:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
$ etcdctl lease grant 10
|
$ etcdctl lease grant 10
|
||||||
lease 32695410dcc0ca06 granted with TTL(10s)
|
lease 32695410dcc0ca06 granted with TTL(10s)
|
||||||
```
|
```
|
||||||
|
|
||||||
Here is the command to keep the same lease alive:
|
Here is the command to keep the same lease alive:
|
||||||
|
|
||||||
```
|
```bash
|
||||||
$ etcdctl lease keep-alive 32695410dcc0ca0
|
$ etcdctl lease keep-alive 32695410dcc0ca06
|
||||||
lease 32695410dcc0ca0 keepalived with TTL(100)
|
lease 32695410dcc0ca06 keepalived with TTL(100)
|
||||||
lease 32695410dcc0ca0 keepalived with TTL(100)
|
lease 32695410dcc0ca06 keepalived with TTL(100)
|
||||||
lease 32695410dcc0ca0 keepalived with TTL(100)
|
lease 32695410dcc0ca06 keepalived with TTL(100)
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
@ -28,7 +28,7 @@ bar
|
|||||||
|
|
||||||
## Local multi-member cluster
|
## Local multi-member cluster
|
||||||
|
|
||||||
A Procfile is provided to easily set up a local multi-member cluster. Start a multi-member cluster with a few commands:
|
A `Procfile` at the base of this git repo is provided to easily set up a local multi-member cluster. To start a multi-member cluster go to the root of an etcd source tree and run:
|
||||||
|
|
||||||
```
|
```
|
||||||
# install goreman program to control Procfile-based applications.
|
# install goreman program to control Procfile-based applications.
|
||||||
|
@ -31,8 +31,8 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
|
|||||||
## Write release note
|
## Write release note
|
||||||
|
|
||||||
- Write introduction for the new release. For example, what major bug we fix, what new features we introduce or what performance improvement we make.
|
- Write introduction for the new release. For example, what major bug we fix, what new features we introduce or what performance improvement we make.
|
||||||
- Write changelog for the last release. ChangeLog should be straightforward and easy to understand for the end-user.
|
|
||||||
- Put `[GH XXXX]` at the head of change line to reference Pull Request that introduces the change. Moreover, add a link on it to jump to the Pull Request.
|
- Put `[GH XXXX]` at the head of change line to reference Pull Request that introduces the change. Moreover, add a link on it to jump to the Pull Request.
|
||||||
|
- Find PRs with `release-note` label and explain them in `NEWS` file, as a straightforward summary of changes for end-users.
|
||||||
|
|
||||||
## Tag version
|
## Tag version
|
||||||
|
|
||||||
@ -47,7 +47,7 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
|
|||||||
|
|
||||||
## Build release binaries and images
|
## Build release binaries and images
|
||||||
|
|
||||||
- Ensure `actool` is available, or installing it through `go get github.com/appc/spec/actool`.
|
- Ensure `acbuild` is available.
|
||||||
- Ensure `docker` is available.
|
- Ensure `docker` is available.
|
||||||
|
|
||||||
Run release script in root directory:
|
Run release script in root directory:
|
||||||
|
@ -11,7 +11,9 @@ The easiest way to get etcd is to use one of the pre-built release binaries whic
|
|||||||
## Build the latest version
|
## Build the latest version
|
||||||
|
|
||||||
For those wanting to try the very latest version, build etcd from the `master` branch.
|
For those wanting to try the very latest version, build etcd from the `master` branch.
|
||||||
[Go](https://golang.org/) version 1.5+ is required to build the latest version of etcd.
|
[Go](https://golang.org/) version 1.6+ (with HTTP2 support) is required to build the latest version of etcd.
|
||||||
|
etcd vendors its dependency for official release binaries, while making vendoring optional to avoid import conflicts.
|
||||||
|
[`build` script][build-script] would automatically include the vendored dependencies from [`cmd`][cmd-directory] directory.
|
||||||
|
|
||||||
Here are the commands to build an etcd binary from the `master` branch:
|
Here are the commands to build an etcd binary from the `master` branch:
|
||||||
|
|
||||||
@ -54,3 +56,6 @@ If OK is printed, then etcd is working!
|
|||||||
|
|
||||||
[github-release]: https://github.com/coreos/etcd/releases/
|
[github-release]: https://github.com/coreos/etcd/releases/
|
||||||
[go]: https://golang.org/doc/install
|
[go]: https://golang.org/doc/install
|
||||||
|
[build-script]: ../build
|
||||||
|
[cmd-directory]: ../cmd
|
||||||
|
|
||||||
|
@ -14,13 +14,15 @@ The easiest way to get started using etcd as a distributed key-value store is to
|
|||||||
- [Interacting with etcd][interacting]
|
- [Interacting with etcd][interacting]
|
||||||
- [API references][api_ref]
|
- [API references][api_ref]
|
||||||
- [gRPC gateway][api_grpc_gateway]
|
- [gRPC gateway][api_grpc_gateway]
|
||||||
|
- [Embedding etcd][embed_etcd]
|
||||||
- [Experimental features and APIs][experimental]
|
- [Experimental features and APIs][experimental]
|
||||||
|
|
||||||
## Operating etcd clusters
|
## Operating etcd clusters
|
||||||
|
|
||||||
Administrators who need to create reliable and scalable key-value stores for the developers they support should begin with a [cluster on multiple machines][clustering].
|
Administrators who need to create reliable and scalable key-value stores for the developers they support should begin with a [cluster on multiple machines][clustering].
|
||||||
|
|
||||||
- [Setting up clusters][clustering]
|
- [Setting up etcd clusters][clustering]
|
||||||
|
- [Setting up etcd gateways][gateway]
|
||||||
- [Run etcd clusters inside containers][container]
|
- [Run etcd clusters inside containers][container]
|
||||||
- [Configuration][conf]
|
- [Configuration][conf]
|
||||||
- [Security][security]
|
- [Security][security]
|
||||||
@ -56,7 +58,9 @@ To learn more about the concepts and internals behind etcd, read the following p
|
|||||||
[data_model]: learning/data_model.md
|
[data_model]: learning/data_model.md
|
||||||
[demo]: demo.md
|
[demo]: demo.md
|
||||||
[download_build]: dl_build.md
|
[download_build]: dl_build.md
|
||||||
|
[embed_etcd]: https://godoc.org/github.com/coreos/etcd/embed
|
||||||
[failures]: op-guide/failures.md
|
[failures]: op-guide/failures.md
|
||||||
|
[gateway]: op-guide/gateway.md
|
||||||
[glossary]: learning/glossary.md
|
[glossary]: learning/glossary.md
|
||||||
[interacting]: dev-guide/interacting_v3.md
|
[interacting]: dev-guide/interacting_v3.md
|
||||||
[local_cluster]: dev-guide/local_cluster.md
|
[local_cluster]: dev-guide/local_cluster.md
|
||||||
|
@ -23,6 +23,7 @@
|
|||||||
|
|
||||||
**Java libraries**
|
**Java libraries**
|
||||||
|
|
||||||
|
- [coreos/jetcd](https://github.com/coreos/jetcd) - Supports v3
|
||||||
- [boonproject/etcd](https://github.com/boonproject/boon/blob/master/etcd/README.md) - Supports v2, Async/Sync and waits
|
- [boonproject/etcd](https://github.com/boonproject/boon/blob/master/etcd/README.md) - Supports v2, Async/Sync and waits
|
||||||
- [justinsb/jetcd](https://github.com/justinsb/jetcd)
|
- [justinsb/jetcd](https://github.com/justinsb/jetcd)
|
||||||
- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
|
- [diwakergupta/jetcd](https://github.com/diwakergupta/jetcd) - Supports v2
|
||||||
@ -61,6 +62,8 @@
|
|||||||
**C++ libraries**
|
**C++ libraries**
|
||||||
- [edwardcapriolo/etcdcpp](https://github.com/edwardcapriolo/etcdcpp) - Supports v2
|
- [edwardcapriolo/etcdcpp](https://github.com/edwardcapriolo/etcdcpp) - Supports v2
|
||||||
- [suryanathan/etcdcpp](https://github.com/suryanathan/etcdcpp) - Supports v2 (with waits)
|
- [suryanathan/etcdcpp](https://github.com/suryanathan/etcdcpp) - Supports v2 (with waits)
|
||||||
|
- [nokia/etcd-cpp-api](https://github.com/nokia/etcd-cpp-api) - Supports v2
|
||||||
|
- [nokia/etcd-cpp-apiv3](https://github.com/nokia/etcd-cpp-apiv3) - Supports v3
|
||||||
|
|
||||||
**Clojure libraries**
|
**Clojure libraries**
|
||||||
|
|
||||||
@ -80,6 +83,7 @@
|
|||||||
**PHP Libraries**
|
**PHP Libraries**
|
||||||
|
|
||||||
- [linkorb/etcd-php](https://github.com/linkorb/etcd-php)
|
- [linkorb/etcd-php](https://github.com/linkorb/etcd-php)
|
||||||
|
- [activecollab/etcd](https://github.com/activecollab/etcd)
|
||||||
|
|
||||||
**Haskell libraries**
|
**Haskell libraries**
|
||||||
|
|
||||||
|
@ -70,6 +70,8 @@ All these metrics are prefixed with `etcd_network_`
|
|||||||
|---------------------------|--------------------------------------------------------------------|---------------|
|
|---------------------------|--------------------------------------------------------------------|---------------|
|
||||||
| peer_sent_bytes_total | The total number of bytes sent to the peer with ID `To`. | Counter(To) |
|
| peer_sent_bytes_total | The total number of bytes sent to the peer with ID `To`. | Counter(To) |
|
||||||
| peer_received_bytes_total | The total number of bytes received from the peer with ID `From`. | Counter(From) |
|
| peer_received_bytes_total | The total number of bytes received from the peer with ID `From`. | Counter(From) |
|
||||||
|
| peer_sent_failures_total | The total number of send failures from the peer with ID `To`. | Counter(To) |
|
||||||
|
| peer_received_failures_total | The total number of receive failures from the peer with ID `From`. | Counter(From) |
|
||||||
| peer_round_trip_time_seconds | Round-Trip-Time histogram between peers. | Histogram(To) |
|
| peer_round_trip_time_seconds | Round-Trip-Time histogram between peers. | Histogram(To) |
|
||||||
| client_grpc_sent_bytes_total | The total number of bytes sent to grpc clients. | Counter |
|
| client_grpc_sent_bytes_total | The total number of bytes sent to grpc clients. | Counter |
|
||||||
| client_grpc_received_bytes_total| The total number of bytes received to grpc clients. | Counter |
|
| client_grpc_received_bytes_total| The total number of bytes received to grpc clients. | Counter |
|
||||||
|
@ -357,6 +357,8 @@ To help clients discover the etcd cluster, the following DNS SRV records are loo
|
|||||||
|
|
||||||
If `_etcd-client-ssl._tcp.example.com` is found, clients will attempt to communicate with the etcd cluster over SSL/TLS.
|
If `_etcd-client-ssl._tcp.example.com` is found, clients will attempt to communicate with the etcd cluster over SSL/TLS.
|
||||||
|
|
||||||
|
If etcd is using TLS without a custom certificate authority, the discovery domain (e.g., example.com) must match the SRV record domain (e.g., infra1.example.com). This is to mitigate attacks that forge SRV records to point to a different domain; the domain would have a valid certificate under PKI but be controlled by an unknown third party.
|
||||||
|
|
||||||
#### Create DNS SRV records
|
#### Create DNS SRV records
|
||||||
|
|
||||||
```
|
```
|
||||||
@ -454,6 +456,10 @@ $ etcd --name infra2 \
|
|||||||
--listen-peer-urls http://10.0.1.12:2380
|
--listen-peer-urls http://10.0.1.12:2380
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Gateway
|
||||||
|
|
||||||
|
etcd gateway is a simple TCP proxy that forwards network data to the etcd cluster. Please read [gateway guide] for more information.
|
||||||
|
|
||||||
### Proxy
|
### Proxy
|
||||||
|
|
||||||
When the `--proxy` flag is set, etcd runs in [proxy mode][proxy]. This proxy mode only supports the etcd v2 API; there are no plans to support the v3 API. Instead, for v3 API support, there will be a new proxy with enhanced features following the etcd 3.0 release.
|
When the `--proxy` flag is set, etcd runs in [proxy mode][proxy]. This proxy mode only supports the etcd v2 API; there are no plans to support the v3 API. Instead, for v3 API support, there will be a new proxy with enhanced features following the etcd 3.0 release.
|
||||||
@ -470,3 +476,4 @@ To setup an etcd cluster with proxies of v2 API, please read the the [clustering
|
|||||||
[clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md
|
[clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md
|
||||||
[security-guide]: security.md
|
[security-guide]: security.md
|
||||||
[tls-setup]: /hack/tls-setup
|
[tls-setup]: /hack/tls-setup
|
||||||
|
[gateway]: gateway.md
|
||||||
|
@ -276,7 +276,7 @@ Follow the instructions when using these flags.
|
|||||||
## Profiling flags
|
## Profiling flags
|
||||||
|
|
||||||
### --enable-pprof
|
### --enable-pprof
|
||||||
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof"
|
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
|
||||||
+ default: false
|
+ default: false
|
||||||
|
|
||||||
[build-cluster]: clustering.md#static
|
[build-cluster]: clustering.md#static
|
||||||
|
@ -2,13 +2,75 @@
|
|||||||
|
|
||||||
The following guide shows how to run etcd with rkt and Docker using the [static bootstrap process](clustering.md#static).
|
The following guide shows how to run etcd with rkt and Docker using the [static bootstrap process](clustering.md#static).
|
||||||
|
|
||||||
|
## rkt
|
||||||
|
|
||||||
|
### Running a single node etcd
|
||||||
|
|
||||||
|
The following rkt run command will expose the etcd client API on port 2379 and expose the peer API on port 2380.
|
||||||
|
|
||||||
|
Use the host IP address when configuring etcd.
|
||||||
|
|
||||||
|
```
|
||||||
|
export NODE1=192.168.1.21
|
||||||
|
```
|
||||||
|
|
||||||
|
Trust the CoreOS [App Signing Key](https://coreos.com/security/app-signing-key/).
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo rkt trust --prefix coreos.com/etcd
|
||||||
|
# gpg key fingerprint is: 18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E
|
||||||
|
```
|
||||||
|
|
||||||
|
Run the `v3.0.6` version of etcd or specify another release version.
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo rkt run --net=default:IP=${NODE1} coreos.com/etcd:v3.0.6 -- -name=node1 -advertise-client-urls=http://${NODE1}:2379 -initial-advertise-peer-urls=http://${NODE1}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE1}:2380 -initial-cluster=node1=http://${NODE1}:2380
|
||||||
|
```
|
||||||
|
|
||||||
|
List the cluster member.
|
||||||
|
|
||||||
|
```
|
||||||
|
etcdctl --endpoints=http://192.168.1.21:2379 member list
|
||||||
|
```
|
||||||
|
|
||||||
|
### Running a 3 node etcd cluster
|
||||||
|
|
||||||
|
Setup a 3 node cluster with rkt locally, using the `-initial-cluster` flag.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export NODE1=172.16.28.21
|
||||||
|
export NODE2=172.16.28.22
|
||||||
|
export NODE3=172.16.28.23
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
# node 1
|
||||||
|
sudo rkt run --net=default:IP=${NODE1} coreos.com/etcd:v3.0.6 -- -name=node1 -advertise-client-urls=http://${NODE1}:2379 -initial-advertise-peer-urls=http://${NODE1}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE1}:2380 -initial-cluster=node1=http://${NODE1}:2380,node2=http://${NODE2}:2380,node3=http://${NODE3}:2380
|
||||||
|
|
||||||
|
# node 2
|
||||||
|
sudo rkt run --net=default:IP=${NODE2} coreos.com/etcd:v3.0.6 -- -name=node2 -advertise-client-urls=http://${NODE2}:2379 -initial-advertise-peer-urls=http://${NODE2}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE2}:2380 -initial-cluster=node1=http://${NODE1}:2380,node2=http://${NODE2}:2380,node3=http://${NODE3}:2380
|
||||||
|
|
||||||
|
# node 3
|
||||||
|
sudo rkt run --net=default:IP=${NODE3} coreos.com/etcd:v3.0.6 -- -name=node3 -advertise-client-urls=http://${NODE3}:2379 -initial-advertise-peer-urls=http://${NODE3}:2380 -listen-client-urls=http://0.0.0.0:2379 -listen-peer-urls=http://${NODE3}:2380 -initial-cluster=node1=http://${NODE1}:2380,node2=http://${NODE2}:2380,node3=http://${NODE3}:2380
|
||||||
|
```
|
||||||
|
|
||||||
|
Verify the cluster is healthy and can be reached.
|
||||||
|
|
||||||
|
```
|
||||||
|
ETCDCTL_API=3 etcdctl --endpoints=http://172.16.28.21:2379,http://172.16.28.22:2379,http://172.16.28.23:2379 endpoint-health
|
||||||
|
```
|
||||||
|
|
||||||
|
### DNS
|
||||||
|
|
||||||
|
Production clusters which refer to peers by DNS name known to the local resolver must mount the [host's DNS configuration](https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html#customizing-rkt-options).
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
In order to expose the etcd API to clients outside of Docker host, use the host IP address of the container. Please see [`docker inspect`](https://docs.docker.com/engine/reference/commandline/inspect) for more detail on how to get the IP address. Alternatively, specify `--net=host` flag to `docker run` command to skip placing the container inside of a separate network stack.
|
In order to expose the etcd API to clients outside of Docker host, use the host IP address of the container. Please see [`docker inspect`](https://docs.docker.com/engine/reference/commandline/inspect) for more detail on how to get the IP address. Alternatively, specify `--net=host` flag to `docker run` command to skip placing the container inside of a separate network stack.
|
||||||
|
|
||||||
```
|
```
|
||||||
# For each machine
|
# For each machine
|
||||||
ETCD_VERSION=v3.0.0-beta.0
|
ETCD_VERSION=v3.0.0
|
||||||
TOKEN=my-etcd-token
|
TOKEN=my-etcd-token
|
||||||
CLUSTER_STATE=new
|
CLUSTER_STATE=new
|
||||||
NAME_1=etcd-node-0
|
NAME_1=etcd-node-0
|
||||||
@ -59,3 +121,7 @@ To run `etcdctl` using API version 3:
|
|||||||
docker exec etcd /bin/sh -c "export ETCDCTL_API=3 && /usr/local/bin/etcdctl put foo bar"
|
docker exec etcd /bin/sh -c "export ETCDCTL_API=3 && /usr/local/bin/etcdctl put foo bar"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Bare Metal
|
||||||
|
|
||||||
|
To provision a 3 node etcd cluster on bare-metal, you might find the examples in the [baremetal repo](https://github.com/coreos/coreos-baremetal/tree/master/examples) useful.
|
||||||
|
|
||||||
|
66
Documentation/op-guide/gateway.md
Normal file
66
Documentation/op-guide/gateway.md
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
# etcd gateway
|
||||||
|
|
||||||
|
## What is etcd gateway
|
||||||
|
|
||||||
|
etcd gateway is a simple TCP proxy that forwards network data to the etcd cluster. The gateway is stateless and transparent; it neither inspects client requests nor interferes with cluster responses.
|
||||||
|
|
||||||
|
The gateway supports multiple etcd server endpoints. When the gateway starts, it randomly picks one etcd server endpoint and forwards all requests to that endpoint. This endpoint serves all requests until the gateway detects a network failure. If the gateway detects an endpoint failure, it will switch to a different endpoint, if available, to hide failures from its clients. Other retry policies, such as weighted round-robin, may be supported in the future.
|
||||||
|
|
||||||
|
## When to use etcd gateway
|
||||||
|
|
||||||
|
Every application that accesses etcd must first have the address of an etcd cluster client endpoint. If multiple applications on the same server access the same etcd cluster, every application still needs to know the advertised client endpoints of the etcd cluster. If the etcd cluster is reconfigured to have different endpoints, every application may also need to update its endpoint list. This wide-scale reconfiguration is both tedious and error prone.
|
||||||
|
|
||||||
|
etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has
|
||||||
|
each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application.
|
||||||
|
|
||||||
|
In summary, to automatically propagate cluster endpoint changes, the etcd gateway runs on every machine serving multiple applications accessing same etcd cluster.
|
||||||
|
|
||||||
|
## When not to use etcd gateway
|
||||||
|
|
||||||
|
- Improving performance
|
||||||
|
|
||||||
|
The gateway is not designed for improving etcd cluster performance. It does not provide caching, watch coalescing or batching. The etcd team is developing a caching proxy designed for improving cluster scalability.
|
||||||
|
|
||||||
|
- Running on a cluster management system
|
||||||
|
|
||||||
|
Advanced cluster management systems like Kubernetes natively support service discovery. Applications can access an etcd cluster with a DNS name or a virtual IP address managed by the system. For example, kube-proxy is equivalent to etcd gateway.
|
||||||
|
|
||||||
|
## Start etcd gateway
|
||||||
|
|
||||||
|
Consider an etcd cluster with the following static endpoints:
|
||||||
|
|
||||||
|
|Name|Address|Hostname|
|
||||||
|
|------|---------|------------------|
|
||||||
|
|infra0|10.0.1.10|infra0.example.com|
|
||||||
|
|infra1|10.0.1.11|infra1.example.com|
|
||||||
|
|infra2|10.0.1.12|infra2.example.com|
|
||||||
|
|
||||||
|
Start the etcd gateway to use these static endpoints with the command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ etcd gateway start --endpoints=infra0.example.com,infra1.example.com,infra2.example.com
|
||||||
|
2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...]
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively, if using DNS for service discovery, consider the DNS SRV entries:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ dig +noall +answer SRV _etcd-client._tcp.example.com
|
||||||
|
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra0.example.com.
|
||||||
|
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra1.example.com.
|
||||||
|
_etcd-client._tcp.example.com. 300 IN SRV 0 0 2379 infra2.example.com.
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ dig +noall +answer infra0.example.com infra1.example.com infra2.example.com
|
||||||
|
infra0.example.com. 300 IN A 10.0.1.10
|
||||||
|
infra1.example.com. 300 IN A 10.0.1.11
|
||||||
|
infra2.example.com. 300 IN A 10.0.1.12
|
||||||
|
```
|
||||||
|
|
||||||
|
Start the etcd gateway to fetch the endpoints from the DNS SRV entries with the command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ etcd gateway --discovery-srv=example.com
|
||||||
|
2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...]
|
||||||
|
```
|
@ -1,14 +1,39 @@
|
|||||||
## Supported platform
|
## Supported platforms
|
||||||
|
|
||||||
|
### Current support
|
||||||
|
|
||||||
|
The following table lists etcd support status for common architectures and operating systems,
|
||||||
|
|
||||||
|
| Architecture | Operating System | Status | Maintainers |
|
||||||
|
| ------------ | ---------------- | ------------ | ---------------- |
|
||||||
|
| amd64 | Darwin | Experimental | etcd maintainers |
|
||||||
|
| amd64 | Linux | Stable | etcd maintainers |
|
||||||
|
| amd64 | Windows | Experimental | |
|
||||||
|
| arm64 | Linux | Experimental | @glevand |
|
||||||
|
| arm | Linux | Unstable | |
|
||||||
|
| 386 | Linux | Unstable | |
|
||||||
|
|
||||||
|
* etcd-maintainers are listed in https://github.com/coreos/etcd/blob/master/MAINTAINERS.
|
||||||
|
|
||||||
|
Experimental platforms appear to work in practice and have some platform specific code in etcd, but do not fully conform to the stable support policy. Unstable platforms have been lightly tested, but less than experimental. Unlisted architecture and operating system pairs are currently unsupported; caveat emptor.
|
||||||
|
|
||||||
|
### Supporting a new platform
|
||||||
|
|
||||||
|
For etcd to officially support a new platform as stable, a few requirements are necessary to ensure acceptable quality:
|
||||||
|
|
||||||
|
1. An "official" maintainer for the platform with clear motivation; someone must be responsible for taking care of the platform.
|
||||||
|
2. Set up CI for build; etcd must compile.
|
||||||
|
3. Set up CI for running unit tests; etcd must pass simple tests.
|
||||||
|
4. Set up CI (TravisCI, SemaphoreCI or Jenkins) for running integration tests; etcd must pass intensive tests.
|
||||||
|
5. (Optional) Set up a functional testing cluster; an etcd cluster should survive stress testing.
|
||||||
|
|
||||||
### 32-bit and other unsupported systems
|
### 32-bit and other unsupported systems
|
||||||
|
|
||||||
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
|
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See the [Go issue][go-issue] and [atomic package][go-atomic] for more information.
|
||||||
|
|
||||||
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print
|
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unstable or unsupported architectures will print a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to the target architecture.
|
||||||
a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to
|
|
||||||
the target architecture.
|
|
||||||
|
|
||||||
Currently only the amd64 architecture is officially supported by `etcd`.
|
Currently only the amd64 architecture is officially supported by `etcd`.
|
||||||
|
|
||||||
[358]: https://github.com/coreos/etcd/issues/358
|
[go-issue]: https://github.com/golang/go/issues/599
|
||||||
|
[go-atomic]: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||||
|
@ -71,4 +71,23 @@ $ etcd --snapshot-count=5000
|
|||||||
$ ETCD_SNAPSHOT_COUNT=5000 etcd
|
$ ETCD_SNAPSHOT_COUNT=5000 etcd
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Network
|
||||||
|
|
||||||
|
If the etcd leader serves a large number of concurrent client requests, it may delay processing follower peer requests due to network congestion. This manifests as send buffer error messages on the follower nodes:
|
||||||
|
|
||||||
|
```
|
||||||
|
dropped MsgProp to 247ae21ff9436b2d since streamMsg's sending buffer is full
|
||||||
|
dropped MsgAppResp to 247ae21ff9436b2d since streamMsg's sending buffer is full
|
||||||
|
```
|
||||||
|
|
||||||
|
These errors may be resolved by prioritizing etcd's peer traffic over its client traffic. On Linux, peer traffic can be prioritized by using the traffic control mechanism:
|
||||||
|
|
||||||
|
```
|
||||||
|
tc qdisc add dev eth0 root handle 1: prio bands 3
|
||||||
|
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip sport 2380 0xffff flowid 1:1
|
||||||
|
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
|
||||||
|
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
|
||||||
|
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
|
||||||
|
```
|
||||||
|
|
||||||
[ping]: https://en.wikipedia.org/wiki/Ping_(networking_utility)
|
[ping]: https://en.wikipedia.org/wiki/Ping_(networking_utility)
|
||||||
|
@ -18,7 +18,7 @@ Also, to ensure a smooth rolling upgrade, the running cluster must be healthy. Y
|
|||||||
|
|
||||||
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
|
||||||
|
|
||||||
Before beginning, [backup the etcd data directory](admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version.
|
Before beginning, [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore). Should something go wrong with the upgrade, it is possible to use this backup to [downgrade](#downgrade) back to existing etcd version.
|
||||||
|
|
||||||
#### Mixed Versions
|
#### Mixed Versions
|
||||||
|
|
||||||
@ -34,7 +34,7 @@ For a much larger total data size, 100MB or more , this one-time process might t
|
|||||||
|
|
||||||
If all members have been upgraded to v3.0, the cluster will be upgraded to v3.0, and downgrade from this completed state is **not possible**. If any single member is still v2.3, however, the cluster and its operations remains “v2.3”, and it is possible from this mixed cluster state to return to using a v2.3 etcd binary on all members.
|
If all members have been upgraded to v3.0, the cluster will be upgraded to v3.0, and downgrade from this completed state is **not possible**. If any single member is still v2.3, however, the cluster and its operations remains “v2.3”, and it is possible from this mixed cluster state to return to using a v2.3 etcd binary on all members.
|
||||||
|
|
||||||
Please [backup the data directory](admin_guide.md#backing-up-the-datastore) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
Please [backup the data directory](../v2/admin_guide.md#backing-up-the-datastore) of all etcd members to make downgrading the cluster possible even after it has been completely upgraded.
|
||||||
|
|
||||||
### Upgrade Procedure
|
### Upgrade Procedure
|
||||||
|
|
||||||
@ -64,7 +64,7 @@ When each etcd process is stopped, expected errors will be logged by other clust
|
|||||||
2016-06-27 15:21:48.624175 I | rafthttp: the connection with 8211f1d0f64f3269 became inactive
|
2016-06-27 15:21:48.624175 I | rafthttp: the connection with 8211f1d0f64f3269 became inactive
|
||||||
```
|
```
|
||||||
|
|
||||||
It’s a good idea at this point to [backup the etcd data directory](https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
|
It’s a good idea at this point to [backup the etcd data directory](../v2/admin_guide.md#backing-up-the-datastore) to provide a downgrade path should any problems occur:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ etcdctl backup \
|
$ etcdctl backup \
|
||||||
|
@ -559,6 +559,25 @@ Let's create a key-value pair first: `foo=one`.
|
|||||||
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=one
|
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=one
|
||||||
```
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"action":"set",
|
||||||
|
"node":{
|
||||||
|
"key":"/foo",
|
||||||
|
"value":"one",
|
||||||
|
"modifiedIndex":4,
|
||||||
|
"createdIndex":4
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Specifying `noValueOnSuccess` option skips returning the node as value.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl http://127.0.0.1:2379/v2/keys/foo?noValueOnSuccess=true -XPUT -d value=one
|
||||||
|
# {"action":"set"}
|
||||||
|
```
|
||||||
|
|
||||||
Now let's try some invalid `CompareAndSwap` commands.
|
Now let's try some invalid `CompareAndSwap` commands.
|
||||||
|
|
||||||
Trying to set this existing key with `prevExist=false` fails as expected:
|
Trying to set this existing key with `prevExist=false` fails as expected:
|
||||||
|
@ -266,7 +266,7 @@ Follow the instructions when using these flags.
|
|||||||
## Profiling flags
|
## Profiling flags
|
||||||
|
|
||||||
### --enable-pprof
|
### --enable-pprof
|
||||||
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof"
|
+ Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
|
||||||
+ default: false
|
+ default: false
|
||||||
|
|
||||||
[build-cluster]: clustering.md#static
|
[build-cluster]: clustering.md#static
|
||||||
|
@ -48,7 +48,7 @@ All releases version numbers follow the format of [semantic versioning 2.0.0](ht
|
|||||||
|
|
||||||
## Build Release Binaries and Images
|
## Build Release Binaries and Images
|
||||||
|
|
||||||
- Ensure `actool` is available, or installing it through `go get github.com/appc/spec/actool`.
|
- Ensure `acbuild` is available.
|
||||||
- Ensure `docker` is available.
|
- Ensure `docker` is available.
|
||||||
|
|
||||||
Run release script in root directory:
|
Run release script in root directory:
|
||||||
|
@ -105,7 +105,7 @@ ETCD_INITIAL_CLUSTER_STATE=existing
|
|||||||
|
|
||||||
### Stop the proxy process
|
### Stop the proxy process
|
||||||
|
|
||||||
Stop the existing proxy so we can wipe it's state on disk and reload it with the new configuration:
|
Stop the existing proxy so we can wipe its state on disk and reload it with the new configuration:
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
px aux | grep etcd
|
px aux | grep etcd
|
||||||
|
45
NEWS
Normal file
45
NEWS
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
etcd v3.0.9 (2016-09-15)
|
||||||
|
|
||||||
|
- warn on domain names on listen URLs (v3.2 will reject domain names)
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.8 (2016-09-09)
|
||||||
|
|
||||||
|
- allow only IP addresses in listen URLs (domain names are rejected)
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.7 (2016-08-31)
|
||||||
|
|
||||||
|
- SRV records only allow A records (RFC 2052)
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.6 (2016-08-19)
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.5 (2016-08-19)
|
||||||
|
|
||||||
|
- SRV records (e.g., infra1.example.com) must match the discovery domain
|
||||||
|
(i.e., example.com) when using the default certificate authority.
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.4 (2016-07-27)
|
||||||
|
|
||||||
|
- v2 auth can now use common name from TLS certificate when --client-cert-auth is enabled
|
||||||
|
- v2 etcdctl ls command now supports --output=json
|
||||||
|
- Add /var/lib/etcd directory to etcd official Docker image
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.3 (2016-07-15)
|
||||||
|
|
||||||
|
- Revert Dockerfile to use CMD, instead of ENTRYPOINT, to support etcdctl run
|
||||||
|
- Docker commands for v3.0.2 won't work without specifying executable binary paths
|
||||||
|
- v3 etcdctl default endpoints are now 127.0.0.1:2379
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.2 (2016-07-08)
|
||||||
|
|
||||||
|
- Dockerfile uses ENTRYPOINT, instead of CMD, to run etcd without binary path specified
|
||||||
|
|
||||||
|
|
||||||
|
etcd v3.0.1 (2016-07-01)
|
||||||
|
|
@ -39,13 +39,14 @@ See [etcdctl][etcdctl] for a simple command line client.
|
|||||||
|
|
||||||
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
|
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
|
||||||
|
|
||||||
For those wanting to try the very latest version, you can build the latest version of etcd from the `master` branch.
|
For those wanting to try the very latest version, you can [build the latest version of etcd][dl-build] from the `master` branch.
|
||||||
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.5+ is required).
|
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.6+ is required).
|
||||||
All development occurs on `master`, including new features and bug fixes.
|
All development occurs on `master`, including new features and bug fixes.
|
||||||
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
|
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
|
||||||
|
|
||||||
[github-release]: https://github.com/coreos/etcd/releases/
|
[github-release]: https://github.com/coreos/etcd/releases/
|
||||||
[branch-management]: ./Documentation/branch_management.md
|
[branch-management]: ./Documentation/branch_management.md
|
||||||
|
[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
|
||||||
|
|
||||||
### Running etcd
|
### Running etcd
|
||||||
|
|
||||||
|
35
ROADMAP.md
35
ROADMAP.md
@ -6,26 +6,19 @@ This document defines a high level roadmap for etcd development.
|
|||||||
|
|
||||||
The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
|
The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
|
||||||
|
|
||||||
etcd 2.3 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
|
etcd 3.0 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
|
||||||
|
|
||||||
### etcd 3.0 (April)
|
### etcd 3.1 (2016-Oct)
|
||||||
- v3 API ([see also the issue tag](https://github.com/coreos/etcd/issues?utf8=%E2%9C%93&q=label%3Aarea/v3api))
|
- Stable L4 gateway
|
||||||
- Leases
|
- Experimental support for scalable proxy
|
||||||
- Binary protocol
|
- Automatic leadership transfer for the rolling upgrade
|
||||||
- Support a large number of watchers
|
- V3 API improvements
|
||||||
- Failure guarantees documented
|
- Get previous key-value pair
|
||||||
- Simple v3 client (golang)
|
- Get only keys (ignore values)
|
||||||
- v3 API
|
- Get only key count
|
||||||
- Locking
|
|
||||||
- Better disk backend
|
|
||||||
- Improved write throughput
|
|
||||||
- Support larger datasets and histories
|
|
||||||
- Simpler disaster recovery UX
|
|
||||||
- Integrated with Kubernetes
|
|
||||||
- Mirroring
|
|
||||||
|
|
||||||
### etcd 3.1 (July)
|
### etcd 3.2 (2017-Feb)
|
||||||
- API bindings for other languages
|
- Stable scalable proxy
|
||||||
|
- JWT token based auth
|
||||||
### etcd 3.+ (future)
|
- Improved watch performance
|
||||||
- Horizontally scalable proxy layer
|
- ...
|
@ -18,7 +18,7 @@ package authpb
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
proto "github.com/gogo/protobuf/proto"
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
math "math"
|
math "math"
|
||||||
)
|
)
|
||||||
@ -32,7 +32,7 @@ var _ = math.Inf
|
|||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
// is compatible with the proto package it is being compiled against.
|
// is compatible with the proto package it is being compiled against.
|
||||||
const _ = proto.GoGoProtoPackageIsVersion1
|
const _ = proto.ProtoPackageIsVersion1
|
||||||
|
|
||||||
type Permission_Type int32
|
type Permission_Type int32
|
||||||
|
|
||||||
@ -798,23 +798,23 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var fileDescriptorAuth = []byte{
|
var fileDescriptorAuth = []byte{
|
||||||
// 276 bytes of a gzipped FileDescriptorProto
|
// 288 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
|
||||||
0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
|
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
|
||||||
0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x87, 0x8b, 0x25, 0xb4, 0x38, 0xb5,
|
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
|
||||||
0x48, 0x48, 0x88, 0x8b, 0x25, 0x2f, 0x31, 0x37, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x27, 0x08,
|
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
|
||||||
0xcc, 0x16, 0x92, 0xe2, 0xe2, 0x28, 0x48, 0x2c, 0x2e, 0x2e, 0xcf, 0x2f, 0x4a, 0x91, 0x60, 0x02,
|
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
|
||||||
0x8b, 0xc3, 0xf9, 0x42, 0x22, 0x5c, 0xac, 0x45, 0xf9, 0x39, 0xa9, 0xc5, 0x12, 0xcc, 0x0a, 0xcc,
|
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
|
||||||
0x1a, 0x9c, 0x41, 0x10, 0x8e, 0xd2, 0x1c, 0x46, 0x2e, 0xae, 0x80, 0xd4, 0xa2, 0xdc, 0xcc, 0xe2,
|
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
|
||||||
0xe2, 0xcc, 0xfc, 0x3c, 0x21, 0x63, 0xa0, 0x01, 0x40, 0x5e, 0x48, 0x65, 0x01, 0xc4, 0x60, 0x3e,
|
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
|
||||||
0x23, 0x71, 0x3d, 0x88, 0x6b, 0xf4, 0x10, 0xaa, 0xf4, 0x40, 0xd2, 0x41, 0x70, 0x85, 0x42, 0x02,
|
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
|
||||||
0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x50, 0x0b, 0x41, 0x4c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xc4, 0xbc,
|
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
|
||||||
0xf4, 0xd4, 0xf8, 0xd4, 0xbc, 0x14, 0xa0, 0x7d, 0x60, 0x87, 0x80, 0x05, 0x5c, 0xf3, 0x52, 0x94,
|
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
|
||||||
0xb4, 0xb8, 0x58, 0xc0, 0xda, 0x38, 0xb8, 0x58, 0x82, 0x5c, 0x1d, 0x5d, 0x04, 0x18, 0x84, 0x38,
|
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
|
||||||
0xb9, 0x58, 0xc3, 0x83, 0x3c, 0x43, 0x5c, 0x05, 0x18, 0x85, 0x78, 0xb9, 0x38, 0x41, 0x82, 0x10,
|
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
|
||||||
0x2e, 0x93, 0x52, 0x08, 0x50, 0x0d, 0xd0, 0x9d, 0x58, 0x3d, 0x6b, 0xc1, 0xc5, 0x0b, 0xb4, 0x0b,
|
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
|
||||||
0xe1, 0x2c, 0xa0, 0x03, 0x98, 0x35, 0xb8, 0x8d, 0x84, 0x30, 0x1d, 0x1c, 0x84, 0xaa, 0xd0, 0x49,
|
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
|
||||||
0xe4, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x40, 0x7c, 0xe2, 0x91, 0x1c, 0xe3, 0x05, 0x20, 0x7e, 0x00,
|
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
|
||||||
0xc4, 0x49, 0x6c, 0xe0, 0xf0, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x31, 0x53, 0xfd,
|
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
|
||||||
0x8b, 0x01, 0x00, 0x00,
|
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
@ -22,7 +22,10 @@ import (
|
|||||||
"github.com/coreos/etcd/mvcc/backend"
|
"github.com/coreos/etcd/mvcc/backend"
|
||||||
)
|
)
|
||||||
|
|
||||||
// isSubset returns true if a is a subset of b
|
// isSubset returns true if a is a subset of b.
|
||||||
|
// If a is a prefix of b, then a is a subset of b.
|
||||||
|
// Given intervals [a1,a2) and [b1,b2), is
|
||||||
|
// the a interval a subset of b?
|
||||||
func isSubset(a, b *rangePerm) bool {
|
func isSubset(a, b *rangePerm) bool {
|
||||||
switch {
|
switch {
|
||||||
case len(a.end) == 0 && len(b.end) == 0:
|
case len(a.end) == 0 && len(b.end) == 0:
|
||||||
@ -32,9 +35,11 @@ func isSubset(a, b *rangePerm) bool {
|
|||||||
// b is a key, a is a range
|
// b is a key, a is a range
|
||||||
return false
|
return false
|
||||||
case len(a.end) == 0:
|
case len(a.end) == 0:
|
||||||
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.begin, b.end) <= 0
|
// a is a key, b is a range. need b1 <= a1 and a1 < b2
|
||||||
|
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0
|
||||||
default:
|
default:
|
||||||
return 0 <= bytes.Compare(a.begin, b.begin) && bytes.Compare(a.end, b.end) <= 0
|
// both are ranges. need b1 <= a1 and a2 <= b2
|
||||||
|
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -46,7 +51,7 @@ func isRangeEqual(a, b *rangePerm) bool {
|
|||||||
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
|
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
|
||||||
func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
|
func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
|
||||||
// TODO(mitake): currently it is O(n^2), we need a better algorithm
|
// TODO(mitake): currently it is O(n^2), we need a better algorithm
|
||||||
newp := make([]*rangePerm, 0)
|
var newp []*rangePerm
|
||||||
|
|
||||||
for i := range perms {
|
for i := range perms {
|
||||||
skip := false
|
skip := false
|
||||||
@ -81,19 +86,25 @@ func removeSubsetRangePerms(perms []*rangePerm) []*rangePerm {
|
|||||||
|
|
||||||
// mergeRangePerms merges adjacent rangePerms.
|
// mergeRangePerms merges adjacent rangePerms.
|
||||||
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
|
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
|
||||||
merged := make([]*rangePerm, 0)
|
var merged []*rangePerm
|
||||||
perms = removeSubsetRangePerms(perms)
|
perms = removeSubsetRangePerms(perms)
|
||||||
sort.Sort(RangePermSliceByBegin(perms))
|
sort.Sort(RangePermSliceByBegin(perms))
|
||||||
|
|
||||||
i := 0
|
i := 0
|
||||||
for i < len(perms) {
|
for i < len(perms) {
|
||||||
begin, next := i, i
|
begin, next := i, i
|
||||||
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) != -1 {
|
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 {
|
||||||
next++
|
next++
|
||||||
}
|
}
|
||||||
|
// don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty.
|
||||||
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
|
if next != begin && len(perms[next].end) > 0 {
|
||||||
|
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
|
||||||
|
} else {
|
||||||
|
merged = append(merged, perms[begin])
|
||||||
|
if next != begin {
|
||||||
|
merged = append(merged, perms[next])
|
||||||
|
}
|
||||||
|
}
|
||||||
i = next + 1
|
i = next + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,6 +46,10 @@ func TestGetMergedPerms(t *testing.T) {
|
|||||||
[]*rangePerm{{[]byte("a"), []byte("b")}},
|
[]*rangePerm{{[]byte("a"), []byte("b")}},
|
||||||
[]*rangePerm{{[]byte("a"), []byte("b")}},
|
[]*rangePerm{{[]byte("a"), []byte("b")}},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("")}},
|
||||||
|
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("")}},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("c")}},
|
[]*rangePerm{{[]byte("a"), []byte("b")}, {[]byte("b"), []byte("c")}},
|
||||||
[]*rangePerm{{[]byte("a"), []byte("c")}},
|
[]*rangePerm{{[]byte("a"), []byte("c")}},
|
||||||
@ -106,7 +110,7 @@ func TestGetMergedPerms(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("b"), []byte("")}, {[]byte("c"), []byte("")}, {[]byte("d"), []byte("")}},
|
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("b"), []byte("")}, {[]byte("c"), []byte("")}, {[]byte("d"), []byte("")}},
|
||||||
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("d"), []byte("")}},
|
[]*rangePerm{{[]byte("a"), []byte("")}, {[]byte("b"), []byte("c")}, {[]byte("c"), []byte("")}, {[]byte("d"), []byte("")}},
|
||||||
},
|
},
|
||||||
// duplicate ranges
|
// duplicate ranges
|
||||||
{
|
{
|
||||||
|
@ -20,6 +20,7 @@ package auth
|
|||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -53,3 +54,14 @@ func (as *authStore) assignSimpleTokenToUser(username, token string) {
|
|||||||
as.simpleTokens[token] = username
|
as.simpleTokens[token] = username
|
||||||
as.simpleTokensMu.Unlock()
|
as.simpleTokensMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (as *authStore) invalidateUser(username string) {
|
||||||
|
as.simpleTokensMu.Lock()
|
||||||
|
defer as.simpleTokensMu.Unlock()
|
||||||
|
|
||||||
|
for token, name := range as.simpleTokens {
|
||||||
|
if strings.Compare(name, username) == 0 {
|
||||||
|
delete(as.simpleTokens, token)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
157
auth/store.go
157
auth/store.go
@ -16,6 +16,7 @@ package auth
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
@ -35,6 +36,8 @@ var (
|
|||||||
authEnabled = []byte{1}
|
authEnabled = []byte{1}
|
||||||
authDisabled = []byte{0}
|
authDisabled = []byte{0}
|
||||||
|
|
||||||
|
revisionKey = []byte("authRevision")
|
||||||
|
|
||||||
authBucketName = []byte("auth")
|
authBucketName = []byte("auth")
|
||||||
authUsersBucketName = []byte("authUsers")
|
authUsersBucketName = []byte("authUsers")
|
||||||
authRolesBucketName = []byte("authRoles")
|
authRolesBucketName = []byte("authRoles")
|
||||||
@ -51,13 +54,25 @@ var (
|
|||||||
ErrPermissionDenied = errors.New("auth: permission denied")
|
ErrPermissionDenied = errors.New("auth: permission denied")
|
||||||
ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
|
ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
|
||||||
ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
|
ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
|
||||||
|
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
|
||||||
|
ErrAuthOldRevision = errors.New("auth: revision in header is old")
|
||||||
|
|
||||||
|
// BcryptCost is the algorithm cost / strength for hashing auth passwords
|
||||||
|
BcryptCost = bcrypt.DefaultCost
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
rootUser = "root"
|
rootUser = "root"
|
||||||
rootRole = "root"
|
rootRole = "root"
|
||||||
|
|
||||||
|
revBytesLen = 8
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type AuthInfo struct {
|
||||||
|
Username string
|
||||||
|
Revision uint64
|
||||||
|
}
|
||||||
|
|
||||||
type AuthStore interface {
|
type AuthStore interface {
|
||||||
// AuthEnable turns on the authentication feature
|
// AuthEnable turns on the authentication feature
|
||||||
AuthEnable() error
|
AuthEnable() error
|
||||||
@ -110,23 +125,27 @@ type AuthStore interface {
|
|||||||
// RoleList gets a list of all roles
|
// RoleList gets a list of all roles
|
||||||
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
|
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
|
||||||
|
|
||||||
// UsernameFromToken gets a username from the given Token
|
// AuthInfoFromToken gets a username from the given Token and current revision number
|
||||||
UsernameFromToken(token string) (string, bool)
|
// (The revision number is used for preventing the TOCTOU problem)
|
||||||
|
AuthInfoFromToken(token string) (*AuthInfo, bool)
|
||||||
|
|
||||||
// IsPutPermitted checks put permission of the user
|
// IsPutPermitted checks put permission of the user
|
||||||
IsPutPermitted(username string, key []byte) bool
|
IsPutPermitted(authInfo *AuthInfo, key []byte) error
|
||||||
|
|
||||||
// IsRangePermitted checks range permission of the user
|
// IsRangePermitted checks range permission of the user
|
||||||
IsRangePermitted(username string, key, rangeEnd []byte) bool
|
IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
|
||||||
|
|
||||||
// IsDeleteRangePermitted checks delete-range permission of the user
|
// IsDeleteRangePermitted checks delete-range permission of the user
|
||||||
IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool
|
IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
|
||||||
|
|
||||||
// IsAdminPermitted checks admin permission of the user
|
// IsAdminPermitted checks admin permission of the user
|
||||||
IsAdminPermitted(username string) bool
|
IsAdminPermitted(authInfo *AuthInfo) error
|
||||||
|
|
||||||
// GenSimpleToken produces a simple random string
|
// GenSimpleToken produces a simple random string
|
||||||
GenSimpleToken() (string, error)
|
GenSimpleToken() (string, error)
|
||||||
|
|
||||||
|
// Revision gets current revision of authStore
|
||||||
|
Revision() uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
type authStore struct {
|
type authStore struct {
|
||||||
@ -138,6 +157,8 @@ type authStore struct {
|
|||||||
|
|
||||||
simpleTokensMu sync.RWMutex
|
simpleTokensMu sync.RWMutex
|
||||||
simpleTokens map[string]string // token -> username
|
simpleTokens map[string]string // token -> username
|
||||||
|
|
||||||
|
revision uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) AuthEnable() error {
|
func (as *authStore) AuthEnable() error {
|
||||||
@ -166,6 +187,8 @@ func (as *authStore) AuthEnable() error {
|
|||||||
|
|
||||||
as.rangePermCache = make(map[string]*unifiedRangePermissions)
|
as.rangePermCache = make(map[string]*unifiedRangePermissions)
|
||||||
|
|
||||||
|
as.revision = getRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("Authentication enabled")
|
plog.Noticef("Authentication enabled")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -176,6 +199,7 @@ func (as *authStore) AuthDisable() {
|
|||||||
tx := b.BatchTx()
|
tx := b.BatchTx()
|
||||||
tx.Lock()
|
tx.Lock()
|
||||||
tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
|
tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
|
||||||
|
as.commitRevision(tx)
|
||||||
tx.Unlock()
|
tx.Unlock()
|
||||||
b.ForceCommit()
|
b.ForceCommit()
|
||||||
|
|
||||||
@ -183,10 +207,18 @@ func (as *authStore) AuthDisable() {
|
|||||||
as.enabled = false
|
as.enabled = false
|
||||||
as.enabledMu.Unlock()
|
as.enabledMu.Unlock()
|
||||||
|
|
||||||
|
as.simpleTokensMu.Lock()
|
||||||
|
as.simpleTokens = make(map[string]string) // invalidate all tokens
|
||||||
|
as.simpleTokensMu.Unlock()
|
||||||
|
|
||||||
plog.Noticef("Authentication disabled")
|
plog.Noticef("Authentication disabled")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
|
func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
|
||||||
|
if !as.isAuthEnabled() {
|
||||||
|
return nil, ErrAuthNotEnabled
|
||||||
|
}
|
||||||
|
|
||||||
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
|
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
|
||||||
index := ctx.Value("index").(uint64)
|
index := ctx.Value("index").(uint64)
|
||||||
simpleToken := ctx.Value("simpleToken").(string)
|
simpleToken := ctx.Value("simpleToken").(string)
|
||||||
@ -223,6 +255,9 @@ func (as *authStore) Recover(be backend.Backend) {
|
|||||||
enabled = true
|
enabled = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
as.revision = getRevision(tx)
|
||||||
|
|
||||||
tx.Unlock()
|
tx.Unlock()
|
||||||
|
|
||||||
as.enabledMu.Lock()
|
as.enabledMu.Lock()
|
||||||
@ -231,7 +266,7 @@ func (as *authStore) Recover(be backend.Backend) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
||||||
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
|
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
plog.Errorf("failed to hash password: %s", err)
|
plog.Errorf("failed to hash password: %s", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -253,6 +288,8 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
|
|||||||
|
|
||||||
putUser(tx, newUser)
|
putUser(tx, newUser)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("added a new user: %s", r.Name)
|
plog.Noticef("added a new user: %s", r.Name)
|
||||||
|
|
||||||
return &pb.AuthUserAddResponse{}, nil
|
return &pb.AuthUserAddResponse{}, nil
|
||||||
@ -270,6 +307,11 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
|
|||||||
|
|
||||||
delUser(tx, r.Name)
|
delUser(tx, r.Name)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
|
as.invalidateCachedPerm(r.Name)
|
||||||
|
as.invalidateUser(r.Name)
|
||||||
|
|
||||||
plog.Noticef("deleted a user: %s", r.Name)
|
plog.Noticef("deleted a user: %s", r.Name)
|
||||||
|
|
||||||
return &pb.AuthUserDeleteResponse{}, nil
|
return &pb.AuthUserDeleteResponse{}, nil
|
||||||
@ -278,7 +320,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
|
|||||||
func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
|
func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
|
||||||
// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
|
// TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
|
||||||
// If the cost is too high, we should move the encryption to outside of the raft
|
// If the cost is too high, we should move the encryption to outside of the raft
|
||||||
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), bcrypt.DefaultCost)
|
hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
plog.Errorf("failed to hash password: %s", err)
|
plog.Errorf("failed to hash password: %s", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -301,6 +343,11 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
|
|||||||
|
|
||||||
putUser(tx, updatedUser)
|
putUser(tx, updatedUser)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
|
as.invalidateCachedPerm(r.Name)
|
||||||
|
as.invalidateUser(r.Name)
|
||||||
|
|
||||||
plog.Noticef("changed a password of a user: %s", r.Name)
|
plog.Noticef("changed a password of a user: %s", r.Name)
|
||||||
|
|
||||||
return &pb.AuthUserChangePasswordResponse{}, nil
|
return &pb.AuthUserChangePasswordResponse{}, nil
|
||||||
@ -336,6 +383,8 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
|
|||||||
|
|
||||||
as.invalidateCachedPerm(r.User)
|
as.invalidateCachedPerm(r.User)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("granted role %s to user %s", r.Role, r.User)
|
plog.Noticef("granted role %s to user %s", r.Role, r.User)
|
||||||
return &pb.AuthUserGrantRoleResponse{}, nil
|
return &pb.AuthUserGrantRoleResponse{}, nil
|
||||||
}
|
}
|
||||||
@ -404,6 +453,8 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
|
|||||||
|
|
||||||
as.invalidateCachedPerm(r.Name)
|
as.invalidateCachedPerm(r.Name)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
|
plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
|
||||||
return &pb.AuthUserRevokeRoleResponse{}, nil
|
return &pb.AuthUserRevokeRoleResponse{}, nil
|
||||||
}
|
}
|
||||||
@ -473,6 +524,8 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
|
|||||||
// It should be optimized.
|
// It should be optimized.
|
||||||
as.clearCachedPerm()
|
as.clearCachedPerm()
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
|
plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
|
||||||
return &pb.AuthRoleRevokePermissionResponse{}, nil
|
return &pb.AuthRoleRevokePermissionResponse{}, nil
|
||||||
}
|
}
|
||||||
@ -501,6 +554,8 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
|
|||||||
|
|
||||||
delRole(tx, r.Role)
|
delRole(tx, r.Role)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("deleted role %s", r.Role)
|
plog.Noticef("deleted role %s", r.Role)
|
||||||
return &pb.AuthRoleDeleteResponse{}, nil
|
return &pb.AuthRoleDeleteResponse{}, nil
|
||||||
}
|
}
|
||||||
@ -521,16 +576,18 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
|
|||||||
|
|
||||||
putRole(tx, newRole)
|
putRole(tx, newRole)
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("Role %s is created", r.Name)
|
plog.Noticef("Role %s is created", r.Name)
|
||||||
|
|
||||||
return &pb.AuthRoleAddResponse{}, nil
|
return &pb.AuthRoleAddResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) UsernameFromToken(token string) (string, bool) {
|
func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
|
||||||
as.simpleTokensMu.RLock()
|
as.simpleTokensMu.RLock()
|
||||||
defer as.simpleTokensMu.RUnlock()
|
defer as.simpleTokensMu.RUnlock()
|
||||||
t, ok := as.simpleTokens[token]
|
t, ok := as.simpleTokens[token]
|
||||||
return t, ok
|
return &AuthInfo{Username: t, Revision: as.revision}, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
type permSlice []*authpb.Permission
|
type permSlice []*authpb.Permission
|
||||||
@ -582,15 +639,21 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
|
|||||||
// It should be optimized.
|
// It should be optimized.
|
||||||
as.clearCachedPerm()
|
as.clearCachedPerm()
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
|
plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
|
||||||
|
|
||||||
return &pb.AuthRoleGrantPermissionResponse{}, nil
|
return &pb.AuthRoleGrantPermissionResponse{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTyp authpb.Permission_Type) bool {
|
func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
|
||||||
// TODO(mitake): this function would be costly so we need a caching mechanism
|
// TODO(mitake): this function would be costly so we need a caching mechanism
|
||||||
if !as.isAuthEnabled() {
|
if !as.isAuthEnabled() {
|
||||||
return true
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if revision < as.revision {
|
||||||
|
return ErrAuthOldRevision
|
||||||
}
|
}
|
||||||
|
|
||||||
tx := as.be.BatchTx()
|
tx := as.be.BatchTx()
|
||||||
@ -600,43 +663,52 @@ func (as *authStore) isOpPermitted(userName string, key, rangeEnd []byte, permTy
|
|||||||
user := getUser(tx, userName)
|
user := getUser(tx, userName)
|
||||||
if user == nil {
|
if user == nil {
|
||||||
plog.Errorf("invalid user name %s for permission checking", userName)
|
plog.Errorf("invalid user name %s for permission checking", userName)
|
||||||
return false
|
return ErrPermissionDenied
|
||||||
|
}
|
||||||
|
|
||||||
|
// root role should have permission on all ranges
|
||||||
|
if hasRootRole(user) {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
|
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return false
|
return ErrPermissionDenied
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsPutPermitted(username string, key []byte) bool {
|
func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
|
||||||
return as.isOpPermitted(username, key, nil, authpb.WRITE)
|
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsRangePermitted(username string, key, rangeEnd []byte) bool {
|
func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
|
||||||
return as.isOpPermitted(username, key, rangeEnd, authpb.READ)
|
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsDeleteRangePermitted(username string, key, rangeEnd []byte) bool {
|
func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
|
||||||
return as.isOpPermitted(username, key, rangeEnd, authpb.WRITE)
|
return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (as *authStore) IsAdminPermitted(username string) bool {
|
func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
|
||||||
if !as.isAuthEnabled() {
|
if !as.isAuthEnabled() {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
tx := as.be.BatchTx()
|
tx := as.be.BatchTx()
|
||||||
tx.Lock()
|
tx.Lock()
|
||||||
defer tx.Unlock()
|
defer tx.Unlock()
|
||||||
|
|
||||||
u := getUser(tx, username)
|
u := getUser(tx, authInfo.Username)
|
||||||
if u == nil {
|
if u == nil {
|
||||||
return false
|
return ErrUserNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return hasRootRole(u)
|
if !hasRootRole(u) {
|
||||||
|
return ErrPermissionDenied
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getUser(tx backend.BatchTx, username string) *authpb.User {
|
func getUser(tx backend.BatchTx, username string) *authpb.User {
|
||||||
@ -748,13 +820,18 @@ func NewAuthStore(be backend.Backend) *authStore {
|
|||||||
tx.UnsafeCreateBucket(authUsersBucketName)
|
tx.UnsafeCreateBucket(authUsersBucketName)
|
||||||
tx.UnsafeCreateBucket(authRolesBucketName)
|
tx.UnsafeCreateBucket(authRolesBucketName)
|
||||||
|
|
||||||
|
as := &authStore{
|
||||||
|
be: be,
|
||||||
|
simpleTokens: make(map[string]string),
|
||||||
|
revision: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
as.commitRevision(tx)
|
||||||
|
|
||||||
tx.Unlock()
|
tx.Unlock()
|
||||||
be.ForceCommit()
|
be.ForceCommit()
|
||||||
|
|
||||||
return &authStore{
|
return as
|
||||||
be: be,
|
|
||||||
simpleTokens: make(map[string]string),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasRootRole(u *authpb.User) bool {
|
func hasRootRole(u *authpb.User) bool {
|
||||||
@ -765,3 +842,23 @@ func hasRootRole(u *authpb.User) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (as *authStore) commitRevision(tx backend.BatchTx) {
|
||||||
|
as.revision++
|
||||||
|
revBytes := make([]byte, revBytesLen)
|
||||||
|
binary.BigEndian.PutUint64(revBytes, as.revision)
|
||||||
|
tx.UnsafePut(authBucketName, revisionKey, revBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRevision(tx backend.BatchTx) uint64 {
|
||||||
|
_, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
|
||||||
|
if len(vs) != 1 {
|
||||||
|
plog.Panicf("failed to get the key of auth store revision")
|
||||||
|
}
|
||||||
|
|
||||||
|
return binary.BigEndian.Uint64(vs[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as *authStore) Revision() uint64 {
|
||||||
|
return as.revision
|
||||||
|
}
|
||||||
|
@ -20,9 +20,12 @@ import (
|
|||||||
|
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"github.com/coreos/etcd/mvcc/backend"
|
"github.com/coreos/etcd/mvcc/backend"
|
||||||
|
"golang.org/x/crypto/bcrypt"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() { BcryptCost = bcrypt.MinCost }
|
||||||
|
|
||||||
func TestUserAdd(t *testing.T) {
|
func TestUserAdd(t *testing.T) {
|
||||||
b, tPath := backend.NewDefaultTmpBackend()
|
b, tPath := backend.NewDefaultTmpBackend()
|
||||||
defer func() {
|
defer func() {
|
||||||
@ -45,6 +48,25 @@ func TestUserAdd(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func enableAuthAndCreateRoot(as *authStore) error {
|
||||||
|
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", Password: "root"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return as.AuthEnable()
|
||||||
|
}
|
||||||
|
|
||||||
func TestAuthenticate(t *testing.T) {
|
func TestAuthenticate(t *testing.T) {
|
||||||
b, tPath := backend.NewDefaultTmpBackend()
|
b, tPath := backend.NewDefaultTmpBackend()
|
||||||
defer func() {
|
defer func() {
|
||||||
@ -53,9 +75,13 @@ func TestAuthenticate(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
as := NewAuthStore(b)
|
as := NewAuthStore(b)
|
||||||
|
err := enableAuthAndCreateRoot(as)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
ua := &pb.AuthUserAddRequest{Name: "foo", Password: "bar"}
|
ua := &pb.AuthUserAddRequest{Name: "foo", Password: "bar"}
|
||||||
_, err := as.UserAdd(ua)
|
_, err = as.UserAdd(ua)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -96,9 +122,13 @@ func TestUserDelete(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
as := NewAuthStore(b)
|
as := NewAuthStore(b)
|
||||||
|
err := enableAuthAndCreateRoot(as)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
ua := &pb.AuthUserAddRequest{Name: "foo"}
|
ua := &pb.AuthUserAddRequest{Name: "foo"}
|
||||||
_, err := as.UserAdd(ua)
|
_, err = as.UserAdd(ua)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -128,8 +158,12 @@ func TestUserChangePassword(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
as := NewAuthStore(b)
|
as := NewAuthStore(b)
|
||||||
|
err := enableAuthAndCreateRoot(as)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
|
_, err = as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -169,9 +203,13 @@ func TestRoleAdd(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
as := NewAuthStore(b)
|
as := NewAuthStore(b)
|
||||||
|
err := enableAuthAndCreateRoot(as)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
// adds a new role
|
// adds a new role
|
||||||
_, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"})
|
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -185,8 +223,12 @@ func TestUserGrant(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
as := NewAuthStore(b)
|
as := NewAuthStore(b)
|
||||||
|
err := enableAuthAndCreateRoot(as)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
_, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
|
_, err = as.UserAdd(&pb.AuthUserAddRequest{Name: "foo"})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
32
build
32
build
@ -12,7 +12,7 @@ fi
|
|||||||
|
|
||||||
# enable/disable failpoints
|
# enable/disable failpoints
|
||||||
toggle_failpoints() {
|
toggle_failpoints() {
|
||||||
FAILPKGS="etcdserver/"
|
FAILPKGS="etcdserver/ mvcc/backend/"
|
||||||
|
|
||||||
mode="disable"
|
mode="disable"
|
||||||
if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
|
if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
|
||||||
@ -27,18 +27,30 @@ toggle_failpoints() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
etcd_build() {
|
etcd_build() {
|
||||||
if [ -z "${GOARCH}" ] || [ "${GOARCH}" = "$(go env GOHOSTARCH)" ]; then
|
out="bin"
|
||||||
out="bin"
|
if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
|
||||||
else
|
|
||||||
out="bin/${GOARCH}"
|
|
||||||
fi
|
|
||||||
toggle_failpoints
|
toggle_failpoints
|
||||||
# Static compilation is useful when etcd is run in a container
|
# Static compilation is useful when etcd is run in a container
|
||||||
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "-s -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}" -o ${out}/etcd ${REPO_PATH}/cmd
|
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "-s -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}" -o ${out}/etcd ${REPO_PATH}/cmd/etcd || return
|
||||||
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "-s" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl
|
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "-s" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl || return
|
||||||
|
}
|
||||||
|
|
||||||
|
etcd_setup_gopath() {
|
||||||
|
CDIR=$(cd `dirname "$0"` && pwd)
|
||||||
|
cd "$CDIR"
|
||||||
|
etcdGOPATH=${CDIR}/gopath
|
||||||
|
# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
|
||||||
|
export GOPATH=${etcdGOPATH}:$GOPATH
|
||||||
|
rm -f ${etcdGOPATH}/src
|
||||||
|
mkdir -p ${etcdGOPATH}
|
||||||
|
ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src
|
||||||
}
|
}
|
||||||
|
|
||||||
toggle_failpoints
|
toggle_failpoints
|
||||||
|
|
||||||
# don't build when sourced
|
# only build when called directly, not sourced
|
||||||
(echo "$0" | grep "/build$" > /dev/null) && etcd_build || true
|
if echo "$0" | grep "build$" >/dev/null; then
|
||||||
|
# force new gopath so builds outside of gopath work
|
||||||
|
etcd_setup_gopath
|
||||||
|
etcd_build
|
||||||
|
fi
|
||||||
|
@ -41,5 +41,5 @@ if (-not $env:GOPATH) {
|
|||||||
$env:CGO_ENABLED = 0
|
$env:CGO_ENABLED = 0
|
||||||
$env:GO15VENDOREXPERIMENT = 1
|
$env:GO15VENDOREXPERIMENT = 1
|
||||||
$GIT_SHA="$(git rev-parse --short HEAD)"
|
$GIT_SHA="$(git rev-parse --short HEAD)"
|
||||||
go build -a -installsuffix cgo -ldflags "-s -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA" -o bin\etcd.exe "$REPO_PATH\cmd"
|
go build -a -installsuffix cgo -ldflags "-s -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA" -o bin\etcd.exe "$REPO_PATH\cmd\etcd"
|
||||||
go build -a -installsuffix cgo -ldflags "-s" -o bin\etcdctl.exe "$REPO_PATH\cmd\etcdctl"
|
go build -a -installsuffix cgo -ldflags "-s" -o bin\etcdctl.exe "$REPO_PATH\cmd\etcdctl"
|
||||||
|
@ -37,6 +37,10 @@ var (
|
|||||||
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
|
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
|
||||||
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
|
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
|
||||||
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
|
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
|
||||||
|
|
||||||
|
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
|
||||||
|
// that Do() will not retry a request
|
||||||
|
oneShotCtxValue interface{}
|
||||||
)
|
)
|
||||||
|
|
||||||
var DefaultRequestTimeout = 5 * time.Second
|
var DefaultRequestTimeout = 5 * time.Second
|
||||||
@ -301,7 +305,7 @@ func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
|||||||
// If endpoints doesn't have the lu, just keep c.pinned = 0.
|
// If endpoints doesn't have the lu, just keep c.pinned = 0.
|
||||||
// Forwarding between follower and leader would be required but it works.
|
// Forwarding between follower and leader would be required but it works.
|
||||||
default:
|
default:
|
||||||
return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
|
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -335,6 +339,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
|||||||
var body []byte
|
var body []byte
|
||||||
var err error
|
var err error
|
||||||
cerr := &ClusterError{}
|
cerr := &ClusterError{}
|
||||||
|
isOneShot := ctx.Value(&oneShotCtxValue) != nil
|
||||||
|
|
||||||
for i := pinned; i < leps+pinned; i++ {
|
for i := pinned; i < leps+pinned; i++ {
|
||||||
k := i % leps
|
k := i % leps
|
||||||
@ -348,6 +353,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
|||||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
if isOneShot {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if resp.StatusCode/100 == 5 {
|
if resp.StatusCode/100 == 5 {
|
||||||
@ -358,6 +366,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
|||||||
default:
|
default:
|
||||||
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
||||||
}
|
}
|
||||||
|
if isOneShot {
|
||||||
|
return nil, nil, cerr.Errors[0]
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if k != pinned {
|
if k != pinned {
|
||||||
@ -393,7 +404,7 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
|
|||||||
c.Lock()
|
c.Lock()
|
||||||
defer c.Unlock()
|
defer c.Unlock()
|
||||||
|
|
||||||
eps := make([]string, 0)
|
var eps []string
|
||||||
for _, m := range ms {
|
for _, m := range ms {
|
||||||
eps = append(eps, m.ClientURLs...)
|
eps = append(eps, m.ClientURLs...)
|
||||||
}
|
}
|
||||||
|
@ -855,7 +855,7 @@ func TestHTTPClusterClientAutoSyncFail(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
err = hc.AutoSync(context.Background(), time.Hour)
|
err = hc.AutoSync(context.Background(), time.Hour)
|
||||||
if err.Error() != ErrClusterUnavailable.Error() {
|
if !strings.HasPrefix(err.Error(), ErrClusterUnavailable.Error()) {
|
||||||
t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err)
|
t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -21,7 +21,11 @@ type ClusterError struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ce *ClusterError) Error() string {
|
func (ce *ClusterError) Error() string {
|
||||||
return ErrClusterUnavailable.Error()
|
s := ErrClusterUnavailable.Error()
|
||||||
|
for i, e := range ce.Errors {
|
||||||
|
s += fmt.Sprintf("; error #%d: %s\n", i, e)
|
||||||
|
}
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ce *ClusterError) Detail() string {
|
func (ce *ClusterError) Detail() string {
|
||||||
|
134
client/integration/client_test.go
Normal file
134
client/integration/client_test.go
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/client"
|
||||||
|
"github.com/coreos/etcd/integration"
|
||||||
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
|
||||||
|
func TestV2NoRetryEOF(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
// generate an EOF response; specify address so appears first in sorted ep list
|
||||||
|
lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("eof:123.%d.sock", os.Getpid()))
|
||||||
|
defer lEOF.Close()
|
||||||
|
tries := uint32(0)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
conn, err := lEOF.Accept()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
atomic.AddUint32(&tries, 1)
|
||||||
|
conn.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
eofURL := integration.UrlScheme + "://" + lEOF.Addr().String()
|
||||||
|
cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
|
||||||
|
kapi := client.NewKeysAPI(cli)
|
||||||
|
for i, f := range noRetryList(kapi) {
|
||||||
|
startTries := atomic.LoadUint32(&tries)
|
||||||
|
if err := f(); err == nil {
|
||||||
|
t.Errorf("#%d: expected EOF error, got nil", i)
|
||||||
|
}
|
||||||
|
endTries := atomic.LoadUint32(&tries)
|
||||||
|
if startTries+1 != endTries {
|
||||||
|
t.Errorf("#%d: expected 1 try, got %d", i, endTries-startTries)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
|
||||||
|
func TestV2NoRetryNoLeader(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
|
lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("errHttp:123.%d.sock", os.Getpid()))
|
||||||
|
eh := &errHandler{errCode: http.StatusServiceUnavailable}
|
||||||
|
srv := httptest.NewUnstartedServer(eh)
|
||||||
|
defer lHttp.Close()
|
||||||
|
defer srv.Close()
|
||||||
|
srv.Listener = lHttp
|
||||||
|
go srv.Start()
|
||||||
|
lHttpURL := integration.UrlScheme + "://" + lHttp.Addr().String()
|
||||||
|
|
||||||
|
cli := integration.MustNewHTTPClient(t, []string{lHttpURL, lHttpURL}, nil)
|
||||||
|
kapi := client.NewKeysAPI(cli)
|
||||||
|
// test error code
|
||||||
|
for i, f := range noRetryList(kapi) {
|
||||||
|
reqs := eh.reqs
|
||||||
|
if err := f(); err == nil || !strings.Contains(err.Error(), "no leader") {
|
||||||
|
t.Errorf("#%d: expected \"no leader\", got %v", i, err)
|
||||||
|
}
|
||||||
|
if eh.reqs != reqs+1 {
|
||||||
|
t.Errorf("#%d: expected 1 request, got %d", i, eh.reqs-reqs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestV2RetryRefuse tests destructive api calls will retry if a connection is refused.
|
||||||
|
func TestV2RetryRefuse(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
cl := integration.NewCluster(t, 1)
|
||||||
|
cl.Launch(t)
|
||||||
|
defer cl.Terminate(t)
|
||||||
|
// test connection refused; expect no error failover
|
||||||
|
cli := integration.MustNewHTTPClient(t, []string{integration.UrlScheme + "://refuseconn:123", cl.URL(0)}, nil)
|
||||||
|
kapi := client.NewKeysAPI(cli)
|
||||||
|
if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
for i, f := range noRetryList(kapi) {
|
||||||
|
if err := f(); err != nil {
|
||||||
|
t.Errorf("#%d: unexpected retry failure (%v)", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type errHandler struct {
|
||||||
|
errCode int
|
||||||
|
reqs int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eh *errHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||||
|
req.Body.Close()
|
||||||
|
eh.reqs++
|
||||||
|
w.WriteHeader(eh.errCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func noRetryList(kapi client.KeysAPI) []func() error {
|
||||||
|
return []func() error{
|
||||||
|
func() error {
|
||||||
|
opts := &client.SetOptions{PrevExist: client.PrevNoExist}
|
||||||
|
_, err := kapi.Set(context.Background(), "/setkey", "bar", opts)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
func() error {
|
||||||
|
_, err := kapi.Delete(context.Background(), "/delkey", nil)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
17
client/integration/doc.go
Normal file
17
client/integration/doc.go
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package integration implements tests built upon embedded etcd, focusing on
|
||||||
|
// the correctness of the etcd v2 client.
|
||||||
|
package integration
|
20
client/integration/main_test.go
Normal file
20
client/integration/main_test.go
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
v := m.Run()
|
||||||
|
if v == 0 && testutil.CheckLeakedGoroutine() {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
os.Exit(v)
|
||||||
|
}
|
@ -8,10 +8,11 @@ package client
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
codec1978 "github.com/ugorji/go/codec"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime"
|
"runtime"
|
||||||
time "time"
|
time "time"
|
||||||
|
|
||||||
|
codec1978 "github.com/ugorji/go/codec"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -191,6 +191,10 @@ type SetOptions struct {
|
|||||||
|
|
||||||
// Dir specifies whether or not this Node should be created as a directory.
|
// Dir specifies whether or not this Node should be created as a directory.
|
||||||
Dir bool
|
Dir bool
|
||||||
|
|
||||||
|
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
|
||||||
|
// If set, the response will only contain the current value when the request fails.
|
||||||
|
NoValueOnSuccess bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type GetOptions struct {
|
type GetOptions struct {
|
||||||
@ -335,9 +339,14 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
|
|||||||
act.TTL = opts.TTL
|
act.TTL = opts.TTL
|
||||||
act.Refresh = opts.Refresh
|
act.Refresh = opts.Refresh
|
||||||
act.Dir = opts.Dir
|
act.Dir = opts.Dir
|
||||||
|
act.NoValueOnSuccess = opts.NoValueOnSuccess
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, body, err := k.client.Do(ctx, act)
|
doCtx := ctx
|
||||||
|
if act.PrevExist == PrevNoExist {
|
||||||
|
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
|
||||||
|
}
|
||||||
|
resp, body, err := k.client.Do(doCtx, act)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -385,7 +394,8 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption
|
|||||||
act.Recursive = opts.Recursive
|
act.Recursive = opts.Recursive
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, body, err := k.client.Do(ctx, act)
|
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
|
||||||
|
resp, body, err := k.client.Do(doCtx, act)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -518,15 +528,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type setAction struct {
|
type setAction struct {
|
||||||
Prefix string
|
Prefix string
|
||||||
Key string
|
Key string
|
||||||
Value string
|
Value string
|
||||||
PrevValue string
|
PrevValue string
|
||||||
PrevIndex uint64
|
PrevIndex uint64
|
||||||
PrevExist PrevExistType
|
PrevExist PrevExistType
|
||||||
TTL time.Duration
|
TTL time.Duration
|
||||||
Refresh bool
|
Refresh bool
|
||||||
Dir bool
|
Dir bool
|
||||||
|
NoValueOnSuccess bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
||||||
@ -560,6 +571,9 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
|||||||
if a.Refresh {
|
if a.Refresh {
|
||||||
form.Add("refresh", "true")
|
form.Add("refresh", "true")
|
||||||
}
|
}
|
||||||
|
if a.NoValueOnSuccess {
|
||||||
|
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
|
||||||
|
}
|
||||||
|
|
||||||
u.RawQuery = params.Encode()
|
u.RawQuery = params.Encode()
|
||||||
body := strings.NewReader(form.Encode())
|
body := strings.NewReader(form.Encode())
|
||||||
|
@ -407,6 +407,15 @@ func TestSetAction(t *testing.T) {
|
|||||||
wantURL: "http://example.com/foo?dir=true",
|
wantURL: "http://example.com/foo?dir=true",
|
||||||
wantBody: "",
|
wantBody: "",
|
||||||
},
|
},
|
||||||
|
// NoValueOnSuccess is set
|
||||||
|
{
|
||||||
|
act: setAction{
|
||||||
|
Key: "foo",
|
||||||
|
NoValueOnSuccess: true,
|
||||||
|
},
|
||||||
|
wantURL: "http://example.com/foo?noValueOnSuccess=true",
|
||||||
|
wantBody: "value=",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
|
@ -14,6 +14,20 @@
|
|||||||
|
|
||||||
package client
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
roleNotFoundRegExp *regexp.Regexp
|
||||||
|
userNotFoundRegExp *regexp.Regexp
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
|
||||||
|
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
|
||||||
|
}
|
||||||
|
|
||||||
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
|
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
|
||||||
func IsKeyNotFound(err error) bool {
|
func IsKeyNotFound(err error) bool {
|
||||||
if cErr, ok := err.(Error); ok {
|
if cErr, ok := err.(Error); ok {
|
||||||
@ -21,3 +35,19 @@ func IsKeyNotFound(err error) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsRoleNotFound returns true if the error means role not found of v2 API.
|
||||||
|
func IsRoleNotFound(err error) bool {
|
||||||
|
if ae, ok := err.(authError); ok {
|
||||||
|
return roleNotFoundRegExp.MatchString(ae.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsUserNotFound returns true if the error means user not found of v2 API.
|
||||||
|
func IsUserNotFound(err error) bool {
|
||||||
|
if ae, ok := err.(authError); ok {
|
||||||
|
return userNotFoundRegExp.MatchString(ae.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
@ -43,6 +43,7 @@ type (
|
|||||||
AuthRoleListResponse pb.AuthRoleListResponse
|
AuthRoleListResponse pb.AuthRoleListResponse
|
||||||
|
|
||||||
PermissionType authpb.Permission_Type
|
PermissionType authpb.Permission_Type
|
||||||
|
Permission authpb.Permission
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -145,12 +146,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
||||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
|
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
|
||||||
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
||||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
|
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
|
||||||
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -175,12 +176,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
|
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
|
||||||
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
|
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
|
||||||
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
|
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
|
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
|
||||||
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
|
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
|
||||||
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
|
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -208,7 +209,7 @@ type authenticator struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
|
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
|
||||||
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
|
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
|
||||||
return (*AuthenticateResponse)(resp), toErr(ctx, err)
|
return (*AuthenticateResponse)(resp), toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -17,7 +17,7 @@ package clientv3
|
|||||||
import (
|
import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
@ -26,32 +26,133 @@ import (
|
|||||||
// simpleBalancer does the bare minimum to expose multiple eps
|
// simpleBalancer does the bare minimum to expose multiple eps
|
||||||
// to the grpc reconnection code path
|
// to the grpc reconnection code path
|
||||||
type simpleBalancer struct {
|
type simpleBalancer struct {
|
||||||
// eps are the client's endpoints stripped of any URL scheme
|
// addrs are the client's endpoints for grpc
|
||||||
eps []string
|
addrs []grpc.Address
|
||||||
ch chan []grpc.Address
|
// notifyCh notifies grpc of the set of addresses for connecting
|
||||||
numGets uint32
|
notifyCh chan []grpc.Address
|
||||||
|
|
||||||
|
// readyc closes once the first connection is up
|
||||||
|
readyc chan struct{}
|
||||||
|
readyOnce sync.Once
|
||||||
|
|
||||||
|
// mu protects upEps, pinAddr, and connectingAddr
|
||||||
|
mu sync.RWMutex
|
||||||
|
// upEps holds the current endpoints that have an active connection
|
||||||
|
upEps map[string]struct{}
|
||||||
|
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
|
||||||
|
upc chan struct{}
|
||||||
|
|
||||||
|
// pinAddr is the currently pinned address; set to the empty string on
|
||||||
|
// intialization and shutdown.
|
||||||
|
pinAddr string
|
||||||
|
|
||||||
|
closed bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func newSimpleBalancer(eps []string) grpc.Balancer {
|
func newSimpleBalancer(eps []string) *simpleBalancer {
|
||||||
ch := make(chan []grpc.Address, 1)
|
notifyCh := make(chan []grpc.Address, 1)
|
||||||
addrs := make([]grpc.Address, len(eps))
|
addrs := make([]grpc.Address, len(eps))
|
||||||
for i := range eps {
|
for i := range eps {
|
||||||
addrs[i].Addr = getHost(eps[i])
|
addrs[i].Addr = getHost(eps[i])
|
||||||
}
|
}
|
||||||
ch <- addrs
|
notifyCh <- addrs
|
||||||
return &simpleBalancer{eps: eps, ch: ch}
|
sb := &simpleBalancer{
|
||||||
|
addrs: addrs,
|
||||||
|
notifyCh: notifyCh,
|
||||||
|
readyc: make(chan struct{}),
|
||||||
|
upEps: make(map[string]struct{}),
|
||||||
|
upc: make(chan struct{}),
|
||||||
|
}
|
||||||
|
return sb
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *simpleBalancer) Start(target string) error { return nil }
|
func (b *simpleBalancer) Start(target string) error { return nil }
|
||||||
func (b *simpleBalancer) Up(addr grpc.Address) func(error) { return func(error) {} }
|
|
||||||
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
||||||
v := atomic.AddUint32(&b.numGets, 1)
|
b.mu.Lock()
|
||||||
ep := b.eps[v%uint32(len(b.eps))]
|
defer b.mu.Unlock()
|
||||||
return grpc.Address{Addr: getHost(ep)}, func() {}, nil
|
return b.upc
|
||||||
}
|
}
|
||||||
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.ch }
|
|
||||||
|
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
|
// gRPC might call Up after it called Close. We add this check
|
||||||
|
// to "fix" it up at application layer. Or our simplerBalancer
|
||||||
|
// might panic since b.upc is closed.
|
||||||
|
if b.closed {
|
||||||
|
return func(err error) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b.upEps) == 0 {
|
||||||
|
// notify waiting Get()s and pin first connected address
|
||||||
|
close(b.upc)
|
||||||
|
b.pinAddr = addr.Addr
|
||||||
|
}
|
||||||
|
b.upEps[addr.Addr] = struct{}{}
|
||||||
|
|
||||||
|
// notify client that a connection is up
|
||||||
|
b.readyOnce.Do(func() { close(b.readyc) })
|
||||||
|
|
||||||
|
return func(err error) {
|
||||||
|
b.mu.Lock()
|
||||||
|
delete(b.upEps, addr.Addr)
|
||||||
|
if len(b.upEps) == 0 && b.pinAddr != "" {
|
||||||
|
b.upc = make(chan struct{})
|
||||||
|
} else if b.pinAddr == addr.Addr {
|
||||||
|
// choose new random up endpoint
|
||||||
|
for k := range b.upEps {
|
||||||
|
b.pinAddr = k
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||||
|
var addr string
|
||||||
|
for {
|
||||||
|
b.mu.RLock()
|
||||||
|
ch := b.upc
|
||||||
|
b.mu.RUnlock()
|
||||||
|
select {
|
||||||
|
case <-ch:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return grpc.Address{Addr: ""}, nil, ctx.Err()
|
||||||
|
}
|
||||||
|
b.mu.RLock()
|
||||||
|
addr = b.pinAddr
|
||||||
|
upEps := len(b.upEps)
|
||||||
|
b.mu.RUnlock()
|
||||||
|
if addr == "" {
|
||||||
|
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||||
|
}
|
||||||
|
if upEps > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return grpc.Address{Addr: addr}, func() {}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
|
||||||
|
|
||||||
func (b *simpleBalancer) Close() error {
|
func (b *simpleBalancer) Close() error {
|
||||||
close(b.ch)
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
// In case gRPC calls close twice. TODO: remove the checking
|
||||||
|
// when we are sure that gRPC wont call close twice.
|
||||||
|
if b.closed {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b.closed = true
|
||||||
|
close(b.notifyCh)
|
||||||
|
// terminate all waiting Get()s
|
||||||
|
b.pinAddr = ""
|
||||||
|
if len(b.upEps) == 0 {
|
||||||
|
close(b.upc)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -29,6 +29,7 @@ import (
|
|||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
)
|
)
|
||||||
@ -46,9 +47,11 @@ type Client struct {
|
|||||||
Auth
|
Auth
|
||||||
Maintenance
|
Maintenance
|
||||||
|
|
||||||
conn *grpc.ClientConn
|
conn *grpc.ClientConn
|
||||||
cfg Config
|
cfg Config
|
||||||
creds *credentials.TransportCredentials
|
creds *credentials.TransportCredentials
|
||||||
|
balancer *simpleBalancer
|
||||||
|
retryWrapper retryRpcFunc
|
||||||
|
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
@ -138,11 +141,10 @@ func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// dialSetupOpts gives the dial opts prioer to any authentication
|
// dialSetupOpts gives the dial opts prior to any authentication
|
||||||
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) []grpc.DialOption {
|
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
|
||||||
opts := []grpc.DialOption{
|
if c.cfg.DialTimeout > 0 {
|
||||||
grpc.WithBlock(),
|
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
|
||||||
grpc.WithTimeout(c.cfg.DialTimeout),
|
|
||||||
}
|
}
|
||||||
opts = append(opts, dopts...)
|
opts = append(opts, dopts...)
|
||||||
|
|
||||||
@ -240,12 +242,30 @@ func newClient(cfg *Config) (*Client, error) {
|
|||||||
client.Password = cfg.Password
|
client.Password = cfg.Password
|
||||||
}
|
}
|
||||||
|
|
||||||
b := newSimpleBalancer(cfg.Endpoints)
|
client.balancer = newSimpleBalancer(cfg.Endpoints)
|
||||||
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(b))
|
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
client.conn = conn
|
client.conn = conn
|
||||||
|
client.retryWrapper = client.newRetryWrapper()
|
||||||
|
|
||||||
|
// wait for a connection
|
||||||
|
if cfg.DialTimeout > 0 {
|
||||||
|
hasConn := false
|
||||||
|
waitc := time.After(cfg.DialTimeout)
|
||||||
|
select {
|
||||||
|
case <-client.balancer.readyc:
|
||||||
|
hasConn = true
|
||||||
|
case <-ctx.Done():
|
||||||
|
case <-waitc:
|
||||||
|
}
|
||||||
|
if !hasConn {
|
||||||
|
client.cancel()
|
||||||
|
conn.Close()
|
||||||
|
return nil, grpc.ErrClientConnTimeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
client.Cluster = NewCluster(client)
|
client.Cluster = NewCluster(client)
|
||||||
client.KV = NewKV(client)
|
client.KV = NewKV(client)
|
||||||
@ -275,8 +295,14 @@ func isHaltErr(ctx context.Context, err error) bool {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return strings.HasPrefix(grpc.ErrorDesc(err), "etcdserver: ") ||
|
code := grpc.Code(err)
|
||||||
strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
|
// Unavailable codes mean the system will be right back.
|
||||||
|
// (e.g., can't connect, lost leader)
|
||||||
|
// Treat Internal codes as if something failed, leaving the
|
||||||
|
// system in an inconsistent state, but retrying could make progress.
|
||||||
|
// (e.g., failed in middle of send, corrupted frame)
|
||||||
|
// TODO: are permanent Internal errors possible from grpc?
|
||||||
|
return code != codes.Unavailable && code != codes.Internal
|
||||||
}
|
}
|
||||||
|
|
||||||
func toErr(ctx context.Context, err error) error {
|
func toErr(ctx context.Context, err error) error {
|
||||||
@ -284,9 +310,20 @@ func toErr(ctx context.Context, err error) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
err = rpctypes.Error(err)
|
err = rpctypes.Error(err)
|
||||||
if ctx.Err() != nil && strings.Contains(err.Error(), "context") {
|
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||||
err = ctx.Err()
|
return err
|
||||||
} else if strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) {
|
}
|
||||||
|
code := grpc.Code(err)
|
||||||
|
switch code {
|
||||||
|
case codes.DeadlineExceeded:
|
||||||
|
fallthrough
|
||||||
|
case codes.Canceled:
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
err = ctx.Err()
|
||||||
|
}
|
||||||
|
case codes.Unavailable:
|
||||||
|
err = ErrNoAvailableEndpoints
|
||||||
|
case codes.FailedPrecondition:
|
||||||
err = grpc.ErrClientConnClosing
|
err = grpc.ErrClientConnClosing
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
@ -19,11 +19,15 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDialTimeout(t *testing.T) {
|
func TestDialTimeout(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
donec := make(chan error)
|
donec := make(chan error)
|
||||||
go func() {
|
go func() {
|
||||||
// without timeout, grpc keeps redialing if connection refused
|
// without timeout, grpc keeps redialing if connection refused
|
||||||
@ -55,9 +59,24 @@ func TestDialTimeout(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDialNoTimeout(t *testing.T) {
|
||||||
|
cfg := Config{Endpoints: []string{"127.0.0.1:12345"}}
|
||||||
|
c, err := New(cfg)
|
||||||
|
if c == nil || err != nil {
|
||||||
|
t.Fatalf("new client with DialNoWait should succeed, got %v", err)
|
||||||
|
}
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
|
||||||
func TestIsHaltErr(t *testing.T) {
|
func TestIsHaltErr(t *testing.T) {
|
||||||
if !isHaltErr(nil, fmt.Errorf("etcdserver: some etcdserver error")) {
|
if !isHaltErr(nil, fmt.Errorf("etcdserver: some etcdserver error")) {
|
||||||
t.Errorf(`error prefixed with "etcdserver: " should be Halted`)
|
t.Errorf(`error prefixed with "etcdserver: " should be Halted by default`)
|
||||||
|
}
|
||||||
|
if isHaltErr(nil, rpctypes.ErrGRPCStopped) {
|
||||||
|
t.Errorf("error %v should not halt", rpctypes.ErrGRPCStopped)
|
||||||
|
}
|
||||||
|
if isHaltErr(nil, rpctypes.ErrGRPCNoLeader) {
|
||||||
|
t.Errorf("error %v should not halt", rpctypes.ErrGRPCNoLeader)
|
||||||
}
|
}
|
||||||
ctx, cancel := context.WithCancel(context.TODO())
|
ctx, cancel := context.WithCancel(context.TODO())
|
||||||
if isHaltErr(ctx, nil) {
|
if isHaltErr(ctx, nil) {
|
||||||
|
@ -17,6 +17,7 @@ package clientv3
|
|||||||
import (
|
import (
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@ -46,7 +47,7 @@ type cluster struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewCluster(c *Client) Cluster {
|
func NewCluster(c *Client) Cluster {
|
||||||
return &cluster{remote: pb.NewClusterClient(c.conn)}
|
return &cluster{remote: RetryClusterClient(c)}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
|
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
|
||||||
@ -90,7 +91,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin
|
|||||||
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
|
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
|
||||||
// it is safe to retry on list.
|
// it is safe to retry on list.
|
||||||
for {
|
for {
|
||||||
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{})
|
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return (*MemberListResponse)(resp), nil
|
return (*MemberListResponse)(resp), nil
|
||||||
}
|
}
|
||||||
|
@ -29,7 +29,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Election struct {
|
type Election struct {
|
||||||
client *v3.Client
|
session *Session
|
||||||
|
|
||||||
keyPrefix string
|
keyPrefix string
|
||||||
|
|
||||||
@ -39,27 +39,24 @@ type Election struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewElection returns a new election on a given key prefix.
|
// NewElection returns a new election on a given key prefix.
|
||||||
func NewElection(client *v3.Client, pfx string) *Election {
|
func NewElection(s *Session, pfx string) *Election {
|
||||||
return &Election{client: client, keyPrefix: pfx}
|
return &Election{session: s, keyPrefix: pfx + "/"}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Campaign puts a value as eligible for the election. It blocks until
|
// Campaign puts a value as eligible for the election. It blocks until
|
||||||
// it is elected, an error occurs, or the context is cancelled.
|
// it is elected, an error occurs, or the context is cancelled.
|
||||||
func (e *Election) Campaign(ctx context.Context, val string) error {
|
func (e *Election) Campaign(ctx context.Context, val string) error {
|
||||||
s, serr := NewSession(e.client)
|
s := e.session
|
||||||
if serr != nil {
|
client := e.session.Client()
|
||||||
return serr
|
|
||||||
}
|
|
||||||
|
|
||||||
k := fmt.Sprintf("%s/%x", e.keyPrefix, s.Lease())
|
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
|
||||||
txn := e.client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
|
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
|
||||||
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
|
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
|
||||||
txn = txn.Else(v3.OpGet(k))
|
txn = txn.Else(v3.OpGet(k))
|
||||||
resp, err := txn.Commit()
|
resp, err := txn.Commit()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
|
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
|
||||||
if !resp.Succeeded {
|
if !resp.Succeeded {
|
||||||
kv := resp.Responses[0].GetResponseRange().Kvs[0]
|
kv := resp.Responses[0].GetResponseRange().Kvs[0]
|
||||||
@ -72,12 +69,12 @@ func (e *Election) Campaign(ctx context.Context, val string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = waitDeletes(ctx, e.client, e.keyPrefix, v3.WithPrefix(), v3.WithRev(e.leaderRev-1))
|
err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// clean up in case of context cancel
|
// clean up in case of context cancel
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
e.Resign(e.client.Ctx())
|
e.Resign(client.Ctx())
|
||||||
default:
|
default:
|
||||||
e.leaderSession = nil
|
e.leaderSession = nil
|
||||||
}
|
}
|
||||||
@ -92,8 +89,9 @@ func (e *Election) Proclaim(ctx context.Context, val string) error {
|
|||||||
if e.leaderSession == nil {
|
if e.leaderSession == nil {
|
||||||
return ErrElectionNotLeader
|
return ErrElectionNotLeader
|
||||||
}
|
}
|
||||||
|
client := e.session.Client()
|
||||||
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
|
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
|
||||||
txn := e.client.Txn(ctx).If(cmp)
|
txn := client.Txn(ctx).If(cmp)
|
||||||
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
|
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
|
||||||
tresp, terr := txn.Commit()
|
tresp, terr := txn.Commit()
|
||||||
if terr != nil {
|
if terr != nil {
|
||||||
@ -111,7 +109,8 @@ func (e *Election) Resign(ctx context.Context) (err error) {
|
|||||||
if e.leaderSession == nil {
|
if e.leaderSession == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
_, err = e.client.Delete(ctx, e.leaderKey)
|
client := e.session.Client()
|
||||||
|
_, err = client.Delete(ctx, e.leaderKey)
|
||||||
e.leaderKey = ""
|
e.leaderKey = ""
|
||||||
e.leaderSession = nil
|
e.leaderSession = nil
|
||||||
return err
|
return err
|
||||||
@ -119,7 +118,8 @@ func (e *Election) Resign(ctx context.Context) (err error) {
|
|||||||
|
|
||||||
// Leader returns the leader value for the current election.
|
// Leader returns the leader value for the current election.
|
||||||
func (e *Election) Leader(ctx context.Context) (string, error) {
|
func (e *Election) Leader(ctx context.Context) (string, error) {
|
||||||
resp, err := e.client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
|
client := e.session.Client()
|
||||||
|
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
} else if len(resp.Kvs) == 0 {
|
} else if len(resp.Kvs) == 0 {
|
||||||
@ -139,9 +139,11 @@ func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
||||||
|
client := e.session.Client()
|
||||||
|
|
||||||
defer close(ch)
|
defer close(ch)
|
||||||
for {
|
for {
|
||||||
resp, err := e.client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
|
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -152,7 +154,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
|||||||
if len(resp.Kvs) == 0 {
|
if len(resp.Kvs) == 0 {
|
||||||
// wait for first key put on prefix
|
// wait for first key put on prefix
|
||||||
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
|
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
|
||||||
wch := e.client.Watch(cctx, e.keyPrefix, opts...)
|
wch := client.Watch(cctx, e.keyPrefix, opts...)
|
||||||
|
|
||||||
for kv == nil {
|
for kv == nil {
|
||||||
wr, ok := <-wch
|
wr, ok := <-wch
|
||||||
@ -172,7 +174,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
|||||||
kv = resp.Kvs[0]
|
kv = resp.Kvs[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
wch := e.client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
|
wch := client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
|
||||||
keyDeleted := false
|
keyDeleted := false
|
||||||
for !keyDeleted {
|
for !keyDeleted {
|
||||||
wr, ok := <-wch
|
wr, ok := <-wch
|
||||||
|
@ -16,7 +16,6 @@ package concurrency
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
|
||||||
|
|
||||||
v3 "github.com/coreos/etcd/clientv3"
|
v3 "github.com/coreos/etcd/clientv3"
|
||||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||||
@ -26,46 +25,40 @@ import (
|
|||||||
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
|
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
|
||||||
cctx, cancel := context.WithCancel(ctx)
|
cctx, cancel := context.WithCancel(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
var wr v3.WatchResponse
|
||||||
wch := client.Watch(cctx, key, v3.WithRev(rev))
|
wch := client.Watch(cctx, key, v3.WithRev(rev))
|
||||||
for wr := range wch {
|
for wr = range wch {
|
||||||
for _, ev := range wr.Events {
|
for _, ev := range wr.Events {
|
||||||
if ev.Type == mvccpb.DELETE {
|
if ev.Type == mvccpb.DELETE {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if err := wr.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if err := ctx.Err(); err != nil {
|
if err := ctx.Err(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return fmt.Errorf("lost watcher waiting for delete")
|
return fmt.Errorf("lost watcher waiting for delete")
|
||||||
}
|
}
|
||||||
|
|
||||||
// waitDeletes efficiently waits until all keys matched by Get(key, opts...) are deleted
|
// waitDeletes efficiently waits until all keys matching the prefix and no greater
|
||||||
func waitDeletes(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
|
// than the create revision.
|
||||||
getOpts := []v3.OpOption{v3.WithSort(v3.SortByCreateRevision, v3.SortAscend)}
|
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error {
|
||||||
getOpts = append(getOpts, opts...)
|
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
|
||||||
resp, err := client.Get(ctx, key, getOpts...)
|
for {
|
||||||
maxRev := int64(math.MaxInt64)
|
resp, err := client.Get(ctx, pfx, getOpts...)
|
||||||
getOpts = append(getOpts, v3.WithRev(0))
|
if err != nil {
|
||||||
for err == nil {
|
return err
|
||||||
for len(resp.Kvs) > 0 {
|
|
||||||
i := len(resp.Kvs) - 1
|
|
||||||
if resp.Kvs[i].CreateRevision <= maxRev {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
resp.Kvs = resp.Kvs[:i]
|
|
||||||
}
|
}
|
||||||
if len(resp.Kvs) == 0 {
|
if len(resp.Kvs) == 0 {
|
||||||
break
|
return nil
|
||||||
}
|
}
|
||||||
lastKV := resp.Kvs[len(resp.Kvs)-1]
|
lastKey := string(resp.Kvs[0].Key)
|
||||||
maxRev = lastKV.CreateRevision
|
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
|
||||||
err = waitDelete(ctx, client, string(lastKV.Key), maxRev)
|
return err
|
||||||
if err != nil || len(resp.Kvs) == 1 {
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
getOpts = append(getOpts, v3.WithLimit(int64(len(resp.Kvs)-1)))
|
|
||||||
resp, err = client.Get(ctx, key, getOpts...)
|
|
||||||
}
|
}
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
@ -24,32 +24,30 @@ import (
|
|||||||
|
|
||||||
// Mutex implements the sync Locker interface with etcd
|
// Mutex implements the sync Locker interface with etcd
|
||||||
type Mutex struct {
|
type Mutex struct {
|
||||||
client *v3.Client
|
s *Session
|
||||||
|
|
||||||
pfx string
|
pfx string
|
||||||
myKey string
|
myKey string
|
||||||
myRev int64
|
myRev int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewMutex(client *v3.Client, pfx string) *Mutex {
|
func NewMutex(s *Session, pfx string) *Mutex {
|
||||||
return &Mutex{client, pfx, "", -1}
|
return &Mutex{s, pfx + "/", "", -1}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Lock locks the mutex with a cancellable context. If the context is cancelled
|
// Lock locks the mutex with a cancellable context. If the context is cancelled
|
||||||
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
|
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
|
||||||
func (m *Mutex) Lock(ctx context.Context) error {
|
func (m *Mutex) Lock(ctx context.Context) error {
|
||||||
s, serr := NewSession(m.client)
|
s := m.s
|
||||||
if serr != nil {
|
client := m.s.Client()
|
||||||
return serr
|
|
||||||
}
|
|
||||||
|
|
||||||
m.myKey = fmt.Sprintf("%s/%x", m.pfx, s.Lease())
|
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
|
||||||
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
|
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
|
||||||
// put self in lock waiters via myKey; oldest waiter holds lock
|
// put self in lock waiters via myKey; oldest waiter holds lock
|
||||||
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
|
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
|
||||||
// reuse key in case this session already holds the lock
|
// reuse key in case this session already holds the lock
|
||||||
get := v3.OpGet(m.myKey)
|
get := v3.OpGet(m.myKey)
|
||||||
resp, err := m.client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
|
resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -59,18 +57,19 @@ func (m *Mutex) Lock(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// wait for deletion revisions prior to myKey
|
// wait for deletion revisions prior to myKey
|
||||||
err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
|
err = waitDeletes(ctx, client, m.pfx, m.myRev-1)
|
||||||
// release lock key if cancelled
|
// release lock key if cancelled
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
m.Unlock(m.client.Ctx())
|
m.Unlock(client.Ctx())
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Mutex) Unlock(ctx context.Context) error {
|
func (m *Mutex) Unlock(ctx context.Context) error {
|
||||||
if _, err := m.client.Delete(ctx, m.myKey); err != nil {
|
client := m.s.Client()
|
||||||
|
if _, err := client.Delete(ctx, m.myKey); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
m.myKey = "\x00"
|
m.myKey = "\x00"
|
||||||
@ -87,17 +86,19 @@ func (m *Mutex) Key() string { return m.myKey }
|
|||||||
type lockerMutex struct{ *Mutex }
|
type lockerMutex struct{ *Mutex }
|
||||||
|
|
||||||
func (lm *lockerMutex) Lock() {
|
func (lm *lockerMutex) Lock() {
|
||||||
if err := lm.Mutex.Lock(lm.client.Ctx()); err != nil {
|
client := lm.s.Client()
|
||||||
|
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
func (lm *lockerMutex) Unlock() {
|
func (lm *lockerMutex) Unlock() {
|
||||||
if err := lm.Mutex.Unlock(lm.client.Ctx()); err != nil {
|
client := lm.s.Client()
|
||||||
|
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewLocker creates a sync.Locker backed by an etcd mutex.
|
// NewLocker creates a sync.Locker backed by an etcd mutex.
|
||||||
func NewLocker(client *v3.Client, pfx string) sync.Locker {
|
func NewLocker(s *Session, pfx string) sync.Locker {
|
||||||
return &lockerMutex{NewMutex(client, pfx)}
|
return &lockerMutex{NewMutex(s, pfx)}
|
||||||
}
|
}
|
||||||
|
@ -15,21 +15,11 @@
|
|||||||
package concurrency
|
package concurrency
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sync"
|
|
||||||
|
|
||||||
v3 "github.com/coreos/etcd/clientv3"
|
v3 "github.com/coreos/etcd/clientv3"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
// only keep one ephemeral lease per client
|
const defaultSessionTTL = 60
|
||||||
var clientSessions clientSessionMgr = clientSessionMgr{sessions: make(map[*v3.Client]*Session)}
|
|
||||||
|
|
||||||
const sessionTTL = 60
|
|
||||||
|
|
||||||
type clientSessionMgr struct {
|
|
||||||
sessions map[*v3.Client]*Session
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Session represents a lease kept alive for the lifetime of a client.
|
// Session represents a lease kept alive for the lifetime of a client.
|
||||||
// Fault-tolerant applications may use sessions to reason about liveness.
|
// Fault-tolerant applications may use sessions to reason about liveness.
|
||||||
@ -42,14 +32,13 @@ type Session struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewSession gets the leased session for a client.
|
// NewSession gets the leased session for a client.
|
||||||
func NewSession(client *v3.Client) (*Session, error) {
|
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
|
||||||
clientSessions.mu.Lock()
|
ops := &sessionOptions{ttl: defaultSessionTTL}
|
||||||
defer clientSessions.mu.Unlock()
|
for _, opt := range opts {
|
||||||
if s, ok := clientSessions.sessions[client]; ok {
|
opt(ops)
|
||||||
return s, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := client.Grant(client.Ctx(), sessionTTL)
|
resp, err := client.Grant(client.Ctx(), int64(ops.ttl))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -63,16 +52,10 @@ func NewSession(client *v3.Client) (*Session, error) {
|
|||||||
|
|
||||||
donec := make(chan struct{})
|
donec := make(chan struct{})
|
||||||
s := &Session{client: client, id: id, cancel: cancel, donec: donec}
|
s := &Session{client: client, id: id, cancel: cancel, donec: donec}
|
||||||
clientSessions.sessions[client] = s
|
|
||||||
|
|
||||||
// keep the lease alive until client error or cancelled context
|
// keep the lease alive until client error or cancelled context
|
||||||
go func() {
|
go func() {
|
||||||
defer func() {
|
defer close(donec)
|
||||||
clientSessions.mu.Lock()
|
|
||||||
delete(clientSessions.sessions, client)
|
|
||||||
clientSessions.mu.Unlock()
|
|
||||||
close(donec)
|
|
||||||
}()
|
|
||||||
for range keepAlive {
|
for range keepAlive {
|
||||||
// eat messages until keep alive channel closes
|
// eat messages until keep alive channel closes
|
||||||
}
|
}
|
||||||
@ -81,6 +64,11 @@ func NewSession(client *v3.Client) (*Session, error) {
|
|||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Client is the etcd client that is attached to the session.
|
||||||
|
func (s *Session) Client() *v3.Client {
|
||||||
|
return s.client
|
||||||
|
}
|
||||||
|
|
||||||
// Lease is the lease ID for keys bound to the session.
|
// Lease is the lease ID for keys bound to the session.
|
||||||
func (s *Session) Lease() v3.LeaseID { return s.id }
|
func (s *Session) Lease() v3.LeaseID { return s.id }
|
||||||
|
|
||||||
@ -102,3 +90,20 @@ func (s *Session) Close() error {
|
|||||||
_, err := s.client.Revoke(s.client.Ctx(), s.id)
|
_, err := s.client.Revoke(s.client.Ctx(), s.id)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type sessionOptions struct {
|
||||||
|
ttl int
|
||||||
|
}
|
||||||
|
|
||||||
|
// SessionOption configures Session.
|
||||||
|
type SessionOption func(*sessionOptions)
|
||||||
|
|
||||||
|
// WithTTL configures the session's TTL in seconds.
|
||||||
|
// If TTL is <= 0, the default 60 seconds TTL will be used.
|
||||||
|
func WithTTL(ttl int) SessionOption {
|
||||||
|
return func(so *sessionOptions) {
|
||||||
|
if ttl > 0 {
|
||||||
|
so.ttl = ttl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -32,35 +32,63 @@ func ExampleAuth() {
|
|||||||
}
|
}
|
||||||
defer cli.Close()
|
defer cli.Close()
|
||||||
|
|
||||||
authapi := clientv3.NewAuth(cli)
|
if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
if _, err = authapi.RoleAdd(context.TODO(), "root"); err != nil {
|
}
|
||||||
|
if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = authapi.RoleGrantPermission(
|
if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = cli.RoleGrantPermission(
|
||||||
context.TODO(),
|
context.TODO(),
|
||||||
"root", // role name
|
"r", // role name
|
||||||
"foo", // key
|
"foo", // key
|
||||||
"zoo", // range end
|
"zoo", // range end
|
||||||
clientv3.PermissionType(clientv3.PermReadWrite),
|
clientv3.PermissionType(clientv3.PermReadWrite),
|
||||||
); err != nil {
|
); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil {
|
||||||
if _, err = authapi.UserAdd(context.TODO(), "root", "123"); err != nil {
|
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil {
|
||||||
if _, err = authapi.UserGrantRole(context.TODO(), "root", "root"); err != nil {
|
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
if _, err = cli.AuthEnable(context.TODO()); err != nil {
|
||||||
if _, err = authapi.AuthEnable(context.TODO()); err != nil {
|
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cliAuth, err := clientv3.New(clientv3.Config{
|
cliAuth, err := clientv3.New(clientv3.Config{
|
||||||
|
Endpoints: endpoints,
|
||||||
|
DialTimeout: dialTimeout,
|
||||||
|
Username: "u",
|
||||||
|
Password: "123",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer cliAuth.Close()
|
||||||
|
|
||||||
|
if _, err = cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = cliAuth.Txn(context.TODO()).
|
||||||
|
If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
|
||||||
|
Then(clientv3.OpPut("zoo1", "XYZ")).
|
||||||
|
Else(clientv3.OpPut("zoo1", "ABC")).
|
||||||
|
Commit()
|
||||||
|
fmt.Println(err)
|
||||||
|
|
||||||
|
// now check the permission with the root account
|
||||||
|
rootCli, err := clientv3.New(clientv3.Config{
|
||||||
Endpoints: endpoints,
|
Endpoints: endpoints,
|
||||||
DialTimeout: dialTimeout,
|
DialTimeout: dialTimeout,
|
||||||
Username: "root",
|
Username: "root",
|
||||||
@ -69,31 +97,17 @@ func ExampleAuth() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cliAuth.Close()
|
defer rootCli.Close()
|
||||||
|
|
||||||
kv := clientv3.NewKV(cliAuth)
|
resp, err := rootCli.RoleGet(context.TODO(), "r")
|
||||||
if _, err = kv.Put(context.TODO(), "foo1", "bar"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = kv.Txn(context.TODO()).
|
|
||||||
If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")).
|
|
||||||
Then(clientv3.OpPut("zoo1", "XYZ")).
|
|
||||||
Else(clientv3.OpPut("zoo1", "ABC")).
|
|
||||||
Commit()
|
|
||||||
fmt.Println(err)
|
|
||||||
|
|
||||||
// now check the permission
|
|
||||||
authapi2 := clientv3.NewAuth(cliAuth)
|
|
||||||
resp, err := authapi2.RoleGet(context.TODO(), "root")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Printf("root user permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
|
fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd)
|
||||||
|
|
||||||
if _, err = authapi2.AuthDisable(context.TODO()); err != nil {
|
if _, err = rootCli.AuthDisable(context.TODO()); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
// Output: etcdserver: permission denied
|
// Output: etcdserver: permission denied
|
||||||
// root user permission: key "foo", range end "zoo"
|
// user u permission: key "foo", range end "zoo"
|
||||||
}
|
}
|
||||||
|
@ -210,7 +210,7 @@ func ExampleKV_compact() {
|
|||||||
compRev := resp.Header.Revision // specify compact revision of your choice
|
compRev := resp.Header.Revision // specify compact revision of your choice
|
||||||
|
|
||||||
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
|
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
|
||||||
err = cli.Compact(ctx, compRev)
|
_, err = cli.Compact(ctx, compRev)
|
||||||
cancel()
|
cancel()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
|
@ -19,6 +19,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/etcd/clientv3"
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
"github.com/coreos/etcd/pkg/transport"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -43,3 +44,29 @@ func Example() {
|
|||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ExampleConfig_withTLS() {
|
||||||
|
tlsInfo := transport.TLSInfo{
|
||||||
|
CertFile: "/tmp/test-certs/test-name-1.pem",
|
||||||
|
KeyFile: "/tmp/test-certs/test-name-1-key.pem",
|
||||||
|
TrustedCAFile: "/tmp/test-certs/trusted-ca.pem",
|
||||||
|
}
|
||||||
|
tlsConfig, err := tlsInfo.ClientConfig()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
cli, err := clientv3.New(clientv3.Config{
|
||||||
|
Endpoints: endpoints,
|
||||||
|
DialTimeout: dialTimeout,
|
||||||
|
TLS: tlsConfig,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer cli.Close() // make sure to close the client
|
||||||
|
|
||||||
|
_, err = cli.Put(context.TODO(), "foo", "bar")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -16,6 +16,7 @@ package integration
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"math/rand"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
@ -470,17 +471,17 @@ func TestKVCompactError(t *testing.T) {
|
|||||||
t.Fatalf("couldn't put 'foo' (%v)", err)
|
t.Fatalf("couldn't put 'foo' (%v)", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err := kv.Compact(ctx, 6)
|
_, err := kv.Compact(ctx, 6)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("couldn't compact 6 (%v)", err)
|
t.Fatalf("couldn't compact 6 (%v)", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = kv.Compact(ctx, 6)
|
_, err = kv.Compact(ctx, 6)
|
||||||
if err != rpctypes.ErrCompacted {
|
if err != rpctypes.ErrCompacted {
|
||||||
t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
|
t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = kv.Compact(ctx, 100)
|
_, err = kv.Compact(ctx, 100)
|
||||||
if err != rpctypes.ErrFutureRev {
|
if err != rpctypes.ErrFutureRev {
|
||||||
t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
|
t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
|
||||||
}
|
}
|
||||||
@ -501,11 +502,11 @@ func TestKVCompact(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err := kv.Compact(ctx, 7)
|
_, err := kv.Compact(ctx, 7)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("couldn't compact kv space (%v)", err)
|
t.Fatalf("couldn't compact kv space (%v)", err)
|
||||||
}
|
}
|
||||||
err = kv.Compact(ctx, 7)
|
_, err = kv.Compact(ctx, 7)
|
||||||
if err == nil || err != rpctypes.ErrCompacted {
|
if err == nil || err != rpctypes.ErrCompacted {
|
||||||
t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
|
t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
|
||||||
}
|
}
|
||||||
@ -525,7 +526,7 @@ func TestKVCompact(t *testing.T) {
|
|||||||
t.Fatalf("wchan got %v, expected closed", wr)
|
t.Fatalf("wchan got %v, expected closed", wr)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = kv.Compact(ctx, 1000)
|
_, err = kv.Compact(ctx, 1000)
|
||||||
if err == nil || err != rpctypes.ErrFutureRev {
|
if err == nil || err != rpctypes.ErrFutureRev {
|
||||||
t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
|
t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
|
||||||
}
|
}
|
||||||
@ -647,18 +648,121 @@ func TestKVGetCancel(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
|
// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
|
||||||
func TestKVPutStoppedServerAndClose(t *testing.T) {
|
func TestKVGetStoppedServerAndClose(t *testing.T) {
|
||||||
defer testutil.AfterTest(t)
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
defer clus.Terminate(t)
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
cli := clus.Client(0)
|
cli := clus.Client(0)
|
||||||
clus.Members[0].Stop(t)
|
clus.Members[0].Stop(t)
|
||||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
|
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
|
||||||
// this Put fails and triggers an asynchronous connection retry
|
// this Get fails and triggers an asynchronous connection retry
|
||||||
_, err := cli.Put(ctx, "abc", "123")
|
_, err := cli.Get(ctx, "abc")
|
||||||
cancel()
|
cancel()
|
||||||
if !strings.Contains(err.Error(), "context deadline") {
|
if !strings.Contains(err.Error(), "context deadline") {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
|
||||||
|
func TestKVPutStoppedServerAndClose(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
cli := clus.Client(0)
|
||||||
|
clus.Members[0].Stop(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
|
||||||
|
// get retries on all errors.
|
||||||
|
// so here we use it to eat the potential broken pipe error for the next put.
|
||||||
|
// grpc client might see a broken pipe error when we issue the get request before
|
||||||
|
// grpc finds out the original connection is down due to the member shutdown.
|
||||||
|
_, err := cli.Get(ctx, "abc")
|
||||||
|
cancel()
|
||||||
|
if !strings.Contains(err.Error(), "context deadline") {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// this Put fails and triggers an asynchronous connection retry
|
||||||
|
_, err = cli.Put(ctx, "abc", "123")
|
||||||
|
cancel()
|
||||||
|
if !strings.Contains(err.Error(), "context deadline") {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestKVGetOneEndpointDown ensures a client can connect and get if one endpoint is down
|
||||||
|
func TestKVPutOneEndpointDown(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
// get endpoint list
|
||||||
|
eps := make([]string, 3)
|
||||||
|
for i := range eps {
|
||||||
|
eps[i] = clus.Members[i].GRPCAddr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// make a dead node
|
||||||
|
clus.Members[rand.Intn(len(eps))].Stop(t)
|
||||||
|
|
||||||
|
// try to connect with dead node in the endpoint list
|
||||||
|
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
|
||||||
|
cli, err := clientv3.New(cfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer cli.Close()
|
||||||
|
ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
|
||||||
|
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other
|
||||||
|
// endpoints are down, then it will reconnect.
|
||||||
|
func TestKVGetResetLoneEndpoint(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
// get endpoint list
|
||||||
|
eps := make([]string, 2)
|
||||||
|
for i := range eps {
|
||||||
|
eps[i] = clus.Members[i].GRPCAddr()
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond}
|
||||||
|
cli, err := clientv3.New(cfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer cli.Close()
|
||||||
|
|
||||||
|
// disconnect everything
|
||||||
|
clus.Members[0].Stop(t)
|
||||||
|
clus.Members[1].Stop(t)
|
||||||
|
|
||||||
|
// have Get try to reconnect
|
||||||
|
donec := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
|
||||||
|
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
cancel()
|
||||||
|
close(donec)
|
||||||
|
}()
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
clus.Members[0].Restart(t)
|
||||||
|
select {
|
||||||
|
case <-time.After(10 * time.Second):
|
||||||
|
t.Fatalf("timed out waiting for Get")
|
||||||
|
case <-donec:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -15,6 +15,8 @@
|
|||||||
package integration
|
package integration
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -359,7 +361,8 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
|
|||||||
if kerr != nil {
|
if kerr != nil {
|
||||||
t.Fatal(kerr)
|
t.Fatal(kerr)
|
||||||
}
|
}
|
||||||
if kresp := <-rc; kresp.ID != resp.ID {
|
kresp := <-rc
|
||||||
|
if kresp.ID != resp.ID {
|
||||||
t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
|
t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -374,13 +377,14 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
|
|||||||
|
|
||||||
clus.Members[0].Restart(t)
|
clus.Members[0].Restart(t)
|
||||||
|
|
||||||
select {
|
// some keep-alives may still be buffered; drain until close
|
||||||
case ka, ok := <-rc:
|
timer := time.After(time.Duration(kresp.TTL) * time.Second)
|
||||||
if ok {
|
for kresp != nil {
|
||||||
t.Fatalf("unexpected keepalive %v", ka)
|
select {
|
||||||
|
case kresp = <-rc:
|
||||||
|
case <-timer:
|
||||||
|
t.Fatalf("keepalive channel did not close")
|
||||||
}
|
}
|
||||||
case <-time.After(5 * time.Second):
|
|
||||||
t.Fatalf("keepalive channel did not close")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -453,3 +457,56 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
|
|||||||
|
|
||||||
clus.Members[0].Restart(t)
|
clus.Members[0].Restart(t)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLeaseTimeToLive(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
lapi := clientv3.NewLease(clus.RandClient())
|
||||||
|
defer lapi.Close()
|
||||||
|
|
||||||
|
resp, err := lapi.Grant(context.Background(), 10)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed to create lease %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
kv := clientv3.NewKV(clus.RandClient())
|
||||||
|
keys := []string{"foo1", "foo2"}
|
||||||
|
for i := range keys {
|
||||||
|
if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
|
||||||
|
if lerr != nil {
|
||||||
|
t.Fatal(lerr)
|
||||||
|
}
|
||||||
|
if lresp.ID != resp.ID {
|
||||||
|
t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
|
||||||
|
}
|
||||||
|
if lresp.GrantedTTL != int64(10) {
|
||||||
|
t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
|
||||||
|
}
|
||||||
|
if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
|
||||||
|
t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
|
||||||
|
}
|
||||||
|
ks := make([]string, len(lresp.Keys))
|
||||||
|
for i := range lresp.Keys {
|
||||||
|
ks[i] = string(lresp.Keys[i])
|
||||||
|
}
|
||||||
|
sort.Strings(ks)
|
||||||
|
if !reflect.DeepEqual(ks, keys) {
|
||||||
|
t.Fatalf("keys expected %v, got %v", keys, ks)
|
||||||
|
}
|
||||||
|
|
||||||
|
lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
|
||||||
|
if lerr != nil {
|
||||||
|
t.Fatal(lerr)
|
||||||
|
}
|
||||||
|
if len(lresp.Keys) != 0 {
|
||||||
|
t.Fatalf("unexpected keys %+v", lresp.Keys)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
21
clientv3/integration/logger_test.go
Normal file
21
clientv3/integration/logger_test.go
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package integration
|
||||||
|
|
||||||
|
import "github.com/coreos/pkg/capnslog"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
capnslog.SetGlobalLogLevel(capnslog.INFO)
|
||||||
|
}
|
@ -15,7 +15,9 @@
|
|||||||
package integration
|
package integration
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -69,3 +71,55 @@ func TestMirrorSync(t *testing.T) {
|
|||||||
t.Fatal("failed to receive update in one second")
|
t.Fatal("failed to receive update in one second")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMirrorSyncBase(t *testing.T) {
|
||||||
|
cluster := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 1})
|
||||||
|
defer cluster.Terminate(nil)
|
||||||
|
|
||||||
|
cli := cluster.Client(0)
|
||||||
|
ctx := context.TODO()
|
||||||
|
|
||||||
|
keyCh := make(chan string)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for i := 0; i < 50; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
for key := range keyCh {
|
||||||
|
if _, err := cli.Put(ctx, key, "test"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 2000; i++ {
|
||||||
|
keyCh <- fmt.Sprintf("test%d", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
close(keyCh)
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
syncer := mirror.NewSyncer(cli, "test", 0)
|
||||||
|
respCh, errCh := syncer.SyncBase(ctx)
|
||||||
|
|
||||||
|
count := 0
|
||||||
|
|
||||||
|
for resp := range respCh {
|
||||||
|
count = count + len(resp.Kvs)
|
||||||
|
if !resp.More {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for err := range errCh {
|
||||||
|
t.Fatalf("unexpected error %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 2000 {
|
||||||
|
t.Errorf("unexpected kv count: %d", count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -73,6 +73,7 @@ func TestTxnWriteFail(t *testing.T) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
|
defer close(getc)
|
||||||
select {
|
select {
|
||||||
case <-time.After(5 * time.Second):
|
case <-time.After(5 * time.Second):
|
||||||
t.Fatalf("timed out waiting for txn fail")
|
t.Fatalf("timed out waiting for txn fail")
|
||||||
@ -86,11 +87,10 @@ func TestTxnWriteFail(t *testing.T) {
|
|||||||
if len(gresp.Kvs) != 0 {
|
if len(gresp.Kvs) != 0 {
|
||||||
t.Fatalf("expected no keys, got %v", gresp.Kvs)
|
t.Fatalf("expected no keys, got %v", gresp.Kvs)
|
||||||
}
|
}
|
||||||
close(getc)
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-time.After(5 * time.Second):
|
case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()):
|
||||||
t.Fatalf("timed out waiting for get")
|
t.Fatalf("timed out waiting for get")
|
||||||
case <-getc:
|
case <-getc:
|
||||||
}
|
}
|
||||||
@ -125,7 +125,7 @@ func TestTxnReadRetry(t *testing.T) {
|
|||||||
clus.Members[0].Restart(t)
|
clus.Members[0].Restart(t)
|
||||||
select {
|
select {
|
||||||
case <-donec:
|
case <-donec:
|
||||||
case <-time.After(5 * time.Second):
|
case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()):
|
||||||
t.Fatalf("waited too long")
|
t.Fatalf("waited too long")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -375,7 +375,7 @@ func TestWatchResumeCompacted(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := kv.Compact(context.TODO(), 3); err != nil {
|
if _, err := kv.Compact(context.TODO(), 3); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -400,7 +400,7 @@ func TestWatchResumeCompacted(t *testing.T) {
|
|||||||
func TestWatchCompactRevision(t *testing.T) {
|
func TestWatchCompactRevision(t *testing.T) {
|
||||||
defer testutil.AfterTest(t)
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
defer clus.Terminate(t)
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
// set some keys
|
// set some keys
|
||||||
@ -414,7 +414,7 @@ func TestWatchCompactRevision(t *testing.T) {
|
|||||||
w := clientv3.NewWatcher(clus.RandClient())
|
w := clientv3.NewWatcher(clus.RandClient())
|
||||||
defer w.Close()
|
defer w.Close()
|
||||||
|
|
||||||
if err := kv.Compact(context.TODO(), 4); err != nil {
|
if _, err := kv.Compact(context.TODO(), 4); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2))
|
wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2))
|
||||||
@ -487,7 +487,7 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
|
|||||||
} else if len(resp.Events) != 0 { // wait for notification otherwise
|
} else if len(resp.Events) != 0 { // wait for notification otherwise
|
||||||
t.Fatalf("expected no events, but got %+v", resp.Events)
|
t.Fatalf("expected no events, but got %+v", resp.Events)
|
||||||
}
|
}
|
||||||
case <-time.After(2 * pi):
|
case <-time.After(time.Duration(1.5 * float64(pi))):
|
||||||
t.Fatalf("watch response expected in %v, but timed out", pi)
|
t.Fatalf("watch response expected in %v, but timed out", pi)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -673,3 +673,81 @@ func TestWatchWithRequireLeader(t *testing.T) {
|
|||||||
t.Fatalf("expected response, got closed channel")
|
t.Fatalf("expected response, got closed channel")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestWatchWithFilter checks that watch filtering works.
|
||||||
|
func TestWatchWithFilter(t *testing.T) {
|
||||||
|
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
|
defer cluster.Terminate(t)
|
||||||
|
|
||||||
|
client := cluster.RandClient()
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut())
|
||||||
|
wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete())
|
||||||
|
|
||||||
|
if _, err := client.Put(ctx, "a", "abc"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if _, err := client.Delete(ctx, "a"); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
npResp := <-wcNoPut
|
||||||
|
if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete {
|
||||||
|
t.Fatalf("expected delete event, got %+v", npResp.Events)
|
||||||
|
}
|
||||||
|
ndResp := <-wcNoDel
|
||||||
|
if len(ndResp.Events) != 1 || ndResp.Events[0].Type != clientv3.EventTypePut {
|
||||||
|
t.Fatalf("expected put event, got %+v", ndResp.Events)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case resp := <-wcNoPut:
|
||||||
|
t.Fatalf("unexpected event on filtered put (%+v)", resp)
|
||||||
|
case resp := <-wcNoDel:
|
||||||
|
t.Fatalf("unexpected event on filtered delete (%+v)", resp)
|
||||||
|
case <-time.After(100 * time.Millisecond):
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWatchWithCreatedNotification checks that createdNotification works.
|
||||||
|
func TestWatchWithCreatedNotification(t *testing.T) {
|
||||||
|
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
|
defer cluster.Terminate(t)
|
||||||
|
|
||||||
|
client := cluster.RandClient()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
createC := client.Watch(ctx, "a", clientv3.WithCreatedNotify())
|
||||||
|
|
||||||
|
resp := <-createC
|
||||||
|
|
||||||
|
if !resp.Created {
|
||||||
|
t.Fatalf("expected created event, got %v", resp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
|
||||||
|
func TestWatchCancelOnServer(t *testing.T) {
|
||||||
|
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
|
defer cluster.Terminate(t)
|
||||||
|
|
||||||
|
client := cluster.RandClient()
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
client.Watch(ctx, "a", clientv3.WithCreatedNotify())
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
// wait for cancels to propagate
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
|
||||||
|
watchers, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if watchers != "0" {
|
||||||
|
t.Fatalf("expected 0 watchers, got %q", watchers)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -17,13 +17,15 @@ package clientv3
|
|||||||
import (
|
import (
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
PutResponse pb.PutResponse
|
CompactResponse pb.CompactionResponse
|
||||||
GetResponse pb.RangeResponse
|
PutResponse pb.PutResponse
|
||||||
DeleteResponse pb.DeleteRangeResponse
|
GetResponse pb.RangeResponse
|
||||||
TxnResponse pb.TxnResponse
|
DeleteResponse pb.DeleteRangeResponse
|
||||||
|
TxnResponse pb.TxnResponse
|
||||||
)
|
)
|
||||||
|
|
||||||
type KV interface {
|
type KV interface {
|
||||||
@ -47,7 +49,7 @@ type KV interface {
|
|||||||
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
|
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
|
||||||
|
|
||||||
// Compact compacts etcd KV history before the given rev.
|
// Compact compacts etcd KV history before the given rev.
|
||||||
Compact(ctx context.Context, rev int64, opts ...CompactOption) error
|
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
|
||||||
|
|
||||||
// Do applies a single Op on KV without a transaction.
|
// Do applies a single Op on KV without a transaction.
|
||||||
// Do is useful when declaring operations to be issued at a later time
|
// Do is useful when declaring operations to be issued at a later time
|
||||||
@ -80,7 +82,11 @@ type kv struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewKV(c *Client) KV {
|
func NewKV(c *Client) KV {
|
||||||
return &kv{remote: pb.NewKVClient(c.conn)}
|
return &kv{remote: RetryKVClient(c)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewKVFromKVClient(remote pb.KVClient) KV {
|
||||||
|
return &kv{remote: remote}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
|
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
|
||||||
@ -98,11 +104,12 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
|
|||||||
return r.del, toErr(ctx, err)
|
return r.del, toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) error {
|
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
|
||||||
if _, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()); err != nil {
|
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
|
||||||
return toErr(ctx, err)
|
if err != nil {
|
||||||
|
return nil, toErr(ctx, err)
|
||||||
}
|
}
|
||||||
return nil
|
return (*CompactResponse)(resp), err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv *kv) Txn(ctx context.Context) Txn {
|
func (kv *kv) Txn(ctx context.Context) Txn {
|
||||||
@ -134,34 +141,20 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
|
|||||||
// TODO: handle other ops
|
// TODO: handle other ops
|
||||||
case tRange:
|
case tRange:
|
||||||
var resp *pb.RangeResponse
|
var resp *pb.RangeResponse
|
||||||
r := &pb.RangeRequest{
|
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
|
||||||
Key: op.key,
|
|
||||||
RangeEnd: op.end,
|
|
||||||
Limit: op.limit,
|
|
||||||
Revision: op.rev,
|
|
||||||
Serializable: op.serializable,
|
|
||||||
KeysOnly: op.keysOnly,
|
|
||||||
CountOnly: op.countOnly,
|
|
||||||
}
|
|
||||||
if op.sort != nil {
|
|
||||||
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
|
||||||
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err = kv.remote.Range(ctx, r)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return OpResponse{get: (*GetResponse)(resp)}, nil
|
return OpResponse{get: (*GetResponse)(resp)}, nil
|
||||||
}
|
}
|
||||||
case tPut:
|
case tPut:
|
||||||
var resp *pb.PutResponse
|
var resp *pb.PutResponse
|
||||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
|
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
||||||
resp, err = kv.remote.Put(ctx, r)
|
resp, err = kv.remote.Put(ctx, r)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return OpResponse{put: (*PutResponse)(resp)}, nil
|
return OpResponse{put: (*PutResponse)(resp)}, nil
|
||||||
}
|
}
|
||||||
case tDeleteRange:
|
case tDeleteRange:
|
||||||
var resp *pb.DeleteRangeResponse
|
var resp *pb.DeleteRangeResponse
|
||||||
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
|
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||||
resp, err = kv.remote.DeleteRange(ctx, r)
|
resp, err = kv.remote.DeleteRange(ctx, r)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return OpResponse{del: (*DeleteResponse)(resp)}, nil
|
return OpResponse{del: (*DeleteResponse)(resp)}, nil
|
||||||
|
@ -21,6 +21,7 @@ import (
|
|||||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@ -43,6 +44,21 @@ type LeaseKeepAliveResponse struct {
|
|||||||
TTL int64
|
TTL int64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
|
||||||
|
type LeaseTimeToLiveResponse struct {
|
||||||
|
*pb.ResponseHeader
|
||||||
|
ID LeaseID `json:"id"`
|
||||||
|
|
||||||
|
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
|
||||||
|
TTL int64 `json:"ttl"`
|
||||||
|
|
||||||
|
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
|
||||||
|
GrantedTTL int64 `json:"granted-ttl"`
|
||||||
|
|
||||||
|
// Keys is the list of keys attached to this lease.
|
||||||
|
Keys [][]byte `json:"keys"`
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// defaultTTL is the assumed lease TTL used for the first keepalive
|
// defaultTTL is the assumed lease TTL used for the first keepalive
|
||||||
// deadline before the actual TTL is known to the client.
|
// deadline before the actual TTL is known to the client.
|
||||||
@ -60,6 +76,9 @@ type Lease interface {
|
|||||||
// Revoke revokes the given lease.
|
// Revoke revokes the given lease.
|
||||||
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
|
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
|
||||||
|
|
||||||
|
// TimeToLive retrieves the lease information of the given lease ID.
|
||||||
|
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
|
||||||
|
|
||||||
// KeepAlive keeps the given lease alive forever.
|
// KeepAlive keeps the given lease alive forever.
|
||||||
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
||||||
|
|
||||||
@ -109,7 +128,7 @@ func NewLease(c *Client) Lease {
|
|||||||
l := &lessor{
|
l := &lessor{
|
||||||
donec: make(chan struct{}),
|
donec: make(chan struct{}),
|
||||||
keepAlives: make(map[LeaseID]*keepAlive),
|
keepAlives: make(map[LeaseID]*keepAlive),
|
||||||
remote: pb.NewLeaseClient(c.conn),
|
remote: RetryLeaseClient(c),
|
||||||
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
|
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
|
||||||
}
|
}
|
||||||
if l.firstKeepAliveTimeout == time.Second {
|
if l.firstKeepAliveTimeout == time.Second {
|
||||||
@ -140,7 +159,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err
|
|||||||
return gresp, nil
|
return gresp, nil
|
||||||
}
|
}
|
||||||
if isHaltErr(cctx, err) {
|
if isHaltErr(cctx, err) {
|
||||||
return nil, toErr(ctx, err)
|
return nil, toErr(cctx, err)
|
||||||
}
|
}
|
||||||
if nerr := l.newStream(); nerr != nil {
|
if nerr := l.newStream(); nerr != nil {
|
||||||
return nil, nerr
|
return nil, nerr
|
||||||
@ -169,6 +188,30 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
|
||||||
|
cctx, cancel := context.WithCancel(ctx)
|
||||||
|
done := cancelWhenStop(cancel, l.stopCtx.Done())
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
for {
|
||||||
|
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||||
|
resp, err := l.remote.LeaseTimeToLive(cctx, r)
|
||||||
|
if err == nil {
|
||||||
|
gresp := &LeaseTimeToLiveResponse{
|
||||||
|
ResponseHeader: resp.GetHeader(),
|
||||||
|
ID: LeaseID(resp.ID),
|
||||||
|
TTL: resp.TTL,
|
||||||
|
GrantedTTL: resp.GrantedTTL,
|
||||||
|
Keys: resp.Keys,
|
||||||
|
}
|
||||||
|
return gresp, nil
|
||||||
|
}
|
||||||
|
if isHaltErr(cctx, err) {
|
||||||
|
return nil, toErr(cctx, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
||||||
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
|
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
|
||||||
|
|
||||||
@ -261,7 +304,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
|
|||||||
cctx, cancel := context.WithCancel(ctx)
|
cctx, cancel := context.WithCancel(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
stream, err := l.remote.LeaseKeepAlive(cctx)
|
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, toErr(ctx, err)
|
return nil, toErr(ctx, err)
|
||||||
}
|
}
|
||||||
@ -389,7 +432,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tosend := make([]LeaseID, 0)
|
var tosend []LeaseID
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
@ -418,7 +461,7 @@ func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
|
|||||||
|
|
||||||
func (l *lessor) newStream() error {
|
func (l *lessor) newStream() error {
|
||||||
sctx, cancel := context.WithCancel(l.stopCtx)
|
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||||
stream, err := l.remote.LeaseKeepAlive(sctx)
|
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cancel()
|
cancel()
|
||||||
return toErr(sctx, err)
|
return toErr(sctx, err)
|
||||||
|
@ -20,10 +20,14 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/auth"
|
||||||
"github.com/coreos/etcd/integration"
|
"github.com/coreos/etcd/integration"
|
||||||
"github.com/coreos/etcd/pkg/testutil"
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
|
"golang.org/x/crypto/bcrypt"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() { auth.BcryptCost = bcrypt.MinCost }
|
||||||
|
|
||||||
// TestMain sets up an etcd cluster if running the examples.
|
// TestMain sets up an etcd cluster if running the examples.
|
||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
useCluster := true // default to running all tests
|
useCluster := true // default to running all tests
|
||||||
|
@ -19,6 +19,7 @@ import (
|
|||||||
|
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@ -67,7 +68,7 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
|
|||||||
Alarm: pb.AlarmType_NONE, // all
|
Alarm: pb.AlarmType_NONE, // all
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
resp, err := m.remote.Alarm(ctx, req)
|
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return (*AlarmResponse)(resp), nil
|
return (*AlarmResponse)(resp), nil
|
||||||
}
|
}
|
||||||
@ -100,7 +101,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
|
|||||||
return &ret, nil
|
return &ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := m.remote.Alarm(ctx, req)
|
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return (*AlarmResponse)(resp), nil
|
return (*AlarmResponse)(resp), nil
|
||||||
}
|
}
|
||||||
@ -114,7 +115,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
|
|||||||
}
|
}
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
remote := pb.NewMaintenanceClient(conn)
|
remote := pb.NewMaintenanceClient(conn)
|
||||||
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
|
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, toErr(ctx, err)
|
return nil, toErr(ctx, err)
|
||||||
}
|
}
|
||||||
@ -128,7 +129,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
|
|||||||
}
|
}
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
remote := pb.NewMaintenanceClient(conn)
|
remote := pb.NewMaintenanceClient(conn)
|
||||||
resp, err := remote.Status(ctx, &pb.StatusRequest{})
|
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, toErr(ctx, err)
|
return nil, toErr(ctx, err)
|
||||||
}
|
}
|
||||||
@ -136,7 +137,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
|
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
|
||||||
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{})
|
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, toErr(ctx, err)
|
return nil, toErr(ctx, err)
|
||||||
}
|
}
|
||||||
|
@ -78,7 +78,7 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha
|
|||||||
// If len(s.prefix) != 0, we will sync key-value space with given prefix.
|
// If len(s.prefix) != 0, we will sync key-value space with given prefix.
|
||||||
// We then range from the prefix to the next prefix if exists. Or we will
|
// We then range from the prefix to the next prefix if exists. Or we will
|
||||||
// range from the prefix to the end if the next prefix does not exists.
|
// range from the prefix to the end if the next prefix does not exists.
|
||||||
opts = append(opts, clientv3.WithPrefix())
|
opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix)))
|
||||||
key = s.prefix
|
key = s.prefix
|
||||||
}
|
}
|
||||||
|
|
||||||
|
115
clientv3/naming/grpc.go
Normal file
115
clientv3/naming/grpc.go
Normal file
@ -0,0 +1,115 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/clientv3"
|
||||||
|
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc/naming"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
gRPCNamingPrefix = "/github.com/grpc/"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.
|
||||||
|
type GRPCResolver struct {
|
||||||
|
// Client is an initialized etcd client
|
||||||
|
Client *clientv3.Client
|
||||||
|
// Timeout for update/delete request.
|
||||||
|
Timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gr *GRPCResolver) Add(target string, addr string, metadata interface{}) error {
|
||||||
|
update := naming.Update{
|
||||||
|
Addr: addr,
|
||||||
|
Metadata: metadata,
|
||||||
|
}
|
||||||
|
val, err := json.Marshal(update)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
if gr.Timeout != 0 {
|
||||||
|
var cancel context.CancelFunc
|
||||||
|
ctx, cancel = context.WithTimeout(context.Background(), gr.Timeout)
|
||||||
|
defer cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = gr.Client.KV.Put(ctx, gRPCNamingPrefix+target, string(val))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gr *GRPCResolver) Delete(target string) error {
|
||||||
|
ctx := context.Background()
|
||||||
|
if gr.Timeout != 0 {
|
||||||
|
var cancel context.CancelFunc
|
||||||
|
ctx, cancel = context.WithTimeout(context.Background(), gr.Timeout)
|
||||||
|
defer cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := gr.Client.Delete(ctx, gRPCNamingPrefix+target)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {
|
||||||
|
cctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
|
wch := gr.Client.Watch(cctx, gRPCNamingPrefix+target)
|
||||||
|
|
||||||
|
w := &gRPCWatcher{
|
||||||
|
cancel: cancel,
|
||||||
|
wch: wch,
|
||||||
|
}
|
||||||
|
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type gRPCWatcher struct {
|
||||||
|
cancel context.CancelFunc
|
||||||
|
wch clientv3.WatchChan
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
|
||||||
|
wr, ok := <-gw.wch
|
||||||
|
if !ok {
|
||||||
|
return nil, wr.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
updates := make([]*naming.Update, 0, len(wr.Events))
|
||||||
|
|
||||||
|
for _, e := range wr.Events {
|
||||||
|
switch e.Type {
|
||||||
|
case mvccpb.PUT:
|
||||||
|
var jupdate naming.Update
|
||||||
|
err := json.Unmarshal(e.Kv.Value, &jupdate)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
updates = append(updates, &jupdate)
|
||||||
|
case mvccpb.DELETE:
|
||||||
|
updates = append(updates, &naming.Update{Op: naming.Delete})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return updates, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gw *gRPCWatcher) Close() { gw.cancel() }
|
77
clientv3/naming/grpc_test.go
Normal file
77
clientv3/naming/grpc_test.go
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/naming"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/integration"
|
||||||
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGRPCResolver(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
|
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||||
|
defer clus.Terminate(t)
|
||||||
|
|
||||||
|
r := GRPCResolver{
|
||||||
|
Client: clus.RandClient(),
|
||||||
|
}
|
||||||
|
|
||||||
|
w, err := r.Resolve("foo")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("failed to resolve foo", err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
err = r.Add("foo", "127.0.0.1", "metadata")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("failed to add foo", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
us, err := w.Next()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("failed to get udpate", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
wu := &naming.Update{
|
||||||
|
Op: naming.Add,
|
||||||
|
Addr: "127.0.0.1",
|
||||||
|
Metadata: "metadata",
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(us[0], wu) {
|
||||||
|
t.Fatalf("up = %#v, want %#v", us[0], wu)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = r.Delete("foo")
|
||||||
|
|
||||||
|
us, err = w.Next()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("failed to get udpate", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
wu = &naming.Update{
|
||||||
|
Op: naming.Delete,
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(us[0], wu) {
|
||||||
|
t.Fatalf("up = %#v, want %#v", us[0], wu)
|
||||||
|
}
|
||||||
|
}
|
166
clientv3/op.go
166
clientv3/op.go
@ -14,9 +14,7 @@
|
|||||||
|
|
||||||
package clientv3
|
package clientv3
|
||||||
|
|
||||||
import (
|
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
|
||||||
)
|
|
||||||
|
|
||||||
type opType int
|
type opType int
|
||||||
|
|
||||||
@ -43,40 +41,63 @@ type Op struct {
|
|||||||
serializable bool
|
serializable bool
|
||||||
keysOnly bool
|
keysOnly bool
|
||||||
countOnly bool
|
countOnly bool
|
||||||
|
minModRev int64
|
||||||
|
maxModRev int64
|
||||||
|
minCreateRev int64
|
||||||
|
maxCreateRev int64
|
||||||
|
|
||||||
// for range, watch
|
// for range, watch
|
||||||
rev int64
|
rev int64
|
||||||
|
|
||||||
|
// for watch, put, delete
|
||||||
|
prevKV bool
|
||||||
|
|
||||||
// progressNotify is for progress updates.
|
// progressNotify is for progress updates.
|
||||||
progressNotify bool
|
progressNotify bool
|
||||||
|
// createdNotify is for created event
|
||||||
|
createdNotify bool
|
||||||
|
// filters for watchers
|
||||||
|
filterPut bool
|
||||||
|
filterDelete bool
|
||||||
|
|
||||||
// for put
|
// for put
|
||||||
val []byte
|
val []byte
|
||||||
leaseID LeaseID
|
leaseID LeaseID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||||
|
if op.t != tRange {
|
||||||
|
panic("op.t != tRange")
|
||||||
|
}
|
||||||
|
r := &pb.RangeRequest{
|
||||||
|
Key: op.key,
|
||||||
|
RangeEnd: op.end,
|
||||||
|
Limit: op.limit,
|
||||||
|
Revision: op.rev,
|
||||||
|
Serializable: op.serializable,
|
||||||
|
KeysOnly: op.keysOnly,
|
||||||
|
CountOnly: op.countOnly,
|
||||||
|
MinModRevision: op.minModRev,
|
||||||
|
MaxModRevision: op.maxModRev,
|
||||||
|
MinCreateRevision: op.minCreateRev,
|
||||||
|
MaxCreateRevision: op.maxCreateRev,
|
||||||
|
}
|
||||||
|
if op.sort != nil {
|
||||||
|
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
||||||
|
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
func (op Op) toRequestOp() *pb.RequestOp {
|
func (op Op) toRequestOp() *pb.RequestOp {
|
||||||
switch op.t {
|
switch op.t {
|
||||||
case tRange:
|
case tRange:
|
||||||
r := &pb.RangeRequest{
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
|
||||||
Key: op.key,
|
|
||||||
RangeEnd: op.end,
|
|
||||||
Limit: op.limit,
|
|
||||||
Revision: op.rev,
|
|
||||||
Serializable: op.serializable,
|
|
||||||
KeysOnly: op.keysOnly,
|
|
||||||
CountOnly: op.countOnly,
|
|
||||||
}
|
|
||||||
if op.sort != nil {
|
|
||||||
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
|
|
||||||
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
|
|
||||||
}
|
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
|
|
||||||
case tPut:
|
case tPut:
|
||||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
|
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
||||||
case tDeleteRange:
|
case tDeleteRange:
|
||||||
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
|
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
|
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
|
||||||
default:
|
default:
|
||||||
panic("Unknown Op")
|
panic("Unknown Op")
|
||||||
@ -109,6 +130,14 @@ func OpDelete(key string, opts ...OpOption) Op {
|
|||||||
panic("unexpected serializable in delete")
|
panic("unexpected serializable in delete")
|
||||||
case ret.countOnly:
|
case ret.countOnly:
|
||||||
panic("unexpected countOnly in delete")
|
panic("unexpected countOnly in delete")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in delete")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in delete")
|
||||||
|
case ret.filterDelete, ret.filterPut:
|
||||||
|
panic("unexpected filter in delete")
|
||||||
|
case ret.createdNotify:
|
||||||
|
panic("unexpected createdNotify in delete")
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@ -128,7 +157,15 @@ func OpPut(key, val string, opts ...OpOption) Op {
|
|||||||
case ret.serializable:
|
case ret.serializable:
|
||||||
panic("unexpected serializable in put")
|
panic("unexpected serializable in put")
|
||||||
case ret.countOnly:
|
case ret.countOnly:
|
||||||
panic("unexpected countOnly in delete")
|
panic("unexpected countOnly in put")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in put")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in put")
|
||||||
|
case ret.filterDelete, ret.filterPut:
|
||||||
|
panic("unexpected filter in put")
|
||||||
|
case ret.createdNotify:
|
||||||
|
panic("unexpected createdNotify in put")
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@ -146,7 +183,11 @@ func opWatch(key string, opts ...OpOption) Op {
|
|||||||
case ret.serializable:
|
case ret.serializable:
|
||||||
panic("unexpected serializable in watch")
|
panic("unexpected serializable in watch")
|
||||||
case ret.countOnly:
|
case ret.countOnly:
|
||||||
panic("unexpected countOnly in delete")
|
panic("unexpected countOnly in watch")
|
||||||
|
case ret.minModRev != 0, ret.maxModRev != 0:
|
||||||
|
panic("unexpected mod revision filter in watch")
|
||||||
|
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
|
||||||
|
panic("unexpected create revision filter in watch")
|
||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
@ -178,10 +219,24 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
|
|||||||
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
|
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
|
||||||
func WithSort(target SortTarget, order SortOrder) OpOption {
|
func WithSort(target SortTarget, order SortOrder) OpOption {
|
||||||
return func(op *Op) {
|
return func(op *Op) {
|
||||||
|
if target == SortByKey && order == SortAscend {
|
||||||
|
// If order != SortNone, server fetches the entire key-space,
|
||||||
|
// and then applies the sort and limit, if provided.
|
||||||
|
// Since current mvcc.Range implementation returns results
|
||||||
|
// sorted by keys in lexiographically ascending order,
|
||||||
|
// client should ignore SortOrder if the target is SortByKey.
|
||||||
|
order = SortNone
|
||||||
|
}
|
||||||
op.sort = &SortOption{target, order}
|
op.sort = &SortOption{target, order}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetPrefixRangeEnd gets the range end of the prefix.
|
||||||
|
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
|
||||||
|
func GetPrefixRangeEnd(prefix string) string {
|
||||||
|
return string(getPrefix([]byte(prefix)))
|
||||||
|
}
|
||||||
|
|
||||||
func getPrefix(key []byte) []byte {
|
func getPrefix(key []byte) []byte {
|
||||||
end := make([]byte, len(key))
|
end := make([]byte, len(key))
|
||||||
copy(end, key)
|
copy(end, key)
|
||||||
@ -235,6 +290,18 @@ func WithCountOnly() OpOption {
|
|||||||
return func(op *Op) { op.countOnly = true }
|
return func(op *Op) { op.countOnly = true }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
|
||||||
|
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
|
||||||
|
|
||||||
|
// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
|
||||||
|
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
|
||||||
|
|
||||||
|
// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
|
||||||
|
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
|
||||||
|
|
||||||
|
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
|
||||||
|
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
|
||||||
|
|
||||||
// WithFirstCreate gets the key with the oldest creation revision in the request range.
|
// WithFirstCreate gets the key with the oldest creation revision in the request range.
|
||||||
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
|
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
|
||||||
|
|
||||||
@ -258,10 +325,65 @@ func withTop(target SortTarget, order SortOrder) []OpOption {
|
|||||||
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
|
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithProgressNotify makes watch server send periodic progress updates.
|
// WithProgressNotify makes watch server send periodic progress updates
|
||||||
|
// every 10 minutes when there is no incoming events.
|
||||||
// Progress updates have zero events in WatchResponse.
|
// Progress updates have zero events in WatchResponse.
|
||||||
func WithProgressNotify() OpOption {
|
func WithProgressNotify() OpOption {
|
||||||
return func(op *Op) {
|
return func(op *Op) {
|
||||||
op.progressNotify = true
|
op.progressNotify = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithCreatedNotify makes watch server sends the created event.
|
||||||
|
func WithCreatedNotify() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.createdNotify = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFilterPut discards PUT events from the watcher.
|
||||||
|
func WithFilterPut() OpOption {
|
||||||
|
return func(op *Op) { op.filterPut = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFilterDelete discards DELETE events from the watcher.
|
||||||
|
func WithFilterDelete() OpOption {
|
||||||
|
return func(op *Op) { op.filterDelete = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
|
||||||
|
// nothing will be returned.
|
||||||
|
func WithPrevKV() OpOption {
|
||||||
|
return func(op *Op) {
|
||||||
|
op.prevKV = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOp represents an Operation that lease can execute.
|
||||||
|
type LeaseOp struct {
|
||||||
|
id LeaseID
|
||||||
|
|
||||||
|
// for TimeToLive
|
||||||
|
attachedKeys bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// LeaseOption configures lease operations.
|
||||||
|
type LeaseOption func(*LeaseOp)
|
||||||
|
|
||||||
|
func (op *LeaseOp) applyOpts(opts []LeaseOption) {
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(op)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAttachedKeys requests lease timetolive API to return
|
||||||
|
// attached keys of given lease ID.
|
||||||
|
func WithAttachedKeys() LeaseOption {
|
||||||
|
return func(op *LeaseOp) { op.attachedKeys = true }
|
||||||
|
}
|
||||||
|
|
||||||
|
func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
|
||||||
|
ret := &LeaseOp{id: id}
|
||||||
|
ret.applyOpts(opts)
|
||||||
|
return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
|
||||||
|
}
|
||||||
|
38
clientv3/op_test.go
Normal file
38
clientv3/op_test.go
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestOpWithSort tests if WithSort(ASCEND, KEY) and WithLimit are specified,
|
||||||
|
// RangeRequest ignores the SortOption to avoid unnecessarily fetching
|
||||||
|
// the entire key-space.
|
||||||
|
func TestOpWithSort(t *testing.T) {
|
||||||
|
opReq := OpGet("foo", WithSort(SortByKey, SortAscend), WithLimit(10)).toRequestOp().Request
|
||||||
|
q, ok := opReq.(*pb.RequestOp_RequestRange)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected range request, got %v", reflect.TypeOf(opReq))
|
||||||
|
}
|
||||||
|
req := q.RequestRange
|
||||||
|
wreq := &pb.RangeRequest{Key: []byte("foo"), SortOrder: pb.RangeRequest_NONE, Limit: 10}
|
||||||
|
if !reflect.DeepEqual(req, wreq) {
|
||||||
|
t.Fatalf("expected %+v, got %+v", wreq, req)
|
||||||
|
}
|
||||||
|
}
|
253
clientv3/retry.go
Normal file
253
clientv3/retry.go
Normal file
@ -0,0 +1,253 @@
|
|||||||
|
// Copyright 2016 The etcd Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package clientv3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||||
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
)
|
||||||
|
|
||||||
|
type rpcFunc func(ctx context.Context) error
|
||||||
|
type retryRpcFunc func(context.Context, rpcFunc)
|
||||||
|
|
||||||
|
func (c *Client) newRetryWrapper() retryRpcFunc {
|
||||||
|
return func(rpcCtx context.Context, f rpcFunc) {
|
||||||
|
for {
|
||||||
|
err := f(rpcCtx)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// only retry if unavailable
|
||||||
|
if grpc.Code(err) != codes.Unavailable {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// always stop retry on etcd errors
|
||||||
|
eErr := rpctypes.Error(err)
|
||||||
|
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-c.balancer.ConnectNotify():
|
||||||
|
case <-rpcCtx.Done():
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type retryKVClient struct {
|
||||||
|
pb.KVClient
|
||||||
|
retryf retryRpcFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
|
||||||
|
func RetryKVClient(c *Client) pb.KVClient {
|
||||||
|
return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||||
|
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rkv.KVClient.Put(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||||
|
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||||
|
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||||
|
rkv.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type retryLeaseClient struct {
|
||||||
|
pb.LeaseClient
|
||||||
|
retryf retryRpcFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
|
||||||
|
func RetryLeaseClient(c *Client) pb.LeaseClient {
|
||||||
|
return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
|
||||||
|
rlc.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
|
||||||
|
rlc.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type retryClusterClient struct {
|
||||||
|
pb.ClusterClient
|
||||||
|
retryf retryRpcFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
|
||||||
|
func RetryClusterClient(c *Client) pb.ClusterClient {
|
||||||
|
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||||
|
rcc.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||||
|
rcc.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||||
|
rcc.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
type retryAuthClient struct {
|
||||||
|
pb.AuthClient
|
||||||
|
retryf retryRpcFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
|
||||||
|
func RetryAuthClient(c *Client) pb.AuthClient {
|
||||||
|
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
|
||||||
|
rac.retryf(ctx, func(rctx context.Context) error {
|
||||||
|
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return resp, err
|
||||||
|
}
|
@ -19,6 +19,7 @@ import (
|
|||||||
|
|
||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Txn is the interface that wraps mini-transactions.
|
// Txn is the interface that wraps mini-transactions.
|
||||||
@ -152,7 +153,12 @@ func (txn *txn) Commit() (*TxnResponse, error) {
|
|||||||
|
|
||||||
func (txn *txn) commit() (*TxnResponse, error) {
|
func (txn *txn) commit() (*TxnResponse, error) {
|
||||||
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
|
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
|
||||||
resp, err := txn.kv.remote.Txn(txn.ctx, r)
|
|
||||||
|
var opts []grpc.CallOption
|
||||||
|
if !txn.isWrite {
|
||||||
|
opts = []grpc.CallOption{grpc.FailFast(false)}
|
||||||
|
}
|
||||||
|
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -17,9 +17,13 @@ package clientv3
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/etcd/pkg/testutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestTxnPanics(t *testing.T) {
|
func TestTxnPanics(t *testing.T) {
|
||||||
|
defer testutil.AfterTest(t)
|
||||||
|
|
||||||
kv := &kv{}
|
kv := &kv{}
|
||||||
|
|
||||||
errc := make(chan string)
|
errc := make(chan string)
|
||||||
|
@ -23,6 +23,7 @@ import (
|
|||||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||||
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
|
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -60,6 +61,9 @@ type WatchResponse struct {
|
|||||||
// the channel sends a final response that has Canceled set to true with a non-nil Err().
|
// the channel sends a final response that has Canceled set to true with a non-nil Err().
|
||||||
Canceled bool
|
Canceled bool
|
||||||
|
|
||||||
|
// Created is used to indicate the creation of the watcher.
|
||||||
|
Created bool
|
||||||
|
|
||||||
closeErr error
|
closeErr error
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -88,7 +92,7 @@ func (wr *WatchResponse) Err() error {
|
|||||||
|
|
||||||
// IsProgressNotify returns true if the WatchResponse is progress notification.
|
// IsProgressNotify returns true if the WatchResponse is progress notification.
|
||||||
func (wr *WatchResponse) IsProgressNotify() bool {
|
func (wr *WatchResponse) IsProgressNotify() bool {
|
||||||
return len(wr.Events) == 0 && !wr.Canceled
|
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created
|
||||||
}
|
}
|
||||||
|
|
||||||
// watcher implements the Watcher interface
|
// watcher implements the Watcher interface
|
||||||
@ -97,6 +101,7 @@ type watcher struct {
|
|||||||
|
|
||||||
// mu protects the grpc streams map
|
// mu protects the grpc streams map
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
|
|
||||||
// streams holds all the active grpc streams keyed by ctx value.
|
// streams holds all the active grpc streams keyed by ctx value.
|
||||||
streams map[string]*watchGrpcStream
|
streams map[string]*watchGrpcStream
|
||||||
}
|
}
|
||||||
@ -137,8 +142,14 @@ type watchRequest struct {
|
|||||||
key string
|
key string
|
||||||
end string
|
end string
|
||||||
rev int64
|
rev int64
|
||||||
// progressNotify is for progress updates.
|
// send created notification event if this field is true
|
||||||
|
createdNotify bool
|
||||||
|
// progressNotify is for progress updates
|
||||||
progressNotify bool
|
progressNotify bool
|
||||||
|
// filters is the list of events to filter out
|
||||||
|
filters []pb.WatchCreateRequest_FilterType
|
||||||
|
// get the previous key-value pair before the event happens
|
||||||
|
prevKV bool
|
||||||
// retc receives a chan WatchResponse once the watcher is established
|
// retc receives a chan WatchResponse once the watcher is established
|
||||||
retc chan chan WatchResponse
|
retc chan chan WatchResponse
|
||||||
}
|
}
|
||||||
@ -161,8 +172,12 @@ type watcherStream struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewWatcher(c *Client) Watcher {
|
func NewWatcher(c *Client) Watcher {
|
||||||
|
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
|
||||||
return &watcher{
|
return &watcher{
|
||||||
remote: pb.NewWatchClient(c.conn),
|
remote: wc,
|
||||||
streams: make(map[string]*watchGrpcStream),
|
streams: make(map[string]*watchGrpcStream),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -203,12 +218,24 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
|
|||||||
ow := opWatch(key, opts...)
|
ow := opWatch(key, opts...)
|
||||||
|
|
||||||
retc := make(chan chan WatchResponse, 1)
|
retc := make(chan chan WatchResponse, 1)
|
||||||
|
|
||||||
|
var filters []pb.WatchCreateRequest_FilterType
|
||||||
|
if ow.filterPut {
|
||||||
|
filters = append(filters, pb.WatchCreateRequest_NOPUT)
|
||||||
|
}
|
||||||
|
if ow.filterDelete {
|
||||||
|
filters = append(filters, pb.WatchCreateRequest_NODELETE)
|
||||||
|
}
|
||||||
|
|
||||||
wr := &watchRequest{
|
wr := &watchRequest{
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
|
createdNotify: ow.createdNotify,
|
||||||
key: string(ow.key),
|
key: string(ow.key),
|
||||||
end: string(ow.end),
|
end: string(ow.end),
|
||||||
rev: ow.rev,
|
rev: ow.rev,
|
||||||
progressNotify: ow.progressNotify,
|
progressNotify: ow.progressNotify,
|
||||||
|
filters: filters,
|
||||||
|
prevKV: ow.prevKV,
|
||||||
retc: retc,
|
retc: retc,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -241,6 +268,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
|
|||||||
case reqc <- wr:
|
case reqc <- wr:
|
||||||
ok = true
|
ok = true
|
||||||
case <-wr.ctx.Done():
|
case <-wr.ctx.Done():
|
||||||
|
wgs.stopIfEmpty()
|
||||||
case <-donec:
|
case <-donec:
|
||||||
if wgs.closeErr != nil {
|
if wgs.closeErr != nil {
|
||||||
closeCh <- WatchResponse{closeErr: wgs.closeErr}
|
closeCh <- WatchResponse{closeErr: wgs.closeErr}
|
||||||
@ -284,7 +312,12 @@ func (w *watcher) Close() (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (w *watchGrpcStream) Close() (err error) {
|
func (w *watchGrpcStream) Close() (err error) {
|
||||||
close(w.stopc)
|
w.mu.Lock()
|
||||||
|
if w.stopc != nil {
|
||||||
|
close(w.stopc)
|
||||||
|
w.stopc = nil
|
||||||
|
}
|
||||||
|
w.mu.Unlock()
|
||||||
<-w.donec
|
<-w.donec
|
||||||
select {
|
select {
|
||||||
case err = <-w.errc:
|
case err = <-w.errc:
|
||||||
@ -347,11 +380,13 @@ func (w *watchGrpcStream) addStream(resp *pb.WatchResponse, pendingReq *watchReq
|
|||||||
|
|
||||||
// closeStream closes the watcher resources and removes it
|
// closeStream closes the watcher resources and removes it
|
||||||
func (w *watchGrpcStream) closeStream(ws *watcherStream) {
|
func (w *watchGrpcStream) closeStream(ws *watcherStream) {
|
||||||
|
w.mu.Lock()
|
||||||
// cancels request stream; subscriber receives nil channel
|
// cancels request stream; subscriber receives nil channel
|
||||||
close(ws.initReq.retc)
|
close(ws.initReq.retc)
|
||||||
// close subscriber's channel
|
// close subscriber's channel
|
||||||
close(ws.outc)
|
close(ws.outc)
|
||||||
delete(w.streams, ws.id)
|
delete(w.streams, ws.id)
|
||||||
|
w.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// run is the root of the goroutines for managing a watcher client
|
// run is the root of the goroutines for managing a watcher client
|
||||||
@ -370,6 +405,14 @@ func (w *watchGrpcStream) run() {
|
|||||||
w.cancel()
|
w.cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
// already stopped?
|
||||||
|
w.mu.RLock()
|
||||||
|
stopc := w.stopc
|
||||||
|
w.mu.RUnlock()
|
||||||
|
if stopc == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// start a stream with the etcd grpc server
|
// start a stream with the etcd grpc server
|
||||||
if wc, closeErr = w.newWatchClient(); closeErr != nil {
|
if wc, closeErr = w.newWatchClient(); closeErr != nil {
|
||||||
return
|
return
|
||||||
@ -398,6 +441,7 @@ func (w *watchGrpcStream) run() {
|
|||||||
w.addStream(pbresp, pendingReq)
|
w.addStream(pbresp, pendingReq)
|
||||||
pendingReq = nil
|
pendingReq = nil
|
||||||
curReqC = w.reqc
|
curReqC = w.reqc
|
||||||
|
w.dispatchEvent(pbresp)
|
||||||
case pbresp.Canceled:
|
case pbresp.Canceled:
|
||||||
delete(cancelSet, pbresp.WatchId)
|
delete(cancelSet, pbresp.WatchId)
|
||||||
// shutdown serveStream, if any
|
// shutdown serveStream, if any
|
||||||
@ -433,7 +477,7 @@ func (w *watchGrpcStream) run() {
|
|||||||
// watch client failed to recv; spawn another if possible
|
// watch client failed to recv; spawn another if possible
|
||||||
// TODO report watch client errors from errc?
|
// TODO report watch client errors from errc?
|
||||||
case err := <-w.errc:
|
case err := <-w.errc:
|
||||||
if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
|
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
|
||||||
closeErr = err
|
closeErr = err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -445,7 +489,7 @@ func (w *watchGrpcStream) run() {
|
|||||||
failedReq = pendingReq
|
failedReq = pendingReq
|
||||||
}
|
}
|
||||||
cancelSet = make(map[int64]struct{})
|
cancelSet = make(map[int64]struct{})
|
||||||
case <-w.stopc:
|
case <-stopc:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -469,19 +513,23 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
|
|||||||
w.mu.RLock()
|
w.mu.RLock()
|
||||||
defer w.mu.RUnlock()
|
defer w.mu.RUnlock()
|
||||||
ws, ok := w.streams[pbresp.WatchId]
|
ws, ok := w.streams[pbresp.WatchId]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
events := make([]*Event, len(pbresp.Events))
|
events := make([]*Event, len(pbresp.Events))
|
||||||
for i, ev := range pbresp.Events {
|
for i, ev := range pbresp.Events {
|
||||||
events[i] = (*Event)(ev)
|
events[i] = (*Event)(ev)
|
||||||
}
|
}
|
||||||
if ok {
|
wr := &WatchResponse{
|
||||||
wr := &WatchResponse{
|
Header: *pbresp.Header,
|
||||||
Header: *pbresp.Header,
|
Events: events,
|
||||||
Events: events,
|
CompactRevision: pbresp.CompactRevision,
|
||||||
CompactRevision: pbresp.CompactRevision,
|
Created: pbresp.Created,
|
||||||
Canceled: pbresp.Canceled}
|
Canceled: pbresp.Canceled,
|
||||||
ws.recvc <- wr
|
|
||||||
}
|
}
|
||||||
return ok
|
ws.recvc <- wr
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// serveWatchClient forwards messages from the grpc stream to run()
|
// serveWatchClient forwards messages from the grpc stream to run()
|
||||||
@ -505,6 +553,7 @@ func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
|
|||||||
|
|
||||||
// serveStream forwards watch responses from run() to the subscriber
|
// serveStream forwards watch responses from run() to the subscriber
|
||||||
func (w *watchGrpcStream) serveStream(ws *watcherStream) {
|
func (w *watchGrpcStream) serveStream(ws *watcherStream) {
|
||||||
|
var closeErr error
|
||||||
emptyWr := &WatchResponse{}
|
emptyWr := &WatchResponse{}
|
||||||
wrs := []*WatchResponse{}
|
wrs := []*WatchResponse{}
|
||||||
resuming := false
|
resuming := false
|
||||||
@ -512,6 +561,14 @@ func (w *watchGrpcStream) serveStream(ws *watcherStream) {
|
|||||||
for !closing {
|
for !closing {
|
||||||
curWr := emptyWr
|
curWr := emptyWr
|
||||||
outc := ws.outc
|
outc := ws.outc
|
||||||
|
|
||||||
|
// ignore created event if create notify is not requested or
|
||||||
|
// we already sent the initial created event (when we are on the resume path).
|
||||||
|
if len(wrs) > 0 && wrs[0].Created &&
|
||||||
|
(!ws.initReq.createdNotify || ws.lastRev != 0) {
|
||||||
|
wrs = wrs[1:]
|
||||||
|
}
|
||||||
|
|
||||||
if len(wrs) > 0 {
|
if len(wrs) > 0 {
|
||||||
curWr = wrs[0]
|
curWr = wrs[0]
|
||||||
} else {
|
} else {
|
||||||
@ -569,13 +626,14 @@ func (w *watchGrpcStream) serveStream(ws *watcherStream) {
|
|||||||
}
|
}
|
||||||
case <-w.donec:
|
case <-w.donec:
|
||||||
closing = true
|
closing = true
|
||||||
|
closeErr = w.closeErr
|
||||||
case <-ws.initReq.ctx.Done():
|
case <-ws.initReq.ctx.Done():
|
||||||
closing = true
|
closing = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// try to send off close error
|
// try to send off close error
|
||||||
if w.closeErr != nil {
|
if closeErr != nil {
|
||||||
select {
|
select {
|
||||||
case ws.outc <- WatchResponse{closeErr: w.closeErr}:
|
case ws.outc <- WatchResponse{closeErr: w.closeErr}:
|
||||||
case <-w.donec:
|
case <-w.donec:
|
||||||
@ -583,12 +641,20 @@ func (w *watchGrpcStream) serveStream(ws *watcherStream) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
w.mu.Lock()
|
|
||||||
w.closeStream(ws)
|
w.closeStream(ws)
|
||||||
w.mu.Unlock()
|
w.stopIfEmpty()
|
||||||
// lazily send cancel message if events on missing id
|
// lazily send cancel message if events on missing id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (wgs *watchGrpcStream) stopIfEmpty() {
|
||||||
|
wgs.mu.Lock()
|
||||||
|
if len(wgs.streams) == 0 && wgs.stopc != nil {
|
||||||
|
close(wgs.stopc)
|
||||||
|
wgs.stopc = nil
|
||||||
|
}
|
||||||
|
wgs.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
|
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
|
||||||
ws, rerr := w.resume()
|
ws, rerr := w.resume()
|
||||||
if rerr != nil {
|
if rerr != nil {
|
||||||
@ -613,15 +679,16 @@ func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) {
|
|||||||
// openWatchClient retries opening a watchclient until retryConnection fails
|
// openWatchClient retries opening a watchclient until retryConnection fails
|
||||||
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
|
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
|
||||||
for {
|
for {
|
||||||
select {
|
w.mu.Lock()
|
||||||
case <-w.stopc:
|
stopc := w.stopc
|
||||||
|
w.mu.Unlock()
|
||||||
|
if stopc == nil {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = context.Canceled
|
err = context.Canceled
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
if ws, err = w.remote.Watch(w.ctx); ws != nil && err == nil {
|
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if isHaltErr(w.ctx, err) {
|
if isHaltErr(w.ctx, err) {
|
||||||
@ -641,6 +708,10 @@ func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error {
|
|||||||
w.mu.RUnlock()
|
w.mu.RUnlock()
|
||||||
|
|
||||||
for _, ws := range streams {
|
for _, ws := range streams {
|
||||||
|
// drain recvc so no old WatchResponses (e.g., Created messages)
|
||||||
|
// are processed while resuming
|
||||||
|
ws.drain()
|
||||||
|
|
||||||
// pause serveStream
|
// pause serveStream
|
||||||
ws.resumec <- -1
|
ws.resumec <- -1
|
||||||
|
|
||||||
@ -673,6 +744,17 @@ func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// drain removes all buffered WatchResponses from the stream's receive channel.
|
||||||
|
func (ws *watcherStream) drain() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ws.recvc:
|
||||||
|
default:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
|
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
|
||||||
func (wr *watchRequest) toPB() *pb.WatchRequest {
|
func (wr *watchRequest) toPB() *pb.WatchRequest {
|
||||||
req := &pb.WatchCreateRequest{
|
req := &pb.WatchCreateRequest{
|
||||||
@ -680,6 +762,8 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
|
|||||||
Key: []byte(wr.key),
|
Key: []byte(wr.key),
|
||||||
RangeEnd: []byte(wr.end),
|
RangeEnd: []byte(wr.end),
|
||||||
ProgressNotify: wr.progressNotify,
|
ProgressNotify: wr.progressNotify,
|
||||||
|
Filters: wr.filters,
|
||||||
|
PrevKv: wr.prevKV,
|
||||||
}
|
}
|
||||||
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
||||||
return &pb.WatchRequest{RequestUnion: cr}
|
return &pb.WatchRequest{RequestUnion: cr}
|
||||||
|
281
cmd/Godeps/Godeps.json
generated
281
cmd/Godeps/Godeps.json
generated
@ -1,281 +0,0 @@
|
|||||||
{
|
|
||||||
"ImportPath": "github.com/coreos/etcd",
|
|
||||||
"GoVersion": "go1.6",
|
|
||||||
"GodepVersion": "v74",
|
|
||||||
"Packages": [
|
|
||||||
"./..."
|
|
||||||
],
|
|
||||||
"Deps": [
|
|
||||||
{
|
|
||||||
"ImportPath": "bitbucket.org/ww/goautoneg",
|
|
||||||
"Comment": "null-5",
|
|
||||||
"Rev": "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/akrennmair/gopcap",
|
|
||||||
"Rev": "00e11033259acb75598ba416495bb708d864a010"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
|
||||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/bgentry/speakeasy",
|
|
||||||
"Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/boltdb/bolt",
|
|
||||||
"Comment": "v1.2.1",
|
|
||||||
"Rev": "dfb21201d9270c1082d5fb0f07f500311ff72f18"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/cockroachdb/cmux",
|
|
||||||
"Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/coreos/go-semver/semver",
|
|
||||||
"Rev": "568e959cd89871e61434c1143528d9162da89ef2"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/coreos/go-systemd/daemon",
|
|
||||||
"Comment": "v3-6-gcea488b",
|
|
||||||
"Rev": "cea488b4e6855fee89b6c22a811e3c5baca861b6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
|
||||||
"Comment": "v3-6-gcea488b",
|
|
||||||
"Rev": "cea488b4e6855fee89b6c22a811e3c5baca861b6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/coreos/go-systemd/util",
|
|
||||||
"Comment": "v3-6-gcea488b",
|
|
||||||
"Rev": "cea488b4e6855fee89b6c22a811e3c5baca861b6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
|
||||||
"Comment": "v2-8-gfa29b1d",
|
|
||||||
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
|
|
||||||
"Comment": "v1.0.4",
|
|
||||||
"Rev": "71acacd42f85e5e82f70a55327789582a5200a90"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/dustin/go-humanize",
|
|
||||||
"Rev": "8929fe90cee4b2cb9deb468b51fb34eba64d1bf0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gengo/grpc-gateway/runtime",
|
|
||||||
"Rev": "dcb844349dc5d2cb0300fdc4d2d374839d0d2e13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gengo/grpc-gateway/runtime/internal",
|
|
||||||
"Rev": "dcb844349dc5d2cb0300fdc4d2d374839d0d2e13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gengo/grpc-gateway/utilities",
|
|
||||||
"Rev": "dcb844349dc5d2cb0300fdc4d2d374839d0d2e13"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/ghodss/yaml",
|
|
||||||
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/gogo/protobuf/proto",
|
|
||||||
"Comment": "v0.2-13-gc3995ae",
|
|
||||||
"Rev": "c3995ae437bb78d1189f4f147dfe5f87ad3596e4"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/glog",
|
|
||||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/groupcache/lru",
|
|
||||||
"Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/protobuf/jsonpb",
|
|
||||||
"Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/golang/protobuf/proto",
|
|
||||||
"Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/google/btree",
|
|
||||||
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/inconshreveable/mousetrap",
|
|
||||||
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/jonboulle/clockwork",
|
|
||||||
"Rev": "72f9bd7c4e0c2a40055ab3d0f09654f730cce982"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/kballard/go-shellquote",
|
|
||||||
"Rev": "d8ec1a69a250a17bb0e419c386eac1f3711dc142"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/kr/pty",
|
|
||||||
"Comment": "release.r56-29-gf7ee69f",
|
|
||||||
"Rev": "f7ee69f31298ecbe5d2b349c711e2547a617d398"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/mattn/go-runewidth",
|
|
||||||
"Comment": "v0.0.1",
|
|
||||||
"Rev": "d6bea18f789704b5f83375793155289da36a3c7f"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
|
||||||
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/olekukonko/tablewriter",
|
|
||||||
"Rev": "cca8bbc0798408af109aaaa239cbd2634846b340"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
|
||||||
"Comment": "0.7.0-52-ge51041b",
|
|
||||||
"Rev": "e51041b3fa41cece0dca035740ba6411905be473"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/prometheus/client_model/go",
|
|
||||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
|
||||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
|
||||||
"Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/prometheus/common/model",
|
|
||||||
"Rev": "ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/prometheus/procfs",
|
|
||||||
"Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/russross/blackfriday",
|
|
||||||
"Comment": "v1.4-2-g300106c",
|
|
||||||
"Rev": "300106c228d52c8941d4b3de6054a6062a86dda3"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/shurcooL/sanitized_anchor_name",
|
|
||||||
"Rev": "10ef21a441db47d8b13ebcc5fd2310f636973c77"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spacejam/loghisto",
|
|
||||||
"Rev": "323309774dec8b7430187e46cd0793974ccca04a"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/cobra",
|
|
||||||
"Rev": "1c44ec8d3f1552cac48999f9306da23c4d8a288b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/spf13/pflag",
|
|
||||||
"Rev": "08b1a584251b5b62f458943640fc8ebd4d50aaa5"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/stretchr/testify/assert",
|
|
||||||
"Rev": "9cc77fa25329013ce07362c7742952ff887361f2"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/ugorji/go/codec",
|
|
||||||
"Rev": "f1f1a805ed361a0e078bb537e4ea78cd37dcf065"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/urfave/cli",
|
|
||||||
"Comment": "v1.17.0-79-g6011f16",
|
|
||||||
"Rev": "6011f165dc288c72abd8acd7722f837c5c64198d"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/xiang90/probing",
|
|
||||||
"Rev": "6a0cc1ae81b4cc11db5e491e030e4b98fba79c19"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/crypto/bcrypt",
|
|
||||||
"Rev": "1351f936d976c60a0a48d728281922cf63eafb8d"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/crypto/blowfish",
|
|
||||||
"Rev": "1351f936d976c60a0a48d728281922cf63eafb8d"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/context",
|
|
||||||
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/http2",
|
|
||||||
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
|
||||||
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
|
||||||
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/net/trace",
|
|
||||||
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/sys/unix",
|
|
||||||
"Rev": "9c60d1c508f5134d1ca726b4641db998f2523357"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "golang.org/x/time/rate",
|
|
||||||
"Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/codes",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/credentials",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/internal",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/metadata",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/naming",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/peer",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "google.golang.org/grpc/transport",
|
|
||||||
"Rev": "e78224b060cf3215247b7be455f80ea22e469b66"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "gopkg.in/cheggaaa/pb.v1",
|
|
||||||
"Comment": "v1.0.1",
|
|
||||||
"Rev": "29ad9b62f9e0274422d738242b94a5b89440bfa6"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "gopkg.in/yaml.v2",
|
|
||||||
"Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
5
cmd/Godeps/Readme
generated
5
cmd/Godeps/Readme
generated
@ -1,5 +0,0 @@
|
|||||||
This directory tree is generated automatically by godep.
|
|
||||||
|
|
||||||
Please do not edit.
|
|
||||||
|
|
||||||
See https://github.com/tools/godep for more information.
|
|
@ -1 +0,0 @@
|
|||||||
../etcdmain
|
|
@ -1 +0,0 @@
|
|||||||
../main.go
|
|
13
cmd/vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
13
cmd/vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
include $(GOROOT)/src/Make.inc
|
|
||||||
|
|
||||||
TARG=bitbucket.org/ww/goautoneg
|
|
||||||
GOFILES=autoneg.go
|
|
||||||
|
|
||||||
include $(GOROOT)/src/Make.pkg
|
|
||||||
|
|
||||||
format:
|
|
||||||
gofmt -w *.go
|
|
||||||
|
|
||||||
docs:
|
|
||||||
gomake clean
|
|
||||||
godoc ${TARG} > README.txt
|
|
67
cmd/vendor/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
67
cmd/vendor/bitbucket.org/ww/goautoneg/README.txt
generated
vendored
@ -1,67 +0,0 @@
|
|||||||
PACKAGE
|
|
||||||
|
|
||||||
package goautoneg
|
|
||||||
import "bitbucket.org/ww/goautoneg"
|
|
||||||
|
|
||||||
HTTP Content-Type Autonegotiation.
|
|
||||||
|
|
||||||
The functions in this package implement the behaviour specified in
|
|
||||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
|
||||||
|
|
||||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
|
|
||||||
Redistributions in binary form must reproduce the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer in
|
|
||||||
the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
|
|
||||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
|
||||||
names of its contributors may be used to endorse or promote
|
|
||||||
products derived from this software without specific prior written
|
|
||||||
permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
|
|
||||||
FUNCTIONS
|
|
||||||
|
|
||||||
func Negotiate(header string, alternatives []string) (content_type string)
|
|
||||||
Negotiate the most appropriate content_type given the accept header
|
|
||||||
and a list of alternatives.
|
|
||||||
|
|
||||||
func ParseAccept(header string) (accept []Accept)
|
|
||||||
Parse an Accept Header string returning a sorted list
|
|
||||||
of clauses
|
|
||||||
|
|
||||||
|
|
||||||
TYPES
|
|
||||||
|
|
||||||
type Accept struct {
|
|
||||||
Type, SubType string
|
|
||||||
Q float32
|
|
||||||
Params map[string]string
|
|
||||||
}
|
|
||||||
Structure to represent a clause in an HTTP Accept Header
|
|
||||||
|
|
||||||
|
|
||||||
SUBDIRECTORIES
|
|
||||||
|
|
||||||
.hg
|
|
5
cmd/vendor/github.com/akrennmair/gopcap/.gitignore
generated
vendored
5
cmd/vendor/github.com/akrennmair/gopcap/.gitignore
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
#*
|
|
||||||
*~
|
|
||||||
/tools/pass/pass
|
|
||||||
/tools/pcaptest/pcaptest
|
|
||||||
/tools/tcpdump/tcpdump
|
|
11
cmd/vendor/github.com/akrennmair/gopcap/README.mkd
generated
vendored
11
cmd/vendor/github.com/akrennmair/gopcap/README.mkd
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
# PCAP
|
|
||||||
|
|
||||||
This is a simple wrapper around libpcap for Go. Originally written by Andreas
|
|
||||||
Krennmair <ak@synflood.at> and only minorly touched up by Mark Smith <mark@qq.is>.
|
|
||||||
|
|
||||||
Please see the included pcaptest.go and tcpdump.go programs for instructions on
|
|
||||||
how to use this library.
|
|
||||||
|
|
||||||
Miek Gieben <miek@miek.nl> has created a more Go-like package and replaced functionality
|
|
||||||
with standard functions from the standard library. The package has also been renamed to
|
|
||||||
pcap.
|
|
2388
cmd/vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
2388
cmd/vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
File diff suppressed because it is too large
Load Diff
2
cmd/vendor/github.com/bgentry/speakeasy/.gitignore
generated
vendored
2
cmd/vendor/github.com/bgentry/speakeasy/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
example/example
|
|
||||||
example/example.exe
|
|
30
cmd/vendor/github.com/bgentry/speakeasy/Readme.md
generated
vendored
30
cmd/vendor/github.com/bgentry/speakeasy/Readme.md
generated
vendored
@ -1,30 +0,0 @@
|
|||||||
# Speakeasy
|
|
||||||
|
|
||||||
This package provides cross-platform Go (#golang) helpers for taking user input
|
|
||||||
from the terminal while not echoing the input back (similar to `getpasswd`). The
|
|
||||||
package uses syscalls to avoid any dependence on cgo, and is therefore
|
|
||||||
compatible with cross-compiling.
|
|
||||||
|
|
||||||
[][godoc]
|
|
||||||
|
|
||||||
## Unicode
|
|
||||||
|
|
||||||
Multi-byte unicode characters work successfully on Mac OS X. On Windows,
|
|
||||||
however, this may be problematic (as is UTF in general on Windows). Other
|
|
||||||
platforms have not been tested.
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
The code herein was not written by me, but was compiled from two separate open
|
|
||||||
source packages. Unix portions were imported from [gopass][gopass], while
|
|
||||||
Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s
|
|
||||||
[Windows terminal helpers][cf-ui-windows].
|
|
||||||
|
|
||||||
The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly
|
|
||||||
from the source (though I attempted to fill in the correct owner in the
|
|
||||||
boilerplate copyright notice).
|
|
||||||
|
|
||||||
[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI"
|
|
||||||
[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers"
|
|
||||||
[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org"
|
|
||||||
[gopass]: https://code.google.com/p/gopass "gopass"
|
|
4
cmd/vendor/github.com/boltdb/bolt/.gitignore
generated
vendored
4
cmd/vendor/github.com/boltdb/bolt/.gitignore
generated
vendored
@ -1,4 +0,0 @@
|
|||||||
*.prof
|
|
||||||
*.test
|
|
||||||
*.swp
|
|
||||||
/bin/
|
|
18
cmd/vendor/github.com/boltdb/bolt/Makefile
generated
vendored
18
cmd/vendor/github.com/boltdb/bolt/Makefile
generated
vendored
@ -1,18 +0,0 @@
|
|||||||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
|
||||||
COMMIT=`git rev-parse --short HEAD`
|
|
||||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
|
||||||
|
|
||||||
default: build
|
|
||||||
|
|
||||||
race:
|
|
||||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
|
||||||
|
|
||||||
# go get github.com/kisielk/errcheck
|
|
||||||
errcheck:
|
|
||||||
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
|
|
||||||
|
|
||||||
test:
|
|
||||||
@go test -v -cover .
|
|
||||||
@go test -v ./cmd/bolt
|
|
||||||
|
|
||||||
.PHONY: fmt test
|
|
850
cmd/vendor/github.com/boltdb/bolt/README.md
generated
vendored
850
cmd/vendor/github.com/boltdb/bolt/README.md
generated
vendored
@ -1,850 +0,0 @@
|
|||||||
Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt) 
|
|
||||||
====
|
|
||||||
|
|
||||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
|
||||||
[LMDB project][lmdb]. The goal of the project is to provide a simple,
|
|
||||||
fast, and reliable database for projects that don't require a full database
|
|
||||||
server such as Postgres or MySQL.
|
|
||||||
|
|
||||||
Since Bolt is meant to be used as such a low-level piece of functionality,
|
|
||||||
simplicity is key. The API will be small and only focus on getting values
|
|
||||||
and setting values. That's it.
|
|
||||||
|
|
||||||
[hyc_symas]: https://twitter.com/hyc_symas
|
|
||||||
[lmdb]: http://symas.com/mdb/
|
|
||||||
|
|
||||||
## Project Status
|
|
||||||
|
|
||||||
Bolt is stable and the API is fixed. Full unit test coverage and randomized
|
|
||||||
black box testing are used to ensure database consistency and thread safety.
|
|
||||||
Bolt is currently in high-load production environments serving databases as
|
|
||||||
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
|
|
||||||
services every day.
|
|
||||||
|
|
||||||
## Table of Contents
|
|
||||||
|
|
||||||
- [Getting Started](#getting-started)
|
|
||||||
- [Installing](#installing)
|
|
||||||
- [Opening a database](#opening-a-database)
|
|
||||||
- [Transactions](#transactions)
|
|
||||||
- [Read-write transactions](#read-write-transactions)
|
|
||||||
- [Read-only transactions](#read-only-transactions)
|
|
||||||
- [Batch read-write transactions](#batch-read-write-transactions)
|
|
||||||
- [Managing transactions manually](#managing-transactions-manually)
|
|
||||||
- [Using buckets](#using-buckets)
|
|
||||||
- [Using key/value pairs](#using-keyvalue-pairs)
|
|
||||||
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
|
|
||||||
- [Iterating over keys](#iterating-over-keys)
|
|
||||||
- [Prefix scans](#prefix-scans)
|
|
||||||
- [Range scans](#range-scans)
|
|
||||||
- [ForEach()](#foreach)
|
|
||||||
- [Nested buckets](#nested-buckets)
|
|
||||||
- [Database backups](#database-backups)
|
|
||||||
- [Statistics](#statistics)
|
|
||||||
- [Read-Only Mode](#read-only-mode)
|
|
||||||
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
|
|
||||||
- [Resources](#resources)
|
|
||||||
- [Comparison with other databases](#comparison-with-other-databases)
|
|
||||||
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
|
|
||||||
- [LevelDB, RocksDB](#leveldb-rocksdb)
|
|
||||||
- [LMDB](#lmdb)
|
|
||||||
- [Caveats & Limitations](#caveats--limitations)
|
|
||||||
- [Reading the Source](#reading-the-source)
|
|
||||||
- [Other Projects Using Bolt](#other-projects-using-bolt)
|
|
||||||
|
|
||||||
## Getting Started
|
|
||||||
|
|
||||||
### Installing
|
|
||||||
|
|
||||||
To start using Bolt, install Go and run `go get`:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
$ go get github.com/boltdb/bolt/...
|
|
||||||
```
|
|
||||||
|
|
||||||
This will retrieve the library and install the `bolt` command line utility into
|
|
||||||
your `$GOBIN` path.
|
|
||||||
|
|
||||||
|
|
||||||
### Opening a database
|
|
||||||
|
|
||||||
The top-level object in Bolt is a `DB`. It is represented as a single file on
|
|
||||||
your disk and represents a consistent snapshot of your data.
|
|
||||||
|
|
||||||
To open your database, simply use the `bolt.Open()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/boltdb/bolt"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Open the my.db data file in your current directory.
|
|
||||||
// It will be created if it doesn't exist.
|
|
||||||
db, err := bolt.Open("my.db", 0600, nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer db.Close()
|
|
||||||
|
|
||||||
...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Please note that Bolt obtains a file lock on the data file so multiple processes
|
|
||||||
cannot open the same database at the same time. Opening an already open Bolt
|
|
||||||
database will cause it to hang until the other process closes it. To prevent
|
|
||||||
an indefinite wait you can pass a timeout option to the `Open()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Transactions
|
|
||||||
|
|
||||||
Bolt allows only one read-write transaction at a time but allows as many
|
|
||||||
read-only transactions as you want at a time. Each transaction has a consistent
|
|
||||||
view of the data as it existed when the transaction started.
|
|
||||||
|
|
||||||
Individual transactions and all objects created from them (e.g. buckets, keys)
|
|
||||||
are not thread safe. To work with data in multiple goroutines you must start
|
|
||||||
a transaction for each one or use locking to ensure only one goroutine accesses
|
|
||||||
a transaction at a time. Creating transaction from the `DB` is thread safe.
|
|
||||||
|
|
||||||
Read-only transactions and read-write transactions should not depend on one
|
|
||||||
another and generally shouldn't be opened simultaneously in the same goroutine.
|
|
||||||
This can cause a deadlock as the read-write transaction needs to periodically
|
|
||||||
re-map the data file but it cannot do so while a read-only transaction is open.
|
|
||||||
|
|
||||||
|
|
||||||
#### Read-write transactions
|
|
||||||
|
|
||||||
To start a read-write transaction, you can use the `DB.Update()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := db.Update(func(tx *bolt.Tx) error {
|
|
||||||
...
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
Inside the closure, you have a consistent view of the database. You commit the
|
|
||||||
transaction by returning `nil` at the end. You can also rollback the transaction
|
|
||||||
at any point by returning an error. All database operations are allowed inside
|
|
||||||
a read-write transaction.
|
|
||||||
|
|
||||||
Always check the return error as it will report any disk failures that can cause
|
|
||||||
your transaction to not complete. If you return an error within your closure
|
|
||||||
it will be passed through.
|
|
||||||
|
|
||||||
|
|
||||||
#### Read-only transactions
|
|
||||||
|
|
||||||
To start a read-only transaction, you can use the `DB.View()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := db.View(func(tx *bolt.Tx) error {
|
|
||||||
...
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
You also get a consistent view of the database within this closure, however,
|
|
||||||
no mutating operations are allowed within a read-only transaction. You can only
|
|
||||||
retrieve buckets, retrieve values, and copy the database within a read-only
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
|
|
||||||
#### Batch read-write transactions
|
|
||||||
|
|
||||||
Each `DB.Update()` waits for disk to commit the writes. This overhead
|
|
||||||
can be minimized by combining multiple updates with the `DB.Batch()`
|
|
||||||
function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := db.Batch(func(tx *bolt.Tx) error {
|
|
||||||
...
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
Concurrent Batch calls are opportunistically combined into larger
|
|
||||||
transactions. Batch is only useful when there are multiple goroutines
|
|
||||||
calling it.
|
|
||||||
|
|
||||||
The trade-off is that `Batch` can call the given
|
|
||||||
function multiple times, if parts of the transaction fail. The
|
|
||||||
function must be idempotent and side effects must take effect only
|
|
||||||
after a successful return from `DB.Batch()`.
|
|
||||||
|
|
||||||
For example: don't display messages from inside the function, instead
|
|
||||||
set variables in the enclosing scope:
|
|
||||||
|
|
||||||
```go
|
|
||||||
var id uint64
|
|
||||||
err := db.Batch(func(tx *bolt.Tx) error {
|
|
||||||
// Find last key in bucket, decode as bigendian uint64, increment
|
|
||||||
// by one, encode back to []byte, and add new key.
|
|
||||||
...
|
|
||||||
id = newValue
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return ...
|
|
||||||
}
|
|
||||||
fmt.Println("Allocated ID %d", id)
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
#### Managing transactions manually
|
|
||||||
|
|
||||||
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
|
|
||||||
function. These helper functions will start the transaction, execute a function,
|
|
||||||
and then safely close your transaction if an error is returned. This is the
|
|
||||||
recommended way to use Bolt transactions.
|
|
||||||
|
|
||||||
However, sometimes you may want to manually start and end your transactions.
|
|
||||||
You can use the `Tx.Begin()` function directly but **please** be sure to close
|
|
||||||
the transaction.
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Start a writable transaction.
|
|
||||||
tx, err := db.Begin(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer tx.Rollback()
|
|
||||||
|
|
||||||
// Use the transaction...
|
|
||||||
_, err := tx.CreateBucket([]byte("MyBucket"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit the transaction and check for error.
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
The first argument to `DB.Begin()` is a boolean stating if the transaction
|
|
||||||
should be writable.
|
|
||||||
|
|
||||||
|
|
||||||
### Using buckets
|
|
||||||
|
|
||||||
Buckets are collections of key/value pairs within the database. All keys in a
|
|
||||||
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
|
|
||||||
function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.Update(func(tx *bolt.Tx) error {
|
|
||||||
b, err := tx.CreateBucket([]byte("MyBucket"))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("create bucket: %s", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also create a bucket only if it doesn't exist by using the
|
|
||||||
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
|
|
||||||
function for all your top-level buckets after you open your database so you can
|
|
||||||
guarantee that they exist for future transactions.
|
|
||||||
|
|
||||||
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
|
|
||||||
|
|
||||||
|
|
||||||
### Using key/value pairs
|
|
||||||
|
|
||||||
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.Update(func(tx *bolt.Tx) error {
|
|
||||||
b := tx.Bucket([]byte("MyBucket"))
|
|
||||||
err := b.Put([]byte("answer"), []byte("42"))
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
|
|
||||||
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.View(func(tx *bolt.Tx) error {
|
|
||||||
b := tx.Bucket([]byte("MyBucket"))
|
|
||||||
v := b.Get([]byte("answer"))
|
|
||||||
fmt.Printf("The answer is: %s\n", v)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
The `Get()` function does not return an error because its operation is
|
|
||||||
guaranteed to work (unless there is some kind of system failure). If the key
|
|
||||||
exists then it will return its byte slice value. If it doesn't exist then it
|
|
||||||
will return `nil`. It's important to note that you can have a zero-length value
|
|
||||||
set to a key which is different than the key not existing.
|
|
||||||
|
|
||||||
Use the `Bucket.Delete()` function to delete a key from the bucket.
|
|
||||||
|
|
||||||
Please note that values returned from `Get()` are only valid while the
|
|
||||||
transaction is open. If you need to use a value outside of the transaction
|
|
||||||
then you must use `copy()` to copy it to another byte slice.
|
|
||||||
|
|
||||||
|
|
||||||
### Autoincrementing integer for the bucket
|
|
||||||
By using the `NextSequence()` function, you can let Bolt determine a sequence
|
|
||||||
which can be used as the unique identifier for your key/value pairs. See the
|
|
||||||
example below.
|
|
||||||
|
|
||||||
```go
|
|
||||||
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
|
|
||||||
func (s *Store) CreateUser(u *User) error {
|
|
||||||
return s.db.Update(func(tx *bolt.Tx) error {
|
|
||||||
// Retrieve the users bucket.
|
|
||||||
// This should be created when the DB is first opened.
|
|
||||||
b := tx.Bucket([]byte("users"))
|
|
||||||
|
|
||||||
// Generate ID for the user.
|
|
||||||
// This returns an error only if the Tx is closed or not writeable.
|
|
||||||
// That can't happen in an Update() call so I ignore the error check.
|
|
||||||
id, _ = b.NextSequence()
|
|
||||||
u.ID = int(id)
|
|
||||||
|
|
||||||
// Marshal user data into bytes.
|
|
||||||
buf, err := json.Marshal(u)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Persist bytes to users bucket.
|
|
||||||
return b.Put(itob(u.ID), buf)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// itob returns an 8-byte big endian representation of v.
|
|
||||||
func itob(v int) []byte {
|
|
||||||
b := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint64(b, uint64(v))
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
type User struct {
|
|
||||||
ID int
|
|
||||||
...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Iterating over keys
|
|
||||||
|
|
||||||
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
|
|
||||||
iteration over these keys extremely fast. To iterate over keys we'll use a
|
|
||||||
`Cursor`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.View(func(tx *bolt.Tx) error {
|
|
||||||
// Assume bucket exists and has keys
|
|
||||||
b := tx.Bucket([]byte("MyBucket"))
|
|
||||||
|
|
||||||
c := b.Cursor()
|
|
||||||
|
|
||||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
|
||||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
The cursor allows you to move to a specific point in the list of keys and move
|
|
||||||
forward or backward through the keys one at a time.
|
|
||||||
|
|
||||||
The following functions are available on the cursor:
|
|
||||||
|
|
||||||
```
|
|
||||||
First() Move to the first key.
|
|
||||||
Last() Move to the last key.
|
|
||||||
Seek() Move to a specific key.
|
|
||||||
Next() Move to the next key.
|
|
||||||
Prev() Move to the previous key.
|
|
||||||
```
|
|
||||||
|
|
||||||
Each of those functions has a return signature of `(key []byte, value []byte)`.
|
|
||||||
When you have iterated to the end of the cursor then `Next()` will return a
|
|
||||||
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
|
|
||||||
before calling `Next()` or `Prev()`. If you do not seek to a position then
|
|
||||||
these functions will return a `nil` key.
|
|
||||||
|
|
||||||
During iteration, if the key is non-`nil` but the value is `nil`, that means
|
|
||||||
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
|
|
||||||
access the sub-bucket.
|
|
||||||
|
|
||||||
|
|
||||||
#### Prefix scans
|
|
||||||
|
|
||||||
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.View(func(tx *bolt.Tx) error {
|
|
||||||
// Assume bucket exists and has keys
|
|
||||||
c := tx.Bucket([]byte("MyBucket")).Cursor()
|
|
||||||
|
|
||||||
prefix := []byte("1234")
|
|
||||||
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
|
|
||||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Range scans
|
|
||||||
|
|
||||||
Another common use case is scanning over a range such as a time range. If you
|
|
||||||
use a sortable time encoding such as RFC3339 then you can query a specific
|
|
||||||
date range like this:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.View(func(tx *bolt.Tx) error {
|
|
||||||
// Assume our events bucket exists and has RFC3339 encoded time keys.
|
|
||||||
c := tx.Bucket([]byte("Events")).Cursor()
|
|
||||||
|
|
||||||
// Our time range spans the 90's decade.
|
|
||||||
min := []byte("1990-01-01T00:00:00Z")
|
|
||||||
max := []byte("2000-01-01T00:00:00Z")
|
|
||||||
|
|
||||||
// Iterate over the 90's.
|
|
||||||
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
|
|
||||||
fmt.Printf("%s: %s\n", k, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
|
|
||||||
|
|
||||||
|
|
||||||
#### ForEach()
|
|
||||||
|
|
||||||
You can also use the function `ForEach()` if you know you'll be iterating over
|
|
||||||
all the keys in a bucket:
|
|
||||||
|
|
||||||
```go
|
|
||||||
db.View(func(tx *bolt.Tx) error {
|
|
||||||
// Assume bucket exists and has keys
|
|
||||||
b := tx.Bucket([]byte("MyBucket"))
|
|
||||||
|
|
||||||
b.ForEach(func(k, v []byte) error {
|
|
||||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Nested buckets
|
|
||||||
|
|
||||||
You can also store a bucket in a key to create nested buckets. The API is the
|
|
||||||
same as the bucket management API on the `DB` object:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
|
|
||||||
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
|
|
||||||
func (*Bucket) DeleteBucket(key []byte) error
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Database backups
|
|
||||||
|
|
||||||
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
|
|
||||||
function to write a consistent view of the database to a writer. If you call
|
|
||||||
this from a read-only transaction, it will perform a hot backup and not block
|
|
||||||
your other database reads and writes.
|
|
||||||
|
|
||||||
By default, it will use a regular file handle which will utilize the operating
|
|
||||||
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
|
|
||||||
documentation for information about optimizing for larger-than-RAM datasets.
|
|
||||||
|
|
||||||
One common use case is to backup over HTTP so you can use tools like `cURL` to
|
|
||||||
do database backups:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
|
|
||||||
err := db.View(func(tx *bolt.Tx) error {
|
|
||||||
w.Header().Set("Content-Type", "application/octet-stream")
|
|
||||||
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
|
|
||||||
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
|
|
||||||
_, err := tx.WriteTo(w)
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can backup using this command:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
$ curl http://localhost/backup > my.db
|
|
||||||
```
|
|
||||||
|
|
||||||
Or you can open your browser to `http://localhost/backup` and it will download
|
|
||||||
automatically.
|
|
||||||
|
|
||||||
If you want to backup to another file you can use the `Tx.CopyFile()` helper
|
|
||||||
function.
|
|
||||||
|
|
||||||
|
|
||||||
### Statistics
|
|
||||||
|
|
||||||
The database keeps a running count of many of the internal operations it
|
|
||||||
performs so you can better understand what's going on. By grabbing a snapshot
|
|
||||||
of these stats at two points in time we can see what operations were performed
|
|
||||||
in that time range.
|
|
||||||
|
|
||||||
For example, we could start a goroutine to log stats every 10 seconds:
|
|
||||||
|
|
||||||
```go
|
|
||||||
go func() {
|
|
||||||
// Grab the initial stats.
|
|
||||||
prev := db.Stats()
|
|
||||||
|
|
||||||
for {
|
|
||||||
// Wait for 10s.
|
|
||||||
time.Sleep(10 * time.Second)
|
|
||||||
|
|
||||||
// Grab the current stats and diff them.
|
|
||||||
stats := db.Stats()
|
|
||||||
diff := stats.Sub(&prev)
|
|
||||||
|
|
||||||
// Encode stats to JSON and print to STDERR.
|
|
||||||
json.NewEncoder(os.Stderr).Encode(diff)
|
|
||||||
|
|
||||||
// Save stats for the next loop.
|
|
||||||
prev = stats
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
```
|
|
||||||
|
|
||||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
|
||||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
|
|
||||||
|
|
||||||
|
|
||||||
### Read-Only Mode
|
|
||||||
|
|
||||||
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
|
|
||||||
set the `Options.ReadOnly` flag when opening your database. Read-only mode
|
|
||||||
uses a shared lock to allow multiple processes to read from the database but
|
|
||||||
it will block any processes from opening the database in read-write mode.
|
|
||||||
|
|
||||||
```go
|
|
||||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Mobile Use (iOS/Android)
|
|
||||||
|
|
||||||
Bolt is able to run on mobile devices by leveraging the binding feature of the
|
|
||||||
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
|
|
||||||
contain your database logic and a reference to a `*bolt.DB` with an initializing
|
|
||||||
constructor that takes in a filepath where the database file will be stored.
|
|
||||||
Neither Android nor iOS require extra permissions or cleanup from using this method.
|
|
||||||
|
|
||||||
```go
|
|
||||||
func NewBoltDB(filepath string) *BoltDB {
|
|
||||||
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &BoltDB{db}
|
|
||||||
}
|
|
||||||
|
|
||||||
type BoltDB struct {
|
|
||||||
db *bolt.DB
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BoltDB) Path() string {
|
|
||||||
return b.db.Path()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BoltDB) Close() {
|
|
||||||
b.db.Close()
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Database logic should be defined as methods on this wrapper struct.
|
|
||||||
|
|
||||||
To initialize this struct from the native language (both platforms now sync
|
|
||||||
their local storage to the cloud. These snippets disable that functionality for the
|
|
||||||
database file):
|
|
||||||
|
|
||||||
#### Android
|
|
||||||
|
|
||||||
```java
|
|
||||||
String path;
|
|
||||||
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
|
|
||||||
path = getNoBackupFilesDir().getAbsolutePath();
|
|
||||||
} else{
|
|
||||||
path = getFilesDir().getAbsolutePath();
|
|
||||||
}
|
|
||||||
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
|
|
||||||
```
|
|
||||||
|
|
||||||
#### iOS
|
|
||||||
|
|
||||||
```objc
|
|
||||||
- (void)demo {
|
|
||||||
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
|
|
||||||
NSUserDomainMask,
|
|
||||||
YES) objectAtIndex:0];
|
|
||||||
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
|
|
||||||
[self addSkipBackupAttributeToItemAtPath:demo.path];
|
|
||||||
//Some DB Logic would go here
|
|
||||||
[demo close];
|
|
||||||
}
|
|
||||||
|
|
||||||
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
|
|
||||||
{
|
|
||||||
NSURL* URL= [NSURL fileURLWithPath: filePathString];
|
|
||||||
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
|
|
||||||
|
|
||||||
NSError *error = nil;
|
|
||||||
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
|
|
||||||
forKey: NSURLIsExcludedFromBackupKey error: &error];
|
|
||||||
if(!success){
|
|
||||||
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
|
|
||||||
}
|
|
||||||
return success;
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
## Resources
|
|
||||||
|
|
||||||
For more information on getting started with Bolt, check out the following articles:
|
|
||||||
|
|
||||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
|
||||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
|
||||||
|
|
||||||
|
|
||||||
## Comparison with other databases
|
|
||||||
|
|
||||||
### Postgres, MySQL, & other relational databases
|
|
||||||
|
|
||||||
Relational databases structure data into rows and are only accessible through
|
|
||||||
the use of SQL. This approach provides flexibility in how you store and query
|
|
||||||
your data but also incurs overhead in parsing and planning SQL statements. Bolt
|
|
||||||
accesses all data by a byte slice key. This makes Bolt fast to read and write
|
|
||||||
data by key but provides no built-in support for joining values together.
|
|
||||||
|
|
||||||
Most relational databases (with the exception of SQLite) are standalone servers
|
|
||||||
that run separately from your application. This gives your systems
|
|
||||||
flexibility to connect multiple application servers to a single database
|
|
||||||
server but also adds overhead in serializing and transporting data over the
|
|
||||||
network. Bolt runs as a library included in your application so all data access
|
|
||||||
has to go through your application's process. This brings data closer to your
|
|
||||||
application but limits multi-process access to the data.
|
|
||||||
|
|
||||||
|
|
||||||
### LevelDB, RocksDB
|
|
||||||
|
|
||||||
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
|
|
||||||
they are libraries bundled into the application, however, their underlying
|
|
||||||
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
|
|
||||||
random writes by using a write ahead log and multi-tiered, sorted files called
|
|
||||||
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
|
|
||||||
have trade-offs.
|
|
||||||
|
|
||||||
If you require a high random write throughput (>10,000 w/sec) or you need to use
|
|
||||||
spinning disks then LevelDB could be a good choice. If your application is
|
|
||||||
read-heavy or does a lot of range scans then Bolt could be a good choice.
|
|
||||||
|
|
||||||
One other important consideration is that LevelDB does not have transactions.
|
|
||||||
It supports batch writing of key/values pairs and it supports read snapshots
|
|
||||||
but it will not give you the ability to do a compare-and-swap operation safely.
|
|
||||||
Bolt supports fully serializable ACID transactions.
|
|
||||||
|
|
||||||
|
|
||||||
### LMDB
|
|
||||||
|
|
||||||
Bolt was originally a port of LMDB so it is architecturally similar. Both use
|
|
||||||
a B+tree, have ACID semantics with fully serializable transactions, and support
|
|
||||||
lock-free MVCC using a single writer and multiple readers.
|
|
||||||
|
|
||||||
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
|
|
||||||
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
|
|
||||||
several unsafe actions such as direct writes for the sake of performance. Bolt
|
|
||||||
opts to disallow actions which can leave the database in a corrupted state. The
|
|
||||||
only exception to this in Bolt is `DB.NoSync`.
|
|
||||||
|
|
||||||
There are also a few differences in API. LMDB requires a maximum mmap size when
|
|
||||||
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
|
|
||||||
automatically. LMDB overloads the getter and setter functions with multiple
|
|
||||||
flags whereas Bolt splits these specialized cases into their own functions.
|
|
||||||
|
|
||||||
|
|
||||||
## Caveats & Limitations
|
|
||||||
|
|
||||||
It's important to pick the right tool for the job and Bolt is no exception.
|
|
||||||
Here are a few things to note when evaluating and using Bolt:
|
|
||||||
|
|
||||||
* Bolt is good for read intensive workloads. Sequential write performance is
|
|
||||||
also fast but random writes can be slow. You can use `DB.Batch()` or add a
|
|
||||||
write-ahead log to help mitigate this issue.
|
|
||||||
|
|
||||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
|
||||||
SSDs provide a significant performance boost over spinning disks.
|
|
||||||
|
|
||||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
|
||||||
old pages cannot be reclaimed while an old transaction is using them.
|
|
||||||
|
|
||||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
|
||||||
transaction has been committed or rolled back then the memory they point to
|
|
||||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
|
||||||
see an `unexpected fault address` panic when accessing it.
|
|
||||||
|
|
||||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
|
||||||
buckets that have random inserts will cause your database to have very poor
|
|
||||||
page utilization.
|
|
||||||
|
|
||||||
* Use larger buckets in general. Smaller buckets cause poor page utilization
|
|
||||||
once they become larger than the page size (typically 4KB).
|
|
||||||
|
|
||||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
|
||||||
page will not split until the transaction is committed. Randomly inserting
|
|
||||||
more than 100,000 key/value pairs into a single new bucket in a single
|
|
||||||
transaction is not advised.
|
|
||||||
|
|
||||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
|
||||||
caching of the data. Typically, the OS will cache as much of the file as it
|
|
||||||
can in memory and will release memory as needed to other processes. This means
|
|
||||||
that Bolt can show very high memory usage when working with large databases.
|
|
||||||
However, this is expected and the OS will release memory as needed. Bolt can
|
|
||||||
handle databases much larger than the available physical RAM, provided its
|
|
||||||
memory-map fits in the process virtual address space. It may be problematic
|
|
||||||
on 32-bit systems.
|
|
||||||
|
|
||||||
* The data structures in the Bolt database are memory mapped so the data file
|
|
||||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
|
||||||
little endian machine to a big endian machine and have it work. For most
|
|
||||||
users this is not a concern since most modern CPUs are little endian.
|
|
||||||
|
|
||||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
|
||||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
|
||||||
of unused pages within its data file. These free pages can be reused by later
|
|
||||||
transactions. This works well for many use cases as databases generally tend
|
|
||||||
to grow. However, it's important to note that deleting large chunks of data
|
|
||||||
will not allow you to reclaim that space on disk.
|
|
||||||
|
|
||||||
For more information on page allocation, [see this comment][page-allocation].
|
|
||||||
|
|
||||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
|
|
||||||
|
|
||||||
|
|
||||||
## Reading the Source
|
|
||||||
|
|
||||||
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
|
|
||||||
transactional key/value database so it can be a good starting point for people
|
|
||||||
interested in how databases work.
|
|
||||||
|
|
||||||
The best places to start are the main entry points into Bolt:
|
|
||||||
|
|
||||||
- `Open()` - Initializes the reference to the database. It's responsible for
|
|
||||||
creating the database if it doesn't exist, obtaining an exclusive lock on the
|
|
||||||
file, reading the meta pages, & memory-mapping the file.
|
|
||||||
|
|
||||||
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
|
|
||||||
value of the `writable` argument. This requires briefly obtaining the "meta"
|
|
||||||
lock to keep track of open transactions. Only one read-write transaction can
|
|
||||||
exist at a time so the "rwlock" is acquired during the life of a read-write
|
|
||||||
transaction.
|
|
||||||
|
|
||||||
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
|
|
||||||
arguments, a cursor is used to traverse the B+tree to the page and position
|
|
||||||
where the key & value will be written. Once the position is found, the bucket
|
|
||||||
materializes the underlying page and the page's parent pages into memory as
|
|
||||||
"nodes". These nodes are where mutations occur during read-write transactions.
|
|
||||||
These changes get flushed to disk during commit.
|
|
||||||
|
|
||||||
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
|
|
||||||
to move to the page & position of a key/value pair. During a read-only
|
|
||||||
transaction, the key and value data is returned as a direct reference to the
|
|
||||||
underlying mmap file so there's no allocation overhead. For read-write
|
|
||||||
transactions, this data may reference the mmap file or one of the in-memory
|
|
||||||
node values.
|
|
||||||
|
|
||||||
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
|
|
||||||
or in-memory nodes. It can seek to a specific key, move to the first or last
|
|
||||||
value, or it can move forward or backward. The cursor handles the movement up
|
|
||||||
and down the B+tree transparently to the end user.
|
|
||||||
|
|
||||||
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
|
|
||||||
into pages to be written to disk. Writing to disk then occurs in two phases.
|
|
||||||
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
|
|
||||||
new meta page with an incremented transaction ID is written and another
|
|
||||||
`fsync()` occurs. This two phase write ensures that partially written data
|
|
||||||
pages are ignored in the event of a crash since the meta page pointing to them
|
|
||||||
is never written. Partially written meta pages are invalidated because they
|
|
||||||
are written with a checksum.
|
|
||||||
|
|
||||||
If you have additional notes that could be helpful for others, please submit
|
|
||||||
them via pull request.
|
|
||||||
|
|
||||||
|
|
||||||
## Other Projects Using Bolt
|
|
||||||
|
|
||||||
Below is a list of public, open source projects that use Bolt:
|
|
||||||
|
|
||||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
|
||||||
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
|
|
||||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
|
||||||
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
|
|
||||||
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
|
|
||||||
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
|
|
||||||
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
|
|
||||||
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
|
|
||||||
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
|
|
||||||
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
|
|
||||||
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
|
|
||||||
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
|
|
||||||
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
|
|
||||||
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
|
|
||||||
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
|
|
||||||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
|
||||||
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
|
|
||||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
|
||||||
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
|
|
||||||
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
|
|
||||||
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
|
|
||||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
|
||||||
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
|
|
||||||
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
|
|
||||||
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
|
|
||||||
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
|
|
||||||
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
|
|
||||||
backed by boltdb.
|
|
||||||
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
|
|
||||||
simple tx and key scans.
|
|
||||||
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
|
|
||||||
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
|
|
||||||
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
|
|
||||||
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
|
|
||||||
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
|
|
||||||
* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB.
|
|
||||||
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
|
|
||||||
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
|
|
||||||
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
|
|
||||||
|
|
||||||
If you are using Bolt in a project please send a pull request to add it to the list.
|
|
18
cmd/vendor/github.com/boltdb/bolt/appveyor.yml
generated
vendored
18
cmd/vendor/github.com/boltdb/bolt/appveyor.yml
generated
vendored
@ -1,18 +0,0 @@
|
|||||||
version: "{build}"
|
|
||||||
|
|
||||||
os: Windows Server 2012 R2
|
|
||||||
|
|
||||||
clone_folder: c:\gopath\src\github.com\boltdb\bolt
|
|
||||||
|
|
||||||
environment:
|
|
||||||
GOPATH: c:\gopath
|
|
||||||
|
|
||||||
install:
|
|
||||||
- echo %PATH%
|
|
||||||
- echo %GOPATH%
|
|
||||||
- go version
|
|
||||||
- go env
|
|
||||||
- go get -v -t ./...
|
|
||||||
|
|
||||||
build_script:
|
|
||||||
- go test -v ./...
|
|
18
cmd/vendor/github.com/boltdb/bolt/freelist.go
generated
vendored
18
cmd/vendor/github.com/boltdb/bolt/freelist.go
generated
vendored
@ -166,12 +166,16 @@ func (f *freelist) read(p *page) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Copy the list of page ids from the freelist.
|
// Copy the list of page ids from the freelist.
|
||||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
if count == 0 {
|
||||||
f.ids = make([]pgid, len(ids))
|
f.ids = nil
|
||||||
copy(f.ids, ids)
|
} else {
|
||||||
|
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||||
|
f.ids = make([]pgid, len(ids))
|
||||||
|
copy(f.ids, ids)
|
||||||
|
|
||||||
// Make sure they're sorted.
|
// Make sure they're sorted.
|
||||||
sort.Sort(pgids(f.ids))
|
sort.Sort(pgids(f.ids))
|
||||||
|
}
|
||||||
|
|
||||||
// Rebuild the page cache.
|
// Rebuild the page cache.
|
||||||
f.reindex()
|
f.reindex()
|
||||||
@ -189,7 +193,9 @@ func (f *freelist) write(p *page) error {
|
|||||||
|
|
||||||
// The page.count can only hold up to 64k elements so if we overflow that
|
// The page.count can only hold up to 64k elements so if we overflow that
|
||||||
// number then we handle it by putting the size in the first element.
|
// number then we handle it by putting the size in the first element.
|
||||||
if len(ids) < 0xFFFF {
|
if len(ids) == 0 {
|
||||||
|
p.count = uint16(len(ids))
|
||||||
|
} else if len(ids) < 0xFFFF {
|
||||||
p.count = uint16(len(ids))
|
p.count = uint16(len(ids))
|
||||||
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
|
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
|
||||||
} else {
|
} else {
|
||||||
|
5
cmd/vendor/github.com/boltdb/bolt/node.go
generated
vendored
5
cmd/vendor/github.com/boltdb/bolt/node.go
generated
vendored
@ -201,6 +201,11 @@ func (n *node) write(p *page) {
|
|||||||
}
|
}
|
||||||
p.count = uint16(len(n.inodes))
|
p.count = uint16(len(n.inodes))
|
||||||
|
|
||||||
|
// Stop here if there are no items to write.
|
||||||
|
if p.count == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Loop over each item and write it to the page.
|
// Loop over each item and write it to the page.
|
||||||
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
||||||
for i, item := range n.inodes {
|
for i, item := range n.inodes {
|
||||||
|
6
cmd/vendor/github.com/boltdb/bolt/page.go
generated
vendored
6
cmd/vendor/github.com/boltdb/bolt/page.go
generated
vendored
@ -62,6 +62,9 @@ func (p *page) leafPageElement(index uint16) *leafPageElement {
|
|||||||
|
|
||||||
// leafPageElements retrieves a list of leaf nodes.
|
// leafPageElements retrieves a list of leaf nodes.
|
||||||
func (p *page) leafPageElements() []leafPageElement {
|
func (p *page) leafPageElements() []leafPageElement {
|
||||||
|
if p.count == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -72,6 +75,9 @@ func (p *page) branchPageElement(index uint16) *branchPageElement {
|
|||||||
|
|
||||||
// branchPageElements retrieves a list of branch nodes.
|
// branchPageElements retrieves a list of branch nodes.
|
||||||
func (p *page) branchPageElements() []branchPageElement {
|
func (p *page) branchPageElements() []branchPageElement {
|
||||||
|
if p.count == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
24
cmd/vendor/github.com/cockroachdb/cmux/.gitignore
generated
vendored
24
cmd/vendor/github.com/cockroachdb/cmux/.gitignore
generated
vendored
@ -1,24 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
*.prof
|
|
22
cmd/vendor/github.com/cockroachdb/cmux/.travis.yml
generated
vendored
22
cmd/vendor/github.com/cockroachdb/cmux/.travis.yml
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.3
|
|
||||||
- 1.4
|
|
||||||
- 1.5
|
|
||||||
- 1.6
|
|
||||||
|
|
||||||
gobuild_args: -race
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
- go get -u github.com/golang/lint/golint
|
|
||||||
- if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then go get -u github.com/kisielk/errcheck; fi
|
|
||||||
- go get -u golang.org/x/tools/cmd/vet
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- '! gofmt -s -l . | read'
|
|
||||||
- golint ./...
|
|
||||||
- echo $TRAVIS_GO_VERSION
|
|
||||||
- if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then errcheck ./...; fi
|
|
||||||
- go vet .
|
|
||||||
- go tool vet --shadow .
|
|
65
cmd/vendor/github.com/cockroachdb/cmux/README.md
generated
vendored
65
cmd/vendor/github.com/cockroachdb/cmux/README.md
generated
vendored
@ -1,65 +0,0 @@
|
|||||||
# cmux: Connection Mux [](https://travis-ci.org/cockroachdb/cmux) [](https://godoc.org/github.com/cockroachdb/cmux)
|
|
||||||
|
|
||||||
cmux is a generic Go library to multiplex connections based on their payload.
|
|
||||||
Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any
|
|
||||||
other protocol on the same TCP listener.
|
|
||||||
|
|
||||||
## How-To
|
|
||||||
Simply create your main listener, create a cmux for that listener,
|
|
||||||
and then match connections:
|
|
||||||
```go
|
|
||||||
// Create the main listener.
|
|
||||||
l, err := net.Listen("tcp", ":23456")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a cmux.
|
|
||||||
m := cmux.New(l)
|
|
||||||
|
|
||||||
// Match connections in order:
|
|
||||||
// First grpc, then HTTP, and otherwise Go RPC/TCP.
|
|
||||||
grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
|
|
||||||
httpL := m.Match(cmux.HTTP1Fast())
|
|
||||||
trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.
|
|
||||||
|
|
||||||
// Create your protocol servers.
|
|
||||||
grpcS := grpc.NewServer()
|
|
||||||
grpchello.RegisterGreeterServer(grpcs, &server{})
|
|
||||||
|
|
||||||
httpS := &http.Server{
|
|
||||||
Handler: &helloHTTP1Handler{},
|
|
||||||
}
|
|
||||||
|
|
||||||
trpcS := rpc.NewServer()
|
|
||||||
s.Register(&ExampleRPCRcvr{})
|
|
||||||
|
|
||||||
// Use the muxed listeners for your servers.
|
|
||||||
go grpcS.Serve(grpcL)
|
|
||||||
go httpS.Serve(httpL)
|
|
||||||
go trpcS.Accept(trpcL)
|
|
||||||
|
|
||||||
// Start serving!
|
|
||||||
m.Serve()
|
|
||||||
```
|
|
||||||
|
|
||||||
There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples).
|
|
||||||
|
|
||||||
## Performance
|
|
||||||
Since we are only matching the very first bytes of a connection, the
|
|
||||||
performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP
|
|
||||||
streams) is negligible.
|
|
||||||
|
|
||||||
## Limitations
|
|
||||||
* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221)
|
|
||||||
to identify TLS connections; since cmux's lookahead-implementing connection
|
|
||||||
wraps the underlying TLS connection, this type assertion fails. This means you
|
|
||||||
can serve HTTPS using cmux but `http.Request.TLS` will not be set in your
|
|
||||||
handlers. If you are able to wrap TLS around cmux, you can work around this
|
|
||||||
limitation. See https://github.com/cockroachdb/cockroach/commit/83caba2 for an
|
|
||||||
example of this approach.
|
|
||||||
|
|
||||||
* *Different Protocols on The Same Connection*: `cmux` matches the connection
|
|
||||||
when it's accepted. For example, one connection can be either gRPC or REST, but
|
|
||||||
not both. That is, we assume that a client connection is either used for gRPC
|
|
||||||
or REST.
|
|
20
cmd/vendor/github.com/coreos/go-semver/example.go
generated
vendored
Normal file
20
cmd/vendor/github.com/coreos/go-semver/example.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/coreos/go-semver/semver"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
vA, err := semver.NewVersion(os.Args[1])
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
}
|
||||||
|
vB, err := semver.NewVersion(os.Args[2])
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user