Compare commits

...

142 Commits

Author SHA1 Message Date
e694b7bb08 version: 3.4.7
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-04-01 10:46:54 -07:00
e99399d0dc wal: add "etcd_wal_writes_bytes_total"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-04-01 09:30:09 -07:00
b68f8ff31d pkg/ioutil: add "FlushN"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-04-01 09:29:59 -07:00
857dffa386 Merge pull request #11734 from jingyih/automated-cherry-pick-of-#11330-upstream-release-3.4
Cherry pick of #11330 on release-3.4
2020-03-31 14:58:59 -07:00
5f17aa2c8b test: auto detect branch when finding merge base 2020-03-31 10:59:44 -07:00
89b10cf967 mvcc/kvstore: when the number of key-value pairs is greater than one million, compaction takes too long and blocks other requests 2020-03-30 08:21:38 -07:00
bdc9bc1d81 version: 3.4.6
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-29 12:47:27 -07:00
b0bdaaa449 lease: fix memory leak in LeaseGrant when node is follower 2020-03-29 12:47:14 -07:00
e784ba73c2 version: 3.4.5
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 17:24:42 -07:00
35dc623a98 words: whitelist "racey"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 17:24:15 -07:00
130342152a Revert "version: 3.4.5"
This reverts commit 0dc5d577fc.
2020-03-18 17:17:19 -07:00
fc93fbf9de words: whitelist "hasleader"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 17:17:04 -07:00
0dc5d577fc version: 3.4.5
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 17:17:04 -07:00
e63db56cc9 etcdserver/api/v3rpc: handle api version metadata, add metrics
ref.
https://github.com/etcd-io/etcd/pull/11687

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 17:17:04 -07:00
1471e12108 clientv3: embed api version in metadata
ref.
https://github.com/etcd-io/etcd/pull/11687

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>

clientv3: fix racy writes to context key

=== RUN   TestWatchOverlapContextCancel
==================
WARNING: DATA RACE
Write at 0x00c42110dd40 by goroutine 99:
  runtime.mapassign()
      /usr/local/go/src/runtime/hashmap.go:485 +0x0
  github.com/coreos/etcd/clientv3.metadataSet()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/ctx.go:61 +0x8c
  github.com/coreos/etcd/clientv3.withVersion()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/ctx.go:47 +0x137
  github.com/coreos/etcd/clientv3.newStreamClientInterceptor.func1()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/client.go:309 +0x81
  google.golang.org/grpc.NewClientStream()
      /go/src/github.com/coreos/etcd/gopath/src/google.golang.org/grpc/stream.go:101 +0x10e
  github.com/coreos/etcd/etcdserver/etcdserverpb.(*watchClient).Watch()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go:3193 +0xe9
  github.com/coreos/etcd/clientv3.(*watchGrpcStream).openWatchClient()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:788 +0x143
  github.com/coreos/etcd/clientv3.(*watchGrpcStream).newWatchClient()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:700 +0x5c3
  github.com/coreos/etcd/clientv3.(*watchGrpcStream).run()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:431 +0x12b

Previous read at 0x00c42110dd40 by goroutine 130:
  reflect.maplen()
      /usr/local/go/src/runtime/hashmap.go:1165 +0x0
  reflect.Value.MapKeys()
      /usr/local/go/src/reflect/value.go:1090 +0x43b
  fmt.(*pp).printValue()
      /usr/local/go/src/fmt/print.go:741 +0x1885
  fmt.(*pp).printArg()
      /usr/local/go/src/fmt/print.go:682 +0x1b1
  fmt.(*pp).doPrintf()
      /usr/local/go/src/fmt/print.go:998 +0x1cad
  fmt.Sprintf()
      /usr/local/go/src/fmt/print.go:196 +0x77
  github.com/coreos/etcd/clientv3.streamKeyFromCtx()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:825 +0xc8
  github.com/coreos/etcd/clientv3.(*watcher).Watch()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:265 +0x426
  github.com/coreos/etcd/clientv3/integration.testWatchOverlapContextCancel.func1()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/integration/watch_test.go:959 +0x23e

Goroutine 99 (running) created at:
  github.com/coreos/etcd/clientv3.(*watcher).newWatcherGrpcStream()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:236 +0x59d
  github.com/coreos/etcd/clientv3.(*watcher).Watch()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/watch.go:278 +0xbb6
  github.com/coreos/etcd/clientv3/integration.testWatchOverlapContextCancel.func1()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/integration/watch_test.go:959 +0x23e

Goroutine 130 (running) created at:
  github.com/coreos/etcd/clientv3/integration.testWatchOverlapContextCancel()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/integration/watch_test.go:979 +0x76d
  github.com/coreos/etcd/clientv3/integration.TestWatchOverlapContextCancel()
      /go/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/clientv3/integration/watch_test.go:922 +0x44
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:657 +0x107
==================

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 17:17:00 -07:00
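The race above boils down to one goroutine writing a `metadata.MD` map (a plain Go map) while another goroutine formats it with `fmt.Sprintf`. The fix, visible in the new `clientv3/ctx.go` further down, copies the map before mutating it. A minimal sketch of that pattern, assuming only the standard gRPC metadata package:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// withKV returns a context whose outgoing metadata contains k=v without
// mutating metadata that other goroutines may be reading concurrently.
func withKV(ctx context.Context, k, v string) context.Context {
	md, ok := metadata.FromOutgoingContext(ctx)
	if !ok { // no outgoing metadata yet; nothing is shared
		return metadata.NewOutgoingContext(ctx, metadata.Pairs(k, v))
	}
	copied := md.Copy() // copy first; writing to md in place is the race
	copied[k] = []string{v}
	return metadata.NewOutgoingContext(ctx, copied)
}

func main() {
	ctx := withKV(context.Background(), "hasleader", "true")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md)
}
```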
0b9cfa8677 etcdserver/api/etcdhttp: log server-side /health checks
ref.
https://github.com/etcd-io/etcd/pull/11704

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-03-18 16:29:24 -07:00
b66c53ff5f proxy/grpcproxy: add return on error for metrics handler
Signed-off-by: Sam Batschelet <sbatsche@redhat.com>
2020-03-16 12:06:01 -04:00
8f6c3f4d09 Merge pull request #11664 from jingyih/automated-cherry-pick-of-#11638-upstream-release-3.4
Automated cherry pick of #11638 on release-3.4
2020-03-11 19:26:11 -04:00
379d01a0d2 etcdctl: fix member add command
Use the member information from the member add response, which is
guaranteed to be up to date.
2020-02-29 07:18:11 -08:00
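A hedged sketch of what consuming the member-add response looks like through the clientv3 API; the endpoint and peer URL below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	resp, err := cli.MemberAdd(context.Background(), []string{"http://localhost:12380"})
	if err != nil {
		panic(err)
	}
	// Read the member list from the add response itself, which is
	// guaranteed to be current, instead of issuing a separate MemberList.
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.PeerURLs)
	}
}
```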
c65a9e2dd1 version: 3.4.4
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2020-02-24 13:14:02 -08:00
7862f6ed2c Merge pull request #11644 from jingyih/automated-cherry-pick-of-#11640-upstream-release-3.4
Automated cherry pick of #11640 on release-3.4
2020-02-23 14:42:34 +08:00
257319fb18 etcdserver: fix quorum calculation when promoting a learner member
When promoting a learner member, we should not count it as a voting
member already, but take into account only the number of existing voting
members and their current status (started, unstarted) when deciding
whether a learner member can be promoted.

Before this change, it was impossible to grow a quorum from N to N+1
by promoting a learner member.

Fixes: #11633
2020-02-21 23:14:55 -08:00
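A hedged illustration of the corrected rule; the function and parameter names here are hypothetical, not etcd's actual internals:

```go
// isPromotable sketches the corrected check: votersTotal and
// votersStarted count existing voting members only; the learner being
// promoted is not yet a voter, so it must not be pre-counted.
func isPromotable(votersTotal, votersStarted int) bool {
	nAfterPromote := votersTotal + 1 // voter set size after promotion
	quorum := nAfterPromote/2 + 1    // majority of the new voter set
	// The promoted learner is started, so it contributes one vote.
	return votersStarted+1 >= quorum
}
```

With votersTotal=3 and votersStarted=3, promotion is allowed (the quorum of the 4-voter set is 3), which is exactly the N to N+1 growth the old check rejected.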
cdb2dc11b8 Merge pull request #11636 from YoyinZyc/automated-cherry-pick-of-#11621-upstream-release-3.4
Automated cherry pick of #11621 to release-3.4
2020-02-20 13:04:33 +08:00
770674e4a6 etcdserver: corruption check via http
During the corruption check, get the peer's hashKV via an http call.
2020-02-18 14:12:19 -08:00
c10168f718 Merge pull request #11631 from jingyih/automated-cherry-pick-of-#11630-upstream-release-3.4
Automated cherry pick of #11630 to release-3.4
2020-02-16 08:35:23 +08:00
94673a6ba4 mvcc/backend: check for nil boltOpenOptions
Check if boltOpenOptions is nil before using it.
2020-02-15 00:18:26 -08:00
a1bf5574fc Merge pull request #11622 from jpbetz/automated-cherry-pick-of-#11613-origin-release-3.4
Automated cherry pick of #11613 to release-3.4
2020-02-13 14:45:38 -08:00
6d646c442a mvcc/backend: Delete orphaned db.tmp files before defrag 2020-02-13 12:26:54 -08:00
1226686cf3 Merge pull request #11588 from jingyih/automated-cherry-pick-of-#11586-upstream-release-3.4
Automated cherry pick of #11586 on release 3.4
2020-02-04 19:45:59 -08:00
50e12328ac auth: correct logging level 2020-02-04 05:38:58 -08:00
0dc78a144b Merge pull request #11439 from YoyinZyc/automated-cherry-pick-of-#11418-upstream-release-3.4
Automated cherry pick of #11418 to release 3.4
2019-12-11 14:41:06 -08:00
7cf32c262c e2e: test curl auth on onoption user 2019-12-10 12:53:10 -08:00
4a9247a47e auth: fix NoPassWord check when add user 2019-12-10 12:53:10 -08:00
ac63c2fbd0 Merge pull request #11415 from YoyinZyc/automated-cherry-pick-of-#11413-upstream-release-3.4
Automated cherry pick of #11413 to release-3.4
2019-12-02 14:51:47 -08:00
ae5bd3c268 auth: fix user.Options nil pointer 2019-12-02 14:44:15 -08:00
94e46ba0d7 Merge pull request #11403 from jingyih/automated-cherry-pick-of-#11400-upstream-release-3.4
Automated cherry pick of #11400 on release 3.4
2019-11-27 13:28:34 -08:00
8c10973820 mvcc/kvstore: fix compact bug 2019-11-27 13:07:47 -07:00
1af0b51537 Merge pull request #11393 from jingyih/automated-cherry-pick-of-#11374-upstream-release-3.4
Automated cherry pick of #11374 on release 3.4
2019-11-26 15:02:27 -08:00
f4669c3b62 mvcc: update to "etcd_debugging_mvcc_total_put_size_in_bytes" 2019-11-26 14:03:07 -08:00
55c3476abc mvcc: add "etcd_mvcc_put_size_in_bytes" to monitor the throughput of put requests. 2019-11-26 14:03:07 -07:00
b66203c0a1 Merge pull request #11299 from jingyih/automated-cherry-pick-of-#10468-upstream-release-3.4
Automated cherry pick of #10468 on release-3.4
2019-11-05 18:34:22 -08:00
4388404f56 clientv3: fix retry/streamer error message
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-10-31 10:09:18 -07:00
a447d51f23 Merge pull request #11312 from jingyih/automated-cherry-pick-of-#11308-upstream-release-3.4
Automated cherry pick of #11308 on release-3.4
2019-10-31 10:08:34 -07:00
4f3c81d81d etcdserver: wait purge file loop during shutdown
To prevent the purge file loop from accidentally acquiring the file lock
and removing the files during server shutdown.
2019-10-30 16:04:41 -07:00
478da3bf24 integration: disable TestV3AuthOldRevConcurrent
Disable TestV3AuthOldRevConcurrent for now. See
https://github.com/etcd-io/etcd/pull/10468#issuecomment-463253361
2019-10-28 15:03:44 -07:00
d6b30e43cd etcdserver: remove auth validation loop
Remove the auth validation loop in v3_server.raftRequest(). Re-validation
when the error ErrAuthOldRevision occurs should be handled on the client side.
2019-10-28 15:03:44 -07:00
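Since the server no longer re-validates in a loop, a client may now surface ErrAuthOldRevision directly. A hedged sketch (not etcd's built-in behavior) of handling it on the client side:

```go
import (
	"context"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
)

// getWithReauth is a sketch only: real code should bound retries and
// refresh credentials explicitly if the error persists.
func getWithReauth(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	resp, err := cli.Get(context.Background(), key)
	if err != nil && rpctypes.Error(err) == rpctypes.ErrAuthOldRevision {
		// The auth store revision moved under us; retry once so the
		// request is re-issued with freshly validated auth state.
		resp, err = cli.Get(context.Background(), key)
	}
	return resp, err
}
```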
1e98c9642e scripts/release: list GPG key only when tagging is needed
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-10-23 11:13:21 -07:00
3cf2f69b57 version: 3.4.3
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-10-23 10:11:46 -07:00
d617055284 *: use Go 1.12.12
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-10-23 10:11:02 -07:00
84db9b0878 Merge pull request #11252 from YoyinZyc/automated-cherry-pick-of-#11247-origin-release-3.4
Automated cherry pick of #11247
2019-10-18 10:30:30 -07:00
6cf418ff6d Merge pull request #11275 from YoyinZyc/stream-support-3.4
rafthttp: add 3.4 stream type
2019-10-18 10:25:53 -07:00
97e68cf4e7 rafthttp: add 3.4 stream type 2019-10-17 14:33:53 -07:00
90556d550d Merge pull request #11269 from jingyih/automated-cherry-pick-of-#11265-upstream-release-3.4
Automated cherry pick of #11265 on release 3.4
2019-10-16 16:50:06 -07:00
a00abf5f2a etcdserver: strip patch version in metrics
Strip patch version in cluster version metrics during node restart.
2019-10-16 16:29:53 -07:00
b3329ebcd2 Merge pull request #11255 from jingyih/automated-cherry-pick-of-#11233-#11254-upstream-release-3.4
Automated cherry pick of #11233 #11254 on release 3.4
2019-10-15 11:06:09 -07:00
b67862c0a6 etcdserver: strip patch version in cluster version
Strip patch version in cluster version metrics.
2019-10-14 17:37:49 -07:00
6a699b6b7f etcdserver: unset old cluster version in metrics 2019-10-14 17:35:10 -07:00
bb5ba14aac Add version, tag and branch checks to release script 2019-10-14 12:55:17 -07:00
c3dc994567 Merge pull request #11243 from YoyinZyc/automated-cherry-pick-of-#11237-origin-release-3.4
Automated cherry pick of #11237
2019-10-11 12:38:10 -07:00
f3fbed5b72 Merge branch 'release-3.4' into automated-cherry-pick-of-#11237-origin-release-3.4 2019-10-11 11:17:03 -07:00
e2547907c5 scripts: avoid release builds on darwin machine. 2019-10-11 11:12:30 -07:00
14c239030f Merge pull request #11235 from YoyinZyc/automated-cherry-pick-of-#11234-origin-release-3.4
Automated cherry pick of #11234
2019-10-10 16:28:59 -07:00
7b67e8a5c5 scripts: fix read failure prompt in release; use https for git clone. 2019-10-10 16:20:17 -07:00
bbe86b066c version: 3.4.2 2019-10-09 15:26:52 -07:00
2c36cab87d Merge pull request #11223 from YoyinZyc/automated-cherry-pick-of-#11179-origin-release-3.4
Automated cherry pick of #11179
2019-10-09 13:28:04 -07:00
480d5510f9 etcdserver: trace compaction request; add return parameter 'trace' to applierV3.Compaction()
mvcc: trace compaction request; add input parameter 'trace' to KV.Compact() 2019-10-09 12:40:12 -07:00
9245518363 etcdserver: trace raft requests. 2019-10-09 12:40:12 -07:00
daa432cfa7 etcdserver: add put request steps.
mvcc: add put request steps; add trace to KV.Write() as input parameter. 2019-10-09 12:40:12 -07:00
8717327697 pkg: use zap logger to format the structure log output. 2019-10-09 12:40:12 -07:00
4f1bbff888 pkg: add field to record additional detail of trace; add stepThreshold to reduce log volume. 2019-10-09 12:40:12 -07:00
28bb8037d9 pkg: create package traceutil for tracing.
mvcc: add tracing steps: range from the in-memory index tree; range from boltdb.
etcdserver: add tracing steps: agreement among raft nodes before linearized reading; authentication; filter and sort kv pairs; assemble the response. 2019-10-09 12:40:12 -07:00
03b5e7229b Merge pull request #11213 from jpbetz/automated-cherry-pick-of-#11211-origin-release-3.4
Automated cherry pick of #11211
2019-10-08 18:47:06 -07:00
0781c0327d clientv3: Replace endpoint.ParseHostPort with net.SplitHostPort to fix IPv6 client endpoints 2019-10-08 18:27:03 -07:00
99774d8ed4 Merge pull request #11214 from jpbetz/automated-cherry-pick-of-#11184-origin-release-3.4
Automated cherry pick of #11184
2019-10-08 17:35:02 -07:00
c454344f14 clientv3: Set authority used in cert checks to host of endpoint 2019-10-08 15:35:27 -07:00
dae0a72a42 Merge pull request #11200 from jingyih/automated-cherry-pick-of-#11194-origin-release-3.4
Automated cherry pick of #11194 on release-3.4
2019-10-03 16:03:23 -07:00
c91a6bf14f tests/e2e: fix metrics tests
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-10-03 16:02:39 -07:00
b7ff97f54e etcdctl: fix member add command 2019-10-03 13:52:22 -07:00
d08bb07d6d scripts/build-binary: fix darwin tar commands
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-09-28 11:39:04 -07:00
3a736a81e8 scripts/release: fix SHA256SUMS command
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-09-17 14:12:18 -07:00
a14579fbfb version: 3.4.1
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-09-17 13:53:25 -07:00
ade66a5722 scripts/release: fix docker push command
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-09-17 13:53:12 -07:00
67cc70926d integration: fix bug in for loop, make it break properly 2019-09-17 13:30:12 -07:00
21dcadc83c Merge pull request #11148 from spzala/automated-cherry-pick-of-#11147-upstream-release-3.4
Automated cherry pick of #11147
2019-09-13 11:12:41 -07:00
c7c379e52e embed: expose ZapLoggerBuilder
This exposes the ZapLoggerBuilder in the embed.Config to allow for
custom loggers to be defined and used by embedded etcd.

Fixes #11144
2019-09-13 14:09:54 -04:00
9ed5f76dc0 vendor: upgrade to gRPC v1.23.1
https://github.com/grpc/grpc-go/releases/tag/v1.23.1

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-09-11 14:54:24 -07:00
994865c89e Merge pull request #11133 from jingyih/automated-cherry-pick-of-#11126-origin-release-3.4
Automated cherry pick of #11126 on release-3.4
2019-09-07 00:03:37 -07:00
ccbbb2f8d6 mvcc: add store revision metrics
Add experimental metrics etcd_debugging_mvcc_current_revision and
etcd_debugging_mvcc_compact_revision.
2019-09-06 17:03:21 -07:00
d5f79adc9c etcdserver: remove dup percentage sign in log 2019-09-04 22:03:49 -07:00
8b053b0f44 embed: fix secure server logging message
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-09-03 09:43:08 -07:00
11980f8165 scripts/release: Apply shellcheck findings
I ran https://github.com/koalaman/shellcheck/ over scripts/* and fixed
the findings it returned.

Signed-off-by: Manuel Rüger <manuel@rueg.eu>
2019-09-03 09:42:35 -07:00
41d4e2b276 scripts/release: rename SHA256SUM to SHA256SUMS
These files are commonly called SHA256SUMS and with this change rget
works for v3.4.0 as well.

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-30 13:35:40 -07:00
898bd1351f version: 3.4.0
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-30 08:09:55 -07:00
d04d96c9ac tests/e2e: run metrics test again
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-30 08:09:32 -07:00
21edf98fdb Documentation:fix clerical error 2019-08-30 08:08:47 -07:00
a4f7c65ef8 vendor: x/sys and x/net to support building on Risc-V
Signed-off-by: Carlos de Paula <me@carlosedp.com>
2019-08-29 14:03:59 -07:00
c3a9eec843 scripts/release: fix sha256sum
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-29 09:38:57 -07:00
e5528acf57 version: 3.4.0-rc.4
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-29 08:53:10 -07:00
9977550ae9 Merge pull request #11091 from hexfusion/automated-cherry-pick-of-#11087-upstream-release-3.4
Automated cherry pick of #11087 on release 3.4
2019-08-29 08:39:11 -07:00
4d7a6e2755 scripts/release: add sha256sum summary of release assets
Signed-off-by: Sam Batschelet <sbatsche@redhat.com>
2019-08-29 11:33:16 +00:00
5e8757c3c5 Documentation: Add section headers to etcd Learner
In the Background section, the document describes various challenges for cluster membership change.
Added a section header for each described case for better readability.
2019-08-27 10:18:34 -07:00
012e38fef3 version: 3.4.0-rc.3
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-27 09:50:54 -07:00
41a2cfa122 pkg/logutil: change to "MergeOutputPaths"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-27 09:50:26 -07:00
9f8a1edf38 embed: fix "--log-outputs" setup without "stderr"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-27 09:50:17 -07:00
165ba72593 raft/log_test: fixed wrong index 2019-08-26 12:37:07 -07:00
9c850ccef0 raft: fixed some typos and simplify minor logic 2019-08-26 12:37:02 -07:00
61d6efda4c etcdserver: add check for nil options 2019-08-26 10:48:20 -07:00
b76f149c35 tests/e2e: skip metrics tests for now
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-26 00:02:48 -07:00
5e33bb1a95 Documentation: snapshot can be requested from one etcd node only
Updated the Snapshot section of demo.md to reflect that a snapshot can be requested from only one etcd node at a time.

Fixes: #10855
2019-08-25 23:40:25 -07:00
83bf125d93 clientv3: add nil checks in Close()
Added nil checks in Close() for Watcher and Lease fields
Added test case
2019-08-25 23:40:05 -07:00
d23af41bca tests/e2e: remove string replace for v3.4.0-rc.1
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-23 01:14:42 -07:00
67d0c21bb0 version: 3.4.0-rc.2
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-23 00:37:01 -07:00
18a077d3d3 raft: Write compact if statements 2019-08-23 00:36:44 -07:00
fb6d870e89 Merge pull request #11072 from jingyih/automated-cherry-pick-of-#11069-origin-release-3.4
Automated cherry pick of #11069 on release-3.4
2019-08-23 06:57:12 +08:00
e00224f87e integration: fix TestKVPutError
Give backend quota enough overhead.
2019-08-22 13:33:19 -07:00
2af1caf1a5 functional test: fix typo in agent log
Fix typo in functional test agent log to avoid debugging confusion.
2019-08-20 15:23:13 -07:00
0777eab766 Documentation/upgrades: special upgrade guides for >= 3.3.14
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-16 16:19:22 -07:00
0ecc0d0542 etcdmain: update help message
Add experimental-peer-skip-client-san-verification flag description to
help message. Add default values.
2019-08-16 16:07:06 -07:00
982a8c9bc3 rafttest: print Ready before processing it
It was confusing to see the effects of the Ready (i.e. log messages)
printed before the Ready itself.
2019-08-16 08:10:17 -07:00
b8e3e4e7cb raft: fix a test file name 2019-08-16 08:10:07 -07:00
4090edfb5b raft: document problem with leader self-removal
When a leader removes itself, it will retain its leadership but not
accept new proposals, making the range effectively stuck until manual
intervention triggers a campaign event.

This commit documents the behavior. It does not correct it yet.
2019-08-16 08:09:56 -07:00
078caccce5 raft: add a batch of interaction-driven conf change tests
Verify the behavior in various v1 and v2 conf change operations.
This also includes various fixups; notably, it adds protection
against transitioning in and out of new configs when this is not
permissible.

There are more threads to pull, but those are left for future commits.
2019-08-16 08:09:44 -07:00
d177b7f6b4 raft: proactively probe newly added followers
When the leader applied a new configuration that added voters, it would
not immediately probe these voters, delaying when they would be caught
up.

I noticed this while writing an interaction-driven test, which has now
been cleaned up and completed.
2019-08-16 08:09:33 -07:00
2c1a1d8c32 rafttest: add _breakpoint directive
It is a helper case to attach a debugger to when a problem needs
to be investigated in a longer test file. In such a case, add the
following stanza immediately before the interesting behavior starts:

_breakpoint:
----
ok

and set a breakpoint on the _breakpoint case.
2019-08-16 08:09:23 -07:00
0fc108428e raft: initialize new Progress at LastIndex, not LastIndex+1
Initializing at LastIndex+1 meant that new peers would not be probed
immediately when they appeared in the leader's config, which delays
their getting caught up.
2019-08-16 08:09:11 -07:00
df489e7a2c raft/rafttest: fix stabilize handler
It was bailing out too early.
2019-08-16 08:08:28 -07:00
f13a5102ec tests/e2e: fix version matching
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-15 14:46:19 -07:00
c9465f51d2 *: use Go 1.12.9
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-15 14:40:46 -07:00
8f85f0dc26 version: 3.4.0-rc.1
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-15 13:45:25 -07:00
0161e72d8d mvcc: keep 64-bit alignment in "store" struct
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-15 13:31:52 -07:00
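Context for the alignment fix: sync/atomic's 64-bit operations require 8-byte alignment, and on 32-bit platforms Go guarantees that only for the first word of an allocated struct. A sketch of the rule with a pared-down struct (the field set is abbreviated from the real one):

```go
package mvcc

import (
	"sync"
	"sync/atomic"
)

// store (abridged): fields accessed with sync/atomic are kept first so
// they stay 8-byte aligned even on 32-bit platforms, where Go aligns
// only the start of an allocation to 64 bits.
type store struct {
	// consistentIndex is read/written atomically and must stay first.
	consistentIndex uint64

	mu  sync.RWMutex // ordinary lock-protected state follows
	rev int64
}

func (s *store) ConsistentIndex() uint64 {
	return atomic.LoadUint64(&s.consistentIndex)
}
```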
1691eec2db clientv3/integration: fix "mvcc.NewStore" call
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-15 13:31:46 -07:00
1e213b7ab6 *: Add experimental-compaction-batch-limit flag
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-15 13:31:39 -07:00
b30c1eb2c8 mvcc: Optimize compaction for short commit pauses 2019-08-15 13:29:28 -07:00
a0be90f450 Documentation/upgrades: update
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-14 17:01:19 -07:00
8110a96f69 scripts/release: clean up minor tag docker commands
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 22:01:10 -07:00
8e05c73fa7 Makefile: explicit about GOOS in docker-test builds
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 16:57:22 -07:00
970ca9fa43 Documentation/upgrades: highlight "--enable-v2=false"
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 15:32:46 -07:00
a481ee809f vendor: update "net/http2" to latest
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 14:44:59 -07:00
4d06d3b498 vendor: upgrade grpc-go to 1.23.0
https://github.com/grpc/grpc-go/releases/tag/v1.23.0

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 14:44:53 -07:00
98462b52d1 *: use Go 1.12.8
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 12:56:11 -07:00
2a8d09b83b clientv3: use Endpoints(), fix context creation
If overwritten, the previous context should be canceled first.

Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 12:43:49 -07:00
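The context fix is the pattern below, shown in isolation (and visible in the client.go hunk further down): when a dial timeout replaces the cancelable context, the first context must be canceled before the variable is overwritten, or it leaks until the parent is done. A minimal sketch:

```go
package main

import (
	"context"
	"time"
)

// withOptionalTimeout replaces a cancelable context with a timeout
// context; the first cancel must run before the variable is overwritten,
// or its resources are held until the parent context ends.
func withOptionalTimeout(parent context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	if d > 0 {
		cancel() // release the first context before overwriting it
		ctx, cancel = context.WithTimeout(parent, d)
	}
	return ctx, cancel
}
```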
49c6e87f74 version: 3.4.0-pre
Signed-off-by: Gyuho Lee <leegyuho@amazon.com>
2019-08-13 12:43:40 -07:00
334 changed files with 43257 additions and 10964 deletions


@ -6,7 +6,7 @@ sudo: required
services: docker
go:
-- 1.12.7
+- 1.12.12
notifications:
on_success: never
@ -27,9 +27,9 @@ env:
matrix:
fast_finish: true
allow_failures:
-- go: 1.12.7
+- go: 1.12.12
env: TARGET=linux-amd64-grpcproxy
-- go: 1.12.7
+- go: 1.12.12
env: TARGET=linux-386-unit
before_install:

.words

@ -36,6 +36,8 @@ iff
inflight
keepalive
keepalives
hasleader
racey
keyspace
linearization
liveness
@ -95,6 +97,7 @@ jitter
WithBackoff
BackoffLinearWithJitter
jitter
WithDialer
WithMax
ServerStreams
BidiStreams


@ -271,7 +271,10 @@ etcdctl --endpoints=$ENDPOINTS endpoint health
<img src="https://storage.googleapis.com/etcd/demo/11_etcdctl_snapshot_2016051001.gif" alt="11_etcdctl_snapshot_2016051001"/>
Snapshot can only be requested from one etcd node, so the `--endpoints` flag should contain only one endpoint.
```
ENDPOINTS=$HOST_1:2379
etcdctl --endpoints=$ENDPOINTS snapshot save my.db
Snapshot saved at my.db
```


@ -72,7 +72,7 @@ etcdctl provides a `snapshot` command to create backups. See [backup][backup] fo
When replacing an etcd node, it's important to remove the member first and then add its replacement.
-etcd employs distributed consensus based on a quorum model; (n+1)/2 members, a majority, must agree on a proposal before it can be committed to the cluster. These proposals include key-value updates and membership changes. This model totally avoids any possibility of split brain inconsistency. The downside is permanent quorum loss is catastrophic.
+etcd employs distributed consensus based on a quorum model; (n/2)+1 members, a majority, must agree on a proposal before it can be committed to the cluster. These proposals include key-value updates and membership changes. This model totally avoids any possibility of split brain inconsistency. The downside is permanent quorum loss is catastrophic.
How this applies to membership: If a 3-member cluster has 1 downed member, it can still make forward progress because the quorum is 2 and 2 members are still live. However, adding a new member to a 3-member cluster will increase the quorum to 3 because 3 votes are required for a majority of 4 members. Since the quorum increased, this extra member buys nothing in terms of fault tolerance; the cluster is still one node failure away from being unrecoverable.
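The arithmetic in this paragraph is easy to verify directly; a small sketch:

```go
package main

import "fmt"

func quorum(n int) int { return n/2 + 1 }

func main() {
	for _, n := range []int{3, 4, 5} {
		fmt.Printf("members=%d quorum=%d tolerated failures=%d\n",
			n, quorum(n), n-quorum(n))
	}
	// members=3 quorum=2 tolerated failures=1
	// members=4 quorum=3 tolerated failures=1  (a 4th member buys nothing)
	// members=5 quorum=3 tolerated failures=2
}
```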


@ -13,26 +13,34 @@ Background
Membership reconfiguration has been one of the biggest operational challenges. Let's review the common challenges.
### 1. New Cluster member overloads Leader
A newly joined etcd member starts with no data, thus demanding more updates from the leader until it catches up with the leader's logs. Then the leader's network is more likely to be overloaded, blocking or dropping leader heartbeats to followers. In such a case, a follower may election-timeout and start a new leader election. That is, a cluster with a new member is more vulnerable to leader elections. Both leader election and the subsequent update propagation to the new member are prone to causing periods of cluster unavailability (see *Figure 1*).
![server-learner-figure-01](img/server-learner-figure-01.png)
### 2. Network Partitions scenarios
What if a network partition happens? That depends on the leader's partition. If the leader still maintains the active quorum, the cluster continues to operate (see *Figure 2*).
![server-learner-figure-02](img/server-learner-figure-02.png)
#### 2.1 Leader isolation
What if the leader becomes isolated from the rest of the cluster? The leader monitors the progress of each follower. When the leader loses connectivity to the quorum, it reverts back to follower, which affects cluster availability (see *Figure 3*).
![server-learner-figure-03](img/server-learner-figure-03.png)
-When a new node is added to 3 node cluster, the cluster size becomes 4 and the quorum size becomes 3. What if a new node had joined the cluster, and then network partition happens? It depends on which partition the new member gets located after partition. If the new node happens to be located in the same partition as leaders, the leader still maintains the active quorum of 3. No leadership election happens, and no cluster availability gets affected (see *Figure 4*).
+When a new node is added to a 3 node cluster, the cluster size becomes 4 and the quorum size becomes 3. What if a new node had joined the cluster, and then a network partition happens? It depends on which partition the new member is located in after the partition.
+#### 2.2 Cluster Split 3+1
+If the new node happens to be located in the same partition as the leader's, the leader still maintains the active quorum of 3. No leadership election happens, and cluster availability is unaffected (see *Figure 4*).
![server-learner-figure-04](img/server-learner-figure-04.png)
#### 2.3 Cluster Split 2+2
If the cluster is 2-and-2 partitioned, then neither partition maintains the quorum of 3. In this case, a leadership election happens (see *Figure 5*).
![server-learner-figure-05](img/server-learner-figure-05.png)
#### 2.4 Quorum Lost
What if a network partition happens first, and then a new member gets added? A partitioned 3-node cluster already has one disconnected follower. When a new member is added, the quorum changes from 2 to 3. Now, this cluster has only 2 active nodes out of 4, thus losing quorum and starting a new leadership election (see *Figure 6*).
![server-learner-figure-06](img/server-learner-figure-06.png)
@ -43,6 +51,7 @@ Adding a new member to a 1-node cluster changes the quorum size to 2, immediatel
![server-learner-figure-07](img/server-learner-figure-07.png)
### 3. Cluster Misconfigurations
An even worse case is when an added member is misconfigured. Membership reconfiguration is a two-step process: “etcdctl member add” and starting an etcd server process with the given peer URL. That is, the “member add” command is applied regardless of the URL, even when the URL value is invalid. If the first step is applied with invalid URLs, the second step cannot even start the new etcd. Once the cluster loses quorum, there is no way to revert the membership change (see *Figure 8*).
![server-learner-figure-08](img/server-learner-figure-08.png)


@ -446,6 +446,11 @@ Follow the instructions when using these flags.
+ default: 0s
+ env variable: ETCD_EXPERIMENTAL_CORRUPT_CHECK_TIME
### --experimental-compaction-batch-limit
+ Sets the maximum revisions deleted in each compaction batch.
+ default: 1000
+ env variable: ETCD_EXPERIMENTAL_COMPACTION_BATCH_LIMIT
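The same limit is reachable from embedded etcd via the config field this change adds (`ExperimentalCompactionBatchLimit`, shown in the embed/config.go hunk further down); a hedged sketch:

```go
package main

import (
	"log"

	"go.etcd.io/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // placeholder data directory
	// Equivalent of --experimental-compaction-batch-limit=1000: delete at
	// most 1000 revisions per compaction batch so long compactions do not
	// block other requests.
	cfg.ExperimentalCompactionBatchLimit = 1000

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify()
}
```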
[build-cluster]: clustering.md#static
[reconfig]: runtime-configuration.md
[discovery]: clustering.md#discovery


@ -369,6 +369,52 @@ After
docker pull gcr.io/etcd-development/etcd:v3.3.0
```
### Upgrades to >= v3.3.14
[v3.3.14](https://github.com/etcd-io/etcd/releases/tag/v3.3.14) had to include some features from 3.4 while trying to minimize the differences between client balancer implementations. This release fixes ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
`grpc.ErrClientConnClosing` has been [deprecated in gRPC >= 1.10](https://github.com/grpc/grpc-go/pull/1854).
```diff
import (
+ "go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
_, err := kvc.Get(ctx, "a")
-if err == grpc.ErrClientConnClosing {
+if clientv3.IsConnCanceled(err) {
// or
+s, ok := status.FromError(err)
+if ok {
+ if s.Code() == codes.Canceled
```
[The new client balancer](https://github.com/etcd-io/etcd/blob/master/Documentation/learning/design-client.md) uses an asynchronous resolver to pass endpoints to the gRPC dial function. As a result, [v3.3.14](https://github.com/etcd-io/etcd/releases/tag/v3.3.14) or later requires `grpc.WithBlock` dial option to wait until the underlying connection is up.
```diff
import (
"time"
"go.etcd.io/etcd/clientv3"
+ "google.golang.org/grpc"
)
+// "grpc.WithBlock()" to block until the underlying connection is up
ccfg := clientv3.Config{
Endpoints: []string{"localhost:2379"},
DialTimeout: time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
DialKeepAliveTime: time.Second,
DialKeepAliveTimeout: 500 * time.Millisecond,
}
```
Please see [CHANGELOG](https://github.com/etcd-io/etcd/blob/master/CHANGELOG-3.3.md) for a full list of changes.
### Server upgrade checklists
#### Upgrade requirements


@ -35,6 +35,21 @@ OK
+etcdctl put foo bar
```
#### Make `etcd --enable-v2=false` default
[`etcd --enable-v2=false`](https://github.com/etcd-io/etcd/pull/10935) is now the default.
This means that, unless `etcd --enable-v2=true` is specified, the etcd v3.4 server will not serve v2 API requests.
If the v2 API is in use, make sure to enable it in v3.4:
```diff
-etcd
+etcd --enable-v2=true
```
Other HTTP APIs will still work (e.g. `[CLIENT-URL]/metrics`, `[CLIENT-URL]/health`, v3 gRPC gateway).
#### Deprecated `etcd --ca-file` and `etcd --peer-ca-file` flags
`--ca-file` and `--peer-ca-file` flags are deprecated; they have been deprecated since v2.1.
@ -72,6 +87,27 @@ _, err := kvc.Get(ctx, "a")
+ if s.Code() == codes.Canceled
```
#### Require `grpc.WithBlock` for client dial
[The new client balancer](https://github.com/etcd-io/etcd/blob/master/Documentation/learning/design-client.md) uses an asynchronous resolver to pass endpoints to the gRPC dial function. As a result, v3.4 client requires `grpc.WithBlock` dial option to wait until the underlying connection is up.
```diff
import (
"time"
"go.etcd.io/etcd/clientv3"
+ "google.golang.org/grpc"
)
+// "grpc.WithBlock()" to block until the underlying connection is up
ccfg := clientv3.Config{
Endpoints: []string{"localhost:2379"},
DialTimeout: time.Second,
+ DialOptions: []grpc.DialOption{grpc.WithBlock()},
DialKeepAliveTime: time.Second,
DialKeepAliveTimeout: 500 * time.Millisecond,
}
```
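Assembled into a complete program, the post-upgrade dial code from the diff above looks roughly like this:

```go
package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"google.golang.org/grpc"
)

func main() {
	ccfg := clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: time.Second,
		// Block until the underlying connection is up; without this,
		// clientv3.New returns before the connection is established.
		DialOptions:          []grpc.DialOption{grpc.WithBlock()},
		DialKeepAliveTime:    time.Second,
		DialKeepAliveTimeout: 500 * time.Millisecond,
	}
	cli, err := clientv3.New(ccfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```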
#### Deprecating `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metrics
v3.4 promotes `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metrics to `etcd_mvcc_db_total_size_in_bytes`, in order to encourage etcd storage monitoring.


@ -51,7 +51,7 @@ docker-remove:
-GO_VERSION ?= 1.12.7
+GO_VERSION ?= 1.12.12
ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
@ -65,11 +65,11 @@ endif
# Example:
-# GO_VERSION=1.12.7 make build-docker-test
+# GO_VERSION=1.12.12 make build-docker-test
# make build-docker-test
#
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
-# GO_VERSION=1.12.7 make push-docker-test
+# GO_VERSION=1.12.12 make push-docker-test
# make push-docker-test
#
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
@ -104,7 +104,7 @@ compile-with-docker-test:
--rm \
--mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \
gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
/bin/bash -c "GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version"
/bin/bash -c "GO_BUILD_FLAGS=-v GOOS=linux GOARCH=amd64 ./build && ./bin/etcd --version"
compile-setup-gopath-with-docker-test:
$(info GO_VERSION: $(GO_VERSION))
@ -112,7 +112,7 @@ compile-setup-gopath-with-docker-test:
--rm \
--mount type=bind,source=`pwd`,destination=/etcd \
gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
/bin/bash -c "cd /etcd && ETCD_SETUP_GOPATH=1 GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version && rm -rf ./gopath"
/bin/bash -c "cd /etcd && ETCD_SETUP_GOPATH=1 GO_BUILD_FLAGS=-v GOOS=linux GOARCH=amd64 ./build && ./bin/etcd --version && rm -rf ./gopath"


@ -105,7 +105,7 @@ func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64)
token, err := tk.SignedString(t.key)
if err != nil {
if t.lg != nil {
-t.lg.Warn(
+t.lg.Debug(
"failed to sign a JWT token",
zap.String("user-name", username),
zap.Uint64("revision", revision),
@ -118,7 +118,7 @@ func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64)
}
if t.lg != nil {
-t.lg.Info(
+t.lg.Debug(
"created/assigned a new JWT token",
zap.String("user-name", username),
zap.Uint64("revision", revision),
@ -136,7 +136,7 @@ func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, e
err = opts.ParseWithDefaults(optMap)
if err != nil {
if lg != nil {
lg.Warn("problem loading JWT options", zap.Error(err))
lg.Error("problem loading JWT options", zap.Error(err))
} else {
plog.Errorf("problem loading JWT options: %s", err)
}


@ -306,7 +306,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
return nil, ErrAuthFailed
}
-if user.Options.NoPassword {
+if user.Options != nil && user.Options.NoPassword {
return nil, ErrAuthFailed
}
@ -344,7 +344,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
return 0, ErrAuthFailed
}
-if user.Options.NoPassword {
+if user.Options != nil && user.Options.NoPassword {
return 0, ErrAuthFailed
}
@ -388,7 +388,8 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
var hashed []byte
var err error
-if !r.Options.NoPassword {
+noPassword := r.Options != nil && r.Options.NoPassword
+if !noPassword {
hashed, err = bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost)
if err != nil {
if as.lg != nil {


@ -16,7 +16,9 @@
package endpoint
import (
"context"
"fmt"
"net"
"net/url"
"strings"
"sync"
@ -228,13 +230,18 @@ func ParseTarget(target string) (string, string, error) {
return parts[0], parts[1], nil
}
-// ParseHostPort splits a "<host>:<port>" string into the host and port parts.
-// The port part is optional.
-func ParseHostPort(hostPort string) (host string, port string) {
-parts := strings.SplitN(hostPort, ":", 2)
-host = parts[0]
-if len(parts) > 1 {
-port = parts[1]
-}
-return host, port
-}
+// Dialer dials an endpoint using net.Dialer.
+// Context cancelation and timeout are supported.
+func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+proto, host, _ := ParseEndpoint(dialEp)
+select {
+case <-ctx.Done():
+return nil, ctx.Err()
+default:
+}
+dialer := &net.Dialer{}
+if deadline, ok := ctx.Deadline(); ok {
+dialer.Deadline = deadline
+}
+return dialer.DialContext(ctx, proto, host)
+}


@ -37,7 +37,6 @@ import (
"google.golang.org/grpc/codes"
grpccredentials "google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@ -129,8 +128,12 @@ func NewFromURLs(urls []string) (*Client, error) {
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
-c.Watcher.Close()
-c.Lease.Close()
+if c.Watcher != nil {
+c.Watcher.Close()
+}
+if c.Lease != nil {
+c.Lease.Close()
+}
if c.resolverGroup != nil {
c.resolverGroup.Close()
}
@ -226,24 +229,17 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts
}
opts = append(opts, dopts...)
-// Provide a net dialer that supports cancelation and timeout.
-f := func(dialEp string, t time.Duration) (net.Conn, error) {
-proto, host, _ := endpoint.ParseEndpoint(dialEp)
-select {
-case <-c.ctx.Done():
-return nil, c.ctx.Err()
-default:
-}
-dialer := &net.Dialer{Timeout: t}
-return dialer.DialContext(c.ctx, proto, host)
-}
-opts = append(opts, grpc.WithDialer(f))
+dialer := endpoint.Dialer
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(creds))
// gRPC load balancer workaround. See credentials.transportCredential for details.
if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
dialer = credsDialer.Dialer
}
} else {
opts = append(opts, grpc.WithInsecure())
}
opts = append(opts, grpc.WithContextDialer(dialer))
// Interceptor retry and backoff.
// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
@ -262,7 +258,10 @@ func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
-creds := c.directDialCreds(ep)
+creds, err := c.directDialCreds(ep)
+if err != nil {
+return nil, err
+}
// Use the grpc passthrough resolver to directly dial a single endpoint.
// This resolver passes through the 'unix' and 'unixs' endpoints schemes used
// by etcd without modification, allowing us to directly dial endpoints and
@ -274,8 +273,8 @@ func (c *Client) getToken(ctx context.Context) error {
var err error // return last error in a case of fail
var auth *authenticator
for i := 0; i < len(c.cfg.Endpoints); i++ {
ep := c.cfg.Endpoints[i]
eps := c.Endpoints()
for _, ep := range eps {
// use dial options without dopts to avoid reusing the client balancer
var dOpts []grpc.DialOption
_, host, _ := endpoint.ParseEndpoint(ep)
@ -365,8 +364,8 @@ func (c *Client) dial(target string, creds grpccredentials.TransportCredentials,
return conn, nil
}
-func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials {
-_, hostPort, scheme := endpoint.ParseEndpoint(ep)
+func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
+_, host, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
@ -375,12 +374,17 @@ func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials
// The server name must be set to the endpoint hostname without port since grpc
// otherwise attempts to check if x509 cert is valid for the full endpoint
// including the scheme and port, which fails.
-host, _ := endpoint.ParseHostPort(hostPort)
-clone.OverrideServerName(host)
+overrideServerName, _, err := net.SplitHostPort(host)
+if err != nil {
+// Either the host didn't have a port or the host could not be parsed. Either way, continue with the
+// original host string.
+overrideServerName = host
+}
+clone.OverrideServerName(overrideServerName)
creds = clone
}
}
-return creds
+return creds, nil
}
func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
@ -392,13 +396,6 @@ func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCrede
return creds
}
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
-md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-return metadata.NewOutgoingContext(ctx, md)
-}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
@ -519,13 +516,17 @@ func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFracti
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
-errc := make(chan error, len(c.cfg.Endpoints))
+eps := c.Endpoints()
+errc := make(chan error, len(eps))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
-ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+cancel()
+ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
}
-wg.Add(len(c.cfg.Endpoints))
-for _, ep := range c.cfg.Endpoints {
+wg.Add(len(eps))
+for _, ep := range eps {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
@ -537,8 +538,15 @@ func (c *Client) checkVersion() (err error) {
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
-maj, _ = strconv.Atoi(vs[0])
-min, rerr = strconv.Atoi(vs[1])
+var serr error
+if maj, serr = strconv.Atoi(vs[0]); serr != nil {
+errc <- serr
+return
+}
+if min, serr = strconv.Atoi(vs[1]); serr != nil {
+errc <- serr
+return
+}
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
@ -547,7 +555,7 @@ func (c *Client) checkVersion() (err error) {
}(ep)
}
// wait for success
for i := 0; i < len(c.cfg.Endpoints); i++ {
for range eps {
if err = <-errc; err == nil {
break
}
@ -648,3 +656,9 @@ func IsConnCanceled(err error) bool {
// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
return strings.Contains(err.Error(), "grpc: the client connection is closing")
}
// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
type TransportCredentialsWithDialer interface {
grpccredentials.TransportCredentials
Dialer(ctx context.Context, dialEp string) (net.Conn, error)
}


@ -156,3 +156,13 @@ func TestIsHaltErr(t *testing.T) {
t.Errorf("cancel on context should be Halted")
}
}
func TestCloseCtxClient(t *testing.T) {
ctx := context.Background()
c := NewCtxClient(ctx)
err := c.Close()
// Close returns ctx.toErr; a nil error means an open Done channel
if err == nil {
t.Errorf("failed to Close the client. %v", err)
}
}


@ -22,6 +22,7 @@ import (
"net"
"sync"
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
grpccredentials "google.golang.org/grpc/credentials"
)
@ -65,38 +66,37 @@ func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
}
// transportCredential implements "grpccredentials.TransportCredentials" interface.
// transportCredential wraps TransportCredentials to track which
// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
// hostname or IP of the dialed endpoint.
// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
// in their TLS certs.
// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
// when dialing.
type transportCredential struct {
gtc grpccredentials.TransportCredentials
mu sync.Mutex
// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
// endpoint provided to the dialer when dialing
addrToEndpoint map[string]string
}
func newTransportCredential(cfg *tls.Config) *transportCredential {
return &transportCredential{
-gtc: grpccredentials.NewTLS(cfg),
+gtc:            grpccredentials.NewTLS(cfg),
+addrToEndpoint: map[string]string{},
}
}
func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
-// Only overwrite when authority is an IP address!
-// Let's say, a server runs SRV records on "etcd.local" that resolves
-// to "m1.etcd.local", and its SAN field also includes "m1.etcd.local".
-// But what if SAN does not include its resolved IP address (e.g. 127.0.0.1)?
-// Then, the server should only authenticate using its DNS hostname "m1.etcd.local",
-// instead of overwriting it with its IP address.
-// And we do not overwrite "localhost" either. Only overwrite IP addresses!
-if isIP(authority) {
-target := rawConn.RemoteAddr().String()
-if authority != target {
-// When user dials with "grpc.WithDialer", "grpc.DialContext" "cc.parsedTarget"
-// update only happens once. This is problematic, because when TLS is enabled,
-// retries happen through "grpc.WithDialer" with static "cc.parsedTarget" from
-// the initial dial call.
-// If the server authenticates by IP addresses, we want to set a new endpoint as
-// a new authority. Otherwise
-// "transport: authentication handshake failed: x509: certificate is valid for 127.0.0.1, 192.168.121.180, not 192.168.223.156"
-// when the new dial target is "192.168.121.180" whose certificate host name is also "192.168.121.180"
-// but client tries to authenticate with previously set "cc.parsedTarget" field "192.168.223.156"
-authority = target
-}
-}
+// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
+tc.mu.Lock()
+dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
+tc.mu.Unlock()
+if ok {
+_, host, _ := endpoint.ParseEndpoint(dialEp)
+authority = host
+}
return tc.gtc.ClientHandshake(ctx, authority, rawConn)
}
@ -115,8 +115,15 @@ func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
}
func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
copy := map[string]string{}
tc.mu.Lock()
for k, v := range tc.addrToEndpoint {
copy[k] = v
}
tc.mu.Unlock()
return &transportCredential{
-gtc: tc.gtc.Clone(),
+gtc:            tc.gtc.Clone(),
+addrToEndpoint: copy,
}
}
@ -124,6 +131,17 @@ func (tc *transportCredential) OverrideServerName(serverNameOverride string) err
return tc.gtc.OverrideServerName(serverNameOverride)
}
func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
// Keep track of which addresses are dialed for which endpoints
conn, err := endpoint.Dialer(ctx, dialEp)
if conn != nil {
tc.mu.Lock()
tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
tc.mu.Unlock()
}
return conn, err
}
// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
type perRPCCredential struct {
authToken string

clientv3/ctx.go (new file)

@ -0,0 +1,64 @@
// Copyright 2020 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"strings"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/version"
"google.golang.org/grpc/metadata"
)
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok { // no outgoing metadata ctx key, create one
md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, md)
}
copied := md.Copy() // avoid racey updates
// overwrite/add 'hasleader' key/value
metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, copied)
}
// embeds client version
func withVersion(ctx context.Context) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok { // no outgoing metadata ctx key, create one
md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
return metadata.NewOutgoingContext(ctx, md)
}
copied := md.Copy() // avoid racey updates
// overwrite/add version key/value
metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
return metadata.NewOutgoingContext(ctx, copied)
}
func metadataGet(md metadata.MD, k string) []string {
k = strings.ToLower(k)
return md[k]
}
func metadataSet(md metadata.MD, k string, vals ...string) {
if len(vals) == 0 {
return
}
k = strings.ToLower(k)
md[k] = vals
}
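For usage, `WithRequireLeader` wraps a request context so that a member without a leader rejects the call instead of letting it block; for example:

```go
import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

func getRequiringLeader(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	// Fails fast with an "etcdserver: no leader" error instead of waiting
	// if the contacted member is partitioned away from the leader.
	ctx := clientv3.WithRequireLeader(context.Background())
	return cli.Get(ctx, key)
}
```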

clientv3/ctx_test.go (new file)

@ -0,0 +1,67 @@
// Copyright 2020 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"reflect"
"testing"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/version"
"google.golang.org/grpc/metadata"
)
func TestMetadataWithRequireLeader(t *testing.T) {
ctx := context.TODO()
md, ok := metadata.FromOutgoingContext(ctx)
if ok {
t.Fatal("expected no outgoing metadata ctx key")
}
// add a conflicting key with some other value
md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, "invalid")
// add a key, and expect not be overwritten
metadataSet(md, "hello", "1", "2")
ctx = metadata.NewOutgoingContext(ctx, md)
// expect overwrites but still keep other keys
ctx = WithRequireLeader(ctx)
md, ok = metadata.FromOutgoingContext(ctx)
if !ok {
t.Fatal("expected outgoing metadata ctx key")
}
if ss := metadataGet(md, rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) {
t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
}
if ss := metadataGet(md, "hello"); !reflect.DeepEqual(ss, []string{"1", "2"}) {
t.Fatalf("unexpected metadata for 'hello' %v", ss)
}
}
func TestMetadataWithClientAPIVersion(t *testing.T) {
ctx := withVersion(WithRequireLeader(context.TODO()))
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
t.Fatal("expected outgoing metadata ctx key")
}
if ss := metadataGet(md, rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) {
t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
}
if ss := metadataGet(md, rpctypes.MetadataClientAPIVersionKey); !reflect.DeepEqual(ss, []string{version.APIVersion}) {
t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataClientAPIVersionKey, ss)
}
}


@ -276,8 +276,7 @@ func TestMemberPromote(t *testing.T) {
select {
case <-time.After(500 * time.Millisecond):
case <-timeout:
t.Errorf("failed all attempts to promote learner member, last error: %v", err)
break
t.Fatalf("failed all attempts to promote learner member, last error: %v", err)
}
_, err = capi.MemberPromote(context.Background(), learnerID)


@ -37,8 +37,8 @@ func TestKVPutError(t *testing.T) {
defer testutil.AfterTest(t)
var (
-maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
-quota = int64(int(maxReqBytes) + 8*os.Getpagesize())
+maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
+quota = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486.
)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
defer clus.Terminate(t)


@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"math"
"path/filepath"
"testing"
"time"
@ -149,7 +150,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
clus.Members[0].Stop(t)
dpath := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
b := backend.NewDefaultBackend(dpath)
-s := mvcc.NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
+s := mvcc.NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
rev := 100000
for i := 2; i <= rev; i++ {
s.Put([]byte(fmt.Sprintf("%10d", i)), bytes.Repeat([]byte("a"), 1024), lease.NoLease)


@ -38,6 +38,7 @@ import (
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
@ -103,6 +104,7 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
@ -113,10 +115,9 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
}
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
logger.Warn("retry stream intercept", zap.Error(err))
if err != nil {
-// TODO(mwitkow): Maybe dial and transport errors should be retriable?
-return nil, err
+logger.Error("streamer failed to create ClientStream", zap.Error(err))
+return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
}
retryingStreamer := &serverStreamingRetryingStream{
client: c,
@ -185,6 +186,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
if !attemptRetry {
return lastErr // success or hard failure
}
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
@ -192,12 +194,13 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
}
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
if err != nil {
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
return err
s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
}
s.setStream(newStream)
s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
//fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
if !attemptRetry {
return lastErr
}
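The two ctx = withVersion(ctx) lines above stamp every outgoing RPC with the client's API version via gRPC metadata. A minimal sketch of the same pattern (the key name matches the rpctypes addition later in this compare; the interceptor shown here is illustrative, not etcd's exact code):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// withVersion stamps the outgoing metadata with the client's API version,
// mirroring the ctx = withVersion(ctx) calls added above.
func withVersion(ctx context.Context) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "client-api-version", "3.4")
}

// versionUnaryInterceptor applies the stamp to every unary RPC.
func versionUnaryInterceptor(
	ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
) error {
	return invoker(withVersion(ctx), method, req, reply, cc, opts...)
}

func main() {
	md, _ := metadata.FromOutgoingContext(withVersion(context.Background()))
	fmt.Println(md) // map[client-api-version:[3.4]]
}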

View File

@ -39,6 +39,7 @@ import (
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/fileutil"
"go.etcd.io/etcd/pkg/traceutil"
"go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/raft"
"go.etcd.io/etcd/raft/raftpb"
@ -383,8 +384,8 @@ func (s *v3Manager) saveDB() error {
// a lessor that never times out leases
lessor := lease.NewLessor(s.lg, be, lease.LessorConfig{MinLeaseTTL: math.MaxInt64})
mvs := mvcc.NewStore(s.lg, be, lessor, (*initIndex)(&commit))
txn := mvs.Write()
mvs := mvcc.NewStore(s.lg, be, lessor, (*initIndex)(&commit), mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
txn := mvs.Write(traceutil.TODO())
btx := be.BatchTx()
del := func(k, v []byte) error {
txn.DeleteRange(k, nil)

View File

@ -280,6 +280,7 @@ type Config struct {
ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"`
// ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
// ForceNewCluster starts a new cluster even if previously started; unsafe.
ForceNewCluster bool `json:"force-new-cluster"`
@ -302,8 +303,8 @@ type Config struct {
// It can be multiple when "Logger" is zap.
LogOutputs []string `json:"log-outputs"`
// zapLoggerBuilder is used to build the zap logger.
zapLoggerBuilder func(*Config) error
// ZapLoggerBuilder is used to build the zap logger.
ZapLoggerBuilder func(*Config) error
// logger logs server-side operations. The default is nil,
// and "setupLogging" must be called before starting server.

View File

@ -170,7 +170,10 @@ func (cfg *Config) setupLogging() error {
}
if !isJournal {
copied := logutil.AddOutputPaths(logutil.DefaultZapLoggerConfig, outputPaths, errOutputPaths)
copied := logutil.DefaultZapLoggerConfig
copied.OutputPaths = outputPaths
copied.ErrorOutputPaths = errOutputPaths
copied = logutil.MergeOutputPaths(copied)
copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
if cfg.Debug || cfg.LogLevel == "debug" {
// enable tracing even when "--debug --log-level info"
@ -178,8 +181,8 @@ func (cfg *Config) setupLogging() error {
// TODO: remove "Debug" check in v3.5
grpc.EnableTracing = true
}
if cfg.zapLoggerBuilder == nil {
cfg.zapLoggerBuilder = func(c *Config) error {
if cfg.ZapLoggerBuilder == nil {
cfg.ZapLoggerBuilder = func(c *Config) error {
var err error
c.logger, err = copied.Build()
if err != nil {
@ -232,8 +235,8 @@ func (cfg *Config) setupLogging() error {
syncer,
lvl,
)
if cfg.zapLoggerBuilder == nil {
cfg.zapLoggerBuilder = func(c *Config) error {
if cfg.ZapLoggerBuilder == nil {
cfg.ZapLoggerBuilder = func(c *Config) error {
c.logger = zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer))
c.loggerMu.Lock()
defer c.loggerMu.Unlock()
@ -249,7 +252,7 @@ func (cfg *Config) setupLogging() error {
}
}
err := cfg.zapLoggerBuilder(cfg)
err := cfg.ZapLoggerBuilder(cfg)
if err != nil {
return err
}
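With zapLoggerBuilder exported as ZapLoggerBuilder, embedding applications can route etcd's server logs through their own zap logger. A hedged sketch, assuming the companion helper embed.NewZapCoreLoggerBuilder is available in this release line:

package main

import (
	"log"
	"os"

	"go.etcd.io/etcd/embed"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"
	lg, err := zap.NewProduction()
	if err != nil {
		log.Fatal(err)
	}
	// Route etcd's server logs through our own zap logger (helper assumed).
	cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(lg, lg.Core(), zapcore.AddSync(os.Stderr))
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify()
}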

View File

@ -205,6 +205,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
ForceNewCluster: cfg.ForceNewCluster,
EnableGRPCGateway: cfg.EnableGRPCGateway,
EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
}
print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {

View File

@ -189,7 +189,7 @@ func (sctx *serveCtx) serve(
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
if sctx.lg != nil {
sctx.lg.Info(
"serving client traffic insecurely",
"serving client traffic securely",
zap.String("address", sctx.l.Addr().String()),
)
} else {

View File

@ -156,28 +156,8 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
display.MemberAdd(*resp)
if _, ok := (display).(*simplePrinter); ok {
ctx, cancel = commandCtx(cmd)
listResp, err := cli.MemberList(ctx)
// get latest member list; if there's failover new member might have outdated list
for {
if err != nil {
ExitWithError(ExitError, err)
}
if listResp.Header.MemberId == resp.Header.MemberId {
break
}
// quorum get to sync cluster list
gresp, gerr := cli.Get(ctx, "_")
if gerr != nil {
ExitWithError(ExitError, err)
}
resp.Header.MemberId = gresp.Header.MemberId
listResp, err = cli.MemberList(ctx)
}
cancel()
conf := []string{}
for _, memb := range listResp.Members {
for _, memb := range resp.Members {
for _, u := range memb.PeerURLs {
n := memb.Name
if memb.ID == newID {

View File

@ -255,6 +255,7 @@ func newConfig() *config {
fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state.")
fs.StringVar(&cfg.ec.ExperimentalBackendFreelistType, "experimental-backend-bbolt-freelist-type", cfg.ec.ExperimentalBackendFreelistType, "ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types)")
fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable to persist lease remaining TTL to prevent indefinite auto-renewal of long lived leases.")
fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
// unsafe
fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")

View File

@ -200,10 +200,14 @@ Experimental feature:
Duration of time between cluster corruption check passes.
--experimental-enable-v2v3 ''
Serve v2 requests through the v3 backend under a given prefix.
--experimental-backend-bbolt-freelist-type
--experimental-backend-bbolt-freelist-type 'array'
ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
--experimental-enable-lease-checkpoint
--experimental-enable-lease-checkpoint 'false'
ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
--experimental-compaction-batch-limit 1000
ExperimentalCompactionBatchLimit sets the maximum revisions deleted in each compaction batch.
--experimental-peer-skip-client-san-verification 'false'
Skip verification of SAN field in client certificate for peer connections.
Unsafe feature:
--force-new-cluster 'false'
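The same knob is reachable from an embedded server as from the flag above; a minimal sketch using the ExperimentalCompactionBatchLimit field added to embed.Config earlier in this compare (data dir is a placeholder):

package main

import (
	"log"

	"go.etcd.io/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"
	// Delete at most 1000 revisions per compaction batch, matching the
	// default shown in the help text above.
	cfg.ExperimentalCompactionBatchLimit = 1000
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify()
}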

View File

@ -50,6 +50,7 @@ func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
if r.Method != http.MethodGet {
w.Header().Set("Allow", http.MethodGet)
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
return
}
h := hfunc()
@ -97,11 +98,15 @@ func checkHealth(srv etcdserver.ServerV2) Health {
as := srv.Alarms()
if len(as) > 0 {
h.Health = "false"
for _, v := range as {
plog.Warningf("/health error due to an alarm %s", v.String())
}
}
if h.Health == "true" {
if uint64(srv.Leader()) == raft.None {
h.Health = "false"
plog.Warningf("/health error; no leader (status code %d)", http.StatusServiceUnavailable)
}
}
@ -111,11 +116,13 @@ func checkHealth(srv etcdserver.ServerV2) Health {
cancel()
if err != nil {
h.Health = "false"
plog.Warningf("/health error; QGET failed %v (status code %d)", err, http.StatusServiceUnavailable)
}
}
if h.Health == "true" {
healthSuccess.Inc()
plog.Infof("/health OK (status code %d)", http.StatusOK)
} else {
healthFailed.Inc()
}
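For reference, this handler backs the /health endpoint, so probing it is a plain HTTP GET; a healthy server answers with a JSON body whose health field is "true". A minimal client-side sketch (endpoint address is a placeholder):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:2379/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s %s\n", resp.Status, b) // expect {"health":"true"} when healthy
}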

View File

@ -37,11 +37,17 @@ const (
)
// NewPeerHandler generates an http.Handler to handle etcd peer requests.
func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeer) http.Handler {
return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler())
func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler {
return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler())
}
func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handler, leaseHandler http.Handler) http.Handler {
func newPeerHandler(
lg *zap.Logger,
s etcdserver.Server,
raftHandler http.Handler,
leaseHandler http.Handler,
hashKVHandler http.Handler,
) http.Handler {
peerMembersHandler := newPeerMembersHandler(lg, s.Cluster())
peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s)
@ -55,6 +61,9 @@ func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handle
mux.Handle(leasehttp.LeasePrefix, leaseHandler)
mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
}
if hashKVHandler != nil {
mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler)
}
mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion))
return mux
}

View File

@ -83,7 +83,7 @@ var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Reque
// TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that
// handles raft-prefix requests well.
func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {
ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil)
ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil)
srv := httptest.NewServer(ph)
defer srv.Close()
@ -231,7 +231,7 @@ func TestServeMemberPromoteFails(t *testing.T) {
// TestNewPeerHandlerOnMembersPromotePrefix verifies the request with members promote prefix is routed correctly
func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) {
ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil)
ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil)
srv := httptest.NewServer(ph)
defer srv.Close()

View File

@ -565,6 +565,7 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s
plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
}
}
oldVer := c.version
c.version = ver
mustDetectDowngrade(c.lg, c.version)
if c.v2store != nil {
@ -573,7 +574,10 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s
if c.be != nil {
mustSaveClusterVersionToBackend(c.be, ver)
}
ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": ver.String()}).Set(1)
if oldVer != nil {
ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0)
}
ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1)
onSet(c.lg, ver)
}
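The metrics fix clears the gauge for the previous cluster version before setting the new one, so at most one cluster_version label value reads 1 at a time. A standalone sketch of the flip pattern (metric name is illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var clusterVersion = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{Name: "example_cluster_version"},
	[]string{"cluster_version"},
)

// setVersion clears the gauge for the previous version before setting the
// new one, so only one label value reads 1 at any time.
func setVersion(prev, cur string) {
	if prev != "" {
		clusterVersion.With(prometheus.Labels{"cluster_version": prev}).Set(0)
	}
	clusterVersion.With(prometheus.Labels{"cluster_version": cur}).Set(1)
}

func main() {
	prometheus.MustRegister(clusterVersion)
	setVersion("", "3.3")
	setVersion("3.3", "3.4") // "3.3" drops to 0, "3.4" rises to 1
	fmt.Println("flipped")
}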
@ -653,8 +657,8 @@ func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool {
}
func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool {
nmembers := 1
nstarted := 0
nmembers := 1 // We count the learner to be promoted for the future quorum
nstarted := 1 // and we also count it as started.
for _, member := range c.VotingMembers() {
if member.IsStarted() {

View File

@ -859,3 +859,90 @@ func TestIsReadyToRemoveVotingMember(t *testing.T) {
}
}
}
func TestIsReadyToPromoteMember(t *testing.T) {
tests := []struct {
members []*Member
promoteID uint64
want bool
}{
{
// 1/1 members ready, should succeed (quorum = 1, new quorum = 2)
[]*Member{
newTestMember(1, nil, "1", nil),
newTestMemberAsLearner(2, nil, "2", nil),
},
2,
true,
},
{
// 0/1 members ready, should fail (quorum = 1)
[]*Member{
newTestMember(1, nil, "", nil),
newTestMemberAsLearner(2, nil, "2", nil),
},
2,
false,
},
{
// 2/2 members ready, should succeed (quorum = 2)
[]*Member{
newTestMember(1, nil, "1", nil),
newTestMember(2, nil, "2", nil),
newTestMemberAsLearner(3, nil, "3", nil),
},
3,
true,
},
{
// 1/2 members ready, should succeed (quorum = 2)
[]*Member{
newTestMember(1, nil, "1", nil),
newTestMember(2, nil, "", nil),
newTestMemberAsLearner(3, nil, "3", nil),
},
3,
true,
},
{
// 1/3 members ready, should fail (quorum = 2)
[]*Member{
newTestMember(1, nil, "1", nil),
newTestMember(2, nil, "", nil),
newTestMember(3, nil, "", nil),
newTestMemberAsLearner(4, nil, "4", nil),
},
4,
false,
},
{
// 2/3 members ready, should succeed (quorum = 2, new quorum = 3)
[]*Member{
newTestMember(1, nil, "1", nil),
newTestMember(2, nil, "2", nil),
newTestMember(3, nil, "", nil),
newTestMemberAsLearner(4, nil, "4", nil),
},
4,
true,
},
{
// 2/4 members ready, should succeed (quorum = 3)
[]*Member{
newTestMember(1, nil, "1", nil),
newTestMember(2, nil, "2", nil),
newTestMember(3, nil, "", nil),
newTestMember(4, nil, "", nil),
newTestMemberAsLearner(5, nil, "5", nil),
},
5,
true,
},
}
for i, tt := range tests {
c := newTestCluster(tt.members)
if got := c.IsReadyToPromoteMember(tt.promoteID); got != tt.want {
t.Errorf("%d: isReadyToPromoteMember returned %t, want %t", i, got, tt.want)
}
}
}
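The table above exercises the new counting rule: the learner being promoted is counted both toward the grown membership and as started. A hedged sketch of the arithmetic (not etcd's exact implementation; outputs match the test cases above):

package main

import "fmt"

// readyToPromote reports whether promoting one learner keeps the cluster
// healthy: started voting members plus the learner must still reach the
// quorum of the grown membership.
func readyToPromote(votingStarted, votingTotal int) bool {
	nmembers := votingTotal + 1   // future membership includes the promoted learner
	nstarted := votingStarted + 1 // the learner itself is counted as started
	return nstarted >= nmembers/2+1
}

func main() {
	fmt.Println(readyToPromote(1, 1)) // 1/1 ready: true (new quorum = 2)
	fmt.Println(readyToPromote(0, 1)) // 0/1 ready: false
	fmt.Println(readyToPromote(1, 2)) // 1/2 ready: true (quorum = 2)
	fmt.Println(readyToPromote(1, 3)) // 1/3 ready: false (new quorum = 3)
}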

View File

@ -57,6 +57,7 @@ var (
"3.1.0": {streamTypeMsgAppV2, streamTypeMessage},
"3.2.0": {streamTypeMsgAppV2, streamTypeMessage},
"3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
"3.4.0": {streamTypeMsgAppV2, streamTypeMessage},
}
)

View File

@ -226,6 +226,9 @@ func (s *Snapshotter) snapNames() ([]string, error) {
if err != nil {
return nil, err
}
if err = s.cleanupSnapdir(names); err != nil {
return nil, err
}
snaps := checkSuffix(s.lg, names)
if len(snaps) == 0 {
return nil, ErrNoSnapshot
@ -253,3 +256,21 @@ func checkSuffix(lg *zap.Logger, names []string) []string {
}
return snaps
}
// cleanupSnapdir removes any files that should not be in the snapshot directory:
// - db.tmp prefixed files that can be orphaned by defragmentation
func (s *Snapshotter) cleanupSnapdir(filenames []string) error {
for _, filename := range filenames {
if strings.HasPrefix(filename, "db.tmp") {
if s.lg != nil {
s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename))
} else {
plog.Infof("found orphaned defragmentation file; deleting: %s", filename)
}
if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
return fmt.Errorf("failed to remove orphaned defragmentation file %s: %v", filename, rmErr)
}
}
}
return nil
}

View File

@ -16,17 +16,17 @@ package v3rpc
import (
"context"
"strings"
"sync"
"time"
"github.com/coreos/pkg/capnslog"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/raft"
"github.com/coreos/pkg/capnslog"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
@ -54,6 +54,12 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
md, ok := metadata.FromIncomingContext(ctx)
if ok {
ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey)
if len(vs) > 0 {
ver = vs[0]
}
clientRequests.WithLabelValues("unary", ver).Inc()
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
return nil, rpctypes.ErrGRPCNoLeader
@ -200,6 +206,12 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
md, ok := metadata.FromIncomingContext(ss.Context())
if ok {
ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey)
if len(vs) > 0 {
ver = vs[0]
}
clientRequests.WithLabelValues("stream", ver).Inc()
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
if s.Leader() == types.ID(raft.None) {
return rpctypes.ErrGRPCNoLeader
@ -218,7 +230,6 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
smap.mu.Unlock()
cancel()
}()
}
}
@ -274,3 +285,8 @@ func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
return smap
}
func metadataGet(md metadata.MD, k string) []string {
k = strings.ToLower(k)
return md[k]
}

View File

@ -39,10 +39,20 @@ var (
},
[]string{"Type", "API"},
)
clientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "etcd",
Subsystem: "server",
Name: "client_requests_total",
Help: "The total number of client requests per client version.",
},
[]string{"type", "client_api_version"},
)
)
func init() {
prometheus.MustRegister(sentBytes)
prometheus.MustRegister(receivedBytes)
prometheus.MustRegister(streamFailures)
prometheus.MustRegister(clientRequests)
}

View File

@ -17,4 +17,6 @@ package rpctypes
var (
MetadataRequireLeaderKey = "hasleader"
MetadataHasLeader = "true"
MetadataClientAPIVersionKey = "client-api-version"
)

View File

@ -26,6 +26,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/traceutil"
"go.etcd.io/etcd/pkg/types"
"github.com/gogo/protobuf/proto"
@ -43,17 +44,18 @@ type applyResult struct {
// to being logically reflected by the node. Currently only used for
// Compaction requests.
physc <-chan struct{}
trace *traceutil.Trace
}
// applierV3 is the interface for processing V3 raft messages
type applierV3 interface {
Apply(r *pb.InternalRaftRequest) *applyResult
Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error)
LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
@ -119,15 +121,15 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
switch {
case r.Range != nil:
ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range)
case r.Put != nil:
ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
ar.resp, ar.trace, ar.err = a.s.applyV3.Put(nil, r.Put)
case r.DeleteRange != nil:
ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
case r.Txn != nil:
ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
case r.Compaction != nil:
ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction)
case r.LeaseGrant != nil:
ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
case r.LeaseRevoke != nil:
@ -174,32 +176,39 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
return ar
}
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
resp = &pb.PutResponse{}
resp.Header = &pb.ResponseHeader{}
trace = traceutil.New("put",
a.s.getLogger(),
traceutil.Field{Key: "key", Value: string(p.Key)},
traceutil.Field{Key: "req_size", Value: proto.Size(p)},
)
val, leaseID := p.Value, lease.LeaseID(p.Lease)
if txn == nil {
if leaseID != lease.NoLease {
if l := a.s.lessor.Lookup(leaseID); l == nil {
return nil, lease.ErrLeaseNotFound
return nil, nil, lease.ErrLeaseNotFound
}
}
txn = a.s.KV().Write()
txn = a.s.KV().Write(trace)
defer txn.End()
}
var rr *mvcc.RangeResult
if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
trace.DisableStep()
rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
if err != nil {
return nil, err
return nil, nil, err
}
trace.EnableStep()
trace.Step("get previous kv pair")
}
if p.IgnoreValue || p.IgnoreLease {
if rr == nil || len(rr.KVs) == 0 {
// ignore_{lease,value} flag expects previous key-value pair
return nil, ErrKeyNotFound
return nil, nil, ErrKeyNotFound
}
}
if p.IgnoreValue {
@ -215,7 +224,8 @@ func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.Pu
}
resp.Header.Revision = txn.Put(p.Key, val, leaseID)
return resp, nil
trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
return resp, trace, nil
}
func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
@ -224,7 +234,7 @@ func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequ
end := mkGteRange(dr.RangeEnd)
if txn == nil {
txn = a.s.kv.Write()
txn = a.s.kv.Write(traceutil.TODO())
defer txn.End()
}
@ -245,12 +255,14 @@ func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequ
return resp, nil
}
func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
trace := traceutil.Get(ctx)
resp := &pb.RangeResponse{}
resp.Header = &pb.ResponseHeader{}
if txn == nil {
txn = a.s.kv.Read()
txn = a.s.kv.Read(trace)
defer txn.End()
}
@ -327,7 +339,7 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang
rr.KVs = rr.KVs[:r.Limit]
resp.More = true
}
trace.Step("filter and sort the key-value pairs")
resp.Header.Revision = rr.Rev
resp.Count = int64(rr.Count)
resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
@ -337,12 +349,13 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang
}
resp.Kvs[i] = &rr.KVs[i]
}
trace.Step("assemble the response")
return resp, nil
}
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
isWrite := !isTxnReadonly(rt)
txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(traceutil.TODO()))
txnPath := compareToPath(txn, rt)
if isWrite {
@ -364,7 +377,7 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
// be the revision of the write txn.
if isWrite {
txn.End()
txn = a.s.KV().Write()
txn = a.s.KV().Write(traceutil.TODO())
}
a.applyTxn(txn, rt, txnPath, txnResp)
rev := txn.Rev()
@ -516,7 +529,7 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat
respi := tresp.Responses[i].Response
switch tv := req.Request.(type) {
case *pb.RequestOp_RequestRange:
resp, err := a.Range(txn, tv.RequestRange)
resp, err := a.Range(context.TODO(), txn, tv.RequestRange)
if err != nil {
if lg != nil {
lg.Panic("unexpected error during txn", zap.Error(err))
@ -526,7 +539,7 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat
}
respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
case *pb.RequestOp_RequestPut:
resp, err := a.Put(txn, tv.RequestPut)
resp, _, err := a.Put(txn, tv.RequestPut)
if err != nil {
if lg != nil {
lg.Panic("unexpected error during txn", zap.Error(err))
@ -557,17 +570,22 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat
return txns
}
func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
resp := &pb.CompactionResponse{}
resp.Header = &pb.ResponseHeader{}
ch, err := a.s.KV().Compact(compaction.Revision)
trace := traceutil.New("compact",
a.s.getLogger(),
traceutil.Field{Key: "revision", Value: compaction.Revision},
)
ch, err := a.s.KV().Compact(trace, compaction.Revision)
if err != nil {
return nil, ch, err
return nil, ch, nil, err
}
// get the current revision; which key to get is not important.
rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
resp.Header.Revision = rr.Rev
return resp, ch, err
return resp, ch, trace, err
}
func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
@ -674,8 +692,8 @@ type applierV3Capped struct {
// with Puts so that the number of keys in the store is capped.
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
return nil, ErrNoSpace
func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
return nil, nil, ErrNoSpace
}
func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
@ -824,13 +842,13 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
return &quotaApplierV3{app, NewBackendQuota(s, "v3-applier")}
}
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
ok := a.q.Available(p)
resp, err := a.applierV3.Put(txn, p)
resp, trace, err := a.applierV3.Put(txn, p)
if err == nil && !ok {
err = ErrNoSpace
}
return resp, err
return resp, trace, err
}
func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
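The apply-path changes thread a traceutil.Trace through Put, Range, and Compaction so slow requests can be logged step by step. A hedged sketch of the trace lifecycle, using only the traceutil calls visible in this diff (operation name, fields, and threshold are illustrative):

package main

import (
	"time"

	"go.etcd.io/etcd/pkg/traceutil"
	"go.uber.org/zap"
)

func main() {
	lg := zap.NewExample()
	// Create a named trace with request-level fields, as Put does above.
	trace := traceutil.New("put", lg,
		traceutil.Field{Key: "key", Value: "foo"},
	)
	trace.Step("get previous kv pair") // mark a step boundary
	trace.AddField(traceutil.Field{Key: "response_revision", Value: int64(42)})
	trace.LogIfLong(100 * time.Millisecond) // log only if the trace exceeded the threshold
}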

View File

@ -15,12 +15,14 @@
package etcdserver
import (
"context"
"sync"
"go.etcd.io/etcd/auth"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/pkg/traceutil"
)
type authApplierV3 struct {
@ -61,9 +63,9 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
return ret
}
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
return nil, err
return nil, nil, err
}
if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
@ -71,23 +73,23 @@ func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutRespon
// be written by this user. It means the user cannot revoke the
// lease so attaching the lease to the newly written key should
// be forbidden.
return nil, err
return nil, nil, err
}
if r.PrevKv {
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
if err != nil {
return nil, err
return nil, nil, err
}
}
return aa.applierV3.Put(txn, r)
}
func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
return nil, err
}
return aa.applierV3.Range(txn, r)
return aa.applierV3.Range(ctx, txn, r)
}
func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {

View File

@ -102,7 +102,7 @@ func openBackend(cfg ServerConfig) backend.Backend {
// case, replace the db with the snapshot db sent by the leader.
func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
var cIndex consistentIndex
kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, &cIndex)
kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, &cIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
defer kv.Close()
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
return oldbe, nil

View File

@ -112,6 +112,7 @@ type ServerConfig struct {
AutoCompactionRetention time.Duration
AutoCompactionMode string
CompactionBatchLimit int
QuotaBackendBytes int64
MaxTxnOps uint

View File

@ -15,14 +15,19 @@
package etcdserver
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/pkg/traceutil"
"go.etcd.io/etcd/pkg/types"
"go.uber.org/zap"
@ -229,10 +234,12 @@ func (s *EtcdServer) checkHashKV() error {
mismatch(uint64(s.ID()))
}
checkedCount := 0
for _, p := range peers {
if p.resp == nil {
continue
}
checkedCount++
id := p.resp.Header.MemberId
// leader expects follower's latest revision less than or equal to leader's
@ -297,62 +304,56 @@ func (s *EtcdServer) checkHashKV() error {
mismatch(id)
}
}
if lg != nil {
lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
} else {
plog.Infof("finished peer corruption check")
}
return nil
}
type peerHashKVResp struct {
type peerInfo struct {
id types.ID
eps []string
}
resp *clientv3.HashKVResponse
type peerHashKVResp struct {
peerInfo
resp *pb.HashKVResponse
err error
}
func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
// TODO: handle the case when "s.cluster.Members" have not
// been populated (e.g. no snapshot to load from disk)
mbs := s.cluster.Members()
pss := make([]peerHashKVResp, len(mbs))
for _, m := range mbs {
members := s.cluster.Members()
peers := make([]peerInfo, 0, len(members))
for _, m := range members {
if m.ID == s.ID() {
continue
}
pss = append(pss, peerHashKVResp{id: m.ID, eps: m.PeerURLs})
peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
}
lg := s.getLogger()
for _, p := range pss {
var resps []*peerHashKVResp
for _, p := range peers {
if len(p.eps) == 0 {
continue
}
cli, cerr := clientv3.New(clientv3.Config{
DialTimeout: s.Cfg.ReqTimeout(),
Endpoints: p.eps,
})
if cerr != nil {
if lg != nil {
lg.Warn(
"failed to create client to peer URL",
zap.String("local-member-id", s.ID().String()),
zap.String("remote-peer-id", p.id.String()),
zap.Strings("remote-peer-endpoints", p.eps),
zap.Error(cerr),
)
} else {
plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), p.eps, cerr.Error())
}
continue
}
respsLen := len(resps)
for _, c := range cli.Endpoints() {
var lastErr error
for _, ep := range p.eps {
ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
var resp *clientv3.HashKVResponse
resp, cerr = cli.HashKV(ctx, c, rev)
var resp *pb.HashKVResponse
resp, lastErr = s.getPeerHashKVHTTP(ctx, ep, rev)
cancel()
if cerr == nil {
resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: resp, err: nil})
if lastErr == nil {
resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
break
}
if lg != nil {
@ -360,17 +361,17 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
"failed hash kv request",
zap.String("local-member-id", s.ID().String()),
zap.Int64("requested-revision", rev),
zap.String("remote-peer-endpoint", c),
zap.Error(cerr),
zap.String("remote-peer-endpoint", ep),
zap.Error(lastErr),
)
} else {
plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), lastErr.Error(), ep, rev)
}
}
cli.Close()
// failed to get hashKV from all endpoints of this peer
if respsLen == len(resps) {
resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: nil, err: cerr})
resps = append(resps, &peerHashKVResp{peerInfo: p, resp: nil, err: lastErr})
}
}
return resps
@ -382,11 +383,11 @@ type applierV3Corrupt struct {
func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
return nil, ErrCorrupt
func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
return nil, nil, ErrCorrupt
}
func (a *applierV3Corrupt) Range(txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
return nil, ErrCorrupt
}
@ -398,8 +399,8 @@ func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
return nil, ErrCorrupt
}
func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
return nil, nil, ErrCorrupt
func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
return nil, nil, nil, ErrCorrupt
}
func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
@ -409,3 +410,112 @@ func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
return nil, ErrCorrupt
}
type ServerPeerV2 interface {
ServerPeer
HashKVHandler() http.Handler
}
const PeerHashKVPath = "/members/hashkv"
type hashKVHandler struct {
lg *zap.Logger
server *EtcdServer
}
func (s *EtcdServer) HashKVHandler() http.Handler {
return &hashKVHandler{lg: s.getLogger(), server: s}
}
func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
w.Header().Set("Allow", http.MethodGet)
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return
}
if r.URL.Path != PeerHashKVPath {
http.Error(w, "bad path", http.StatusBadRequest)
return
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "error reading body", http.StatusBadRequest)
return
}
req := &pb.HashKVRequest{}
if err = json.Unmarshal(b, req); err != nil {
h.lg.Warn("failed to unmarshal request", zap.Error(err))
http.Error(w, "error unmarshalling request", http.StatusBadRequest)
return
}
hash, rev, compactRev, err := h.server.KV().HashByRev(req.Revision)
if err != nil {
h.lg.Warn(
"failed to get hashKV",
zap.Int64("requested-revision", req.Revision),
zap.Error(err),
)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash, CompactRevision: compactRev}
respBytes, err := json.Marshal(resp)
if err != nil {
h.lg.Warn("failed to marshal hashKV response", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String())
w.Header().Set("Content-Type", "application/json")
w.Write(respBytes)
}
// getPeerHashKVHTTP fetches the hash of the KV store at the given rev via an HTTP call to the given peer URL
func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int64) (*pb.HashKVResponse, error) {
cc := &http.Client{Transport: s.peerRt}
hashReq := &pb.HashKVRequest{Revision: rev}
hashReqBytes, err := json.Marshal(hashReq)
if err != nil {
return nil, err
}
requestUrl := url + PeerHashKVPath
req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes))
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json")
req.Cancel = ctx.Done()
resp, err := cc.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode == http.StatusBadRequest {
if strings.Contains(string(b), mvcc.ErrCompacted.Error()) {
return nil, rpctypes.ErrCompacted
}
if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) {
return nil, rpctypes.ErrFutureRev
}
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unknown error: %s", string(b))
}
hashResp := &pb.HashKVResponse{}
if err := json.Unmarshal(b, hashResp); err != nil {
return nil, err
}
return hashResp, nil
}
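Taken together, the handler serves JSON-encoded pb.HashKVRequest/pb.HashKVResponse on /members/hashkv over the peer listener, and getPeerHashKVHTTP is its client. A hedged sketch of an equivalent standalone probe (peer URL is a placeholder; the real caller goes through s.peerRt for peer TLS, and the structs below mirror only a subset of the protobuf JSON fields):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

// hashKVRequest/hashKVResponse mirror the JSON shapes of pb.HashKVRequest
// and pb.HashKVResponse used by the handler above (field subset only).
type hashKVRequest struct {
	Revision int64 `json:"revision"`
}
type hashKVResponse struct {
	Hash            uint32 `json:"hash"`
	CompactRevision int64  `json:"compact_revision"`
}

func main() {
	body, _ := json.Marshal(hashKVRequest{Revision: 100})
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:2380/members/hashkv", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	var hr hashKVResponse
	if err := json.Unmarshal(b, &hr); err != nil {
		log.Fatalf("unexpected response %q: %v", b, err)
	}
	fmt.Printf("hash=%d compact_revision=%d\n", hr.Hash, hr.CompactRevision)
}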

View File

@ -207,7 +207,7 @@ func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) {
}
if used >= limit/5*4 {
if lg != nil {
lg.Warn("80%% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit))
lg.Warn("80% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit))
} else {
plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
}
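The message fix works because zap's structured calls treat the message as literal text rather than a printf format, so the doubled %% would have printed verbatim; only the printf-style plog path needs escaping. A two-line illustration (log.Printf stands in for the printf-style logger):

package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	lg := zap.NewExample()
	lg.Warn("80% of file descriptors are used")                 // zap messages are not format strings; "%" is literal
	log.Printf("80%% of the file descriptor limit is used")     // printf-style logging needs "%%" to print "%"
}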

View File

@ -50,6 +50,7 @@ import (
"go.etcd.io/etcd/pkg/pbutil"
"go.etcd.io/etcd/pkg/runtime"
"go.etcd.io/etcd/pkg/schedule"
"go.etcd.io/etcd/pkg/traceutil"
"go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/pkg/wait"
"go.etcd.io/etcd/raft"
@ -539,7 +540,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
CheckpointInterval: cfg.LeaseCheckpointInterval,
ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
})
srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex)
srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
if beExist {
kvindex := srv.kv.ConsistentIndex()
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
@ -785,7 +786,7 @@ func (s *EtcdServer) start() {
} else {
plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
}
membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": s.ClusterVersion().String()}).Set(1)
membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(s.ClusterVersion().String())}).Set(1)
} else {
if lg != nil {
lg.Info(
@ -806,12 +807,13 @@ func (s *EtcdServer) start() {
func (s *EtcdServer) purgeFile() {
var dberrc, serrc, werrc <-chan error
var dbdonec, sdonec, wdonec <-chan struct{}
if s.Cfg.MaxSnapFiles > 0 {
dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
sdonec, serrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
}
if s.Cfg.MaxWALFiles > 0 {
werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
wdonec, werrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
}
lg := s.getLogger()
@ -835,6 +837,15 @@ func (s *EtcdServer) purgeFile() {
plog.Fatalf("failed to purge wal file %v", e)
}
case <-s.stopping:
if dbdonec != nil {
<-dbdonec
}
if sdonec != nil {
<-sdonec
}
if wdonec != nil {
<-wdonec
}
return
}
}
@ -1178,7 +1189,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
plog.Info("recovering lessor...")
}
s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) })
if lg != nil {
lg.Info("restored lease store")

View File

@ -984,7 +984,7 @@ func TestSnapshot(t *testing.T) {
r: *r,
v2store: st,
}
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &srv.consistIndex)
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &srv.consistIndex, mvcc.StoreConfig{})
srv.be = be
ch := make(chan struct{}, 2)
@ -1065,7 +1065,7 @@ func TestSnapshotOrdering(t *testing.T) {
be, tmpPath := backend.NewDefaultTmpBackend()
defer os.RemoveAll(tmpPath)
s.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &s.consistIndex)
s.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &s.consistIndex, mvcc.StoreConfig{})
s.be = be
s.start()
@ -1126,7 +1126,7 @@ func TestTriggerSnap(t *testing.T) {
}
srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &srv.consistIndex)
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &srv.consistIndex, mvcc.StoreConfig{})
srv.be = be
srv.start()
@ -1198,7 +1198,7 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
defer func() {
os.RemoveAll(tmpPath)
}()
s.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &s.consistIndex)
s.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &s.consistIndex, mvcc.StoreConfig{})
s.be = be
s.start()

View File

@ -26,6 +26,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/lease/leasehttp"
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/pkg/traceutil"
"go.etcd.io/etcd/raft"
"github.com/gogo/protobuf/proto"
@ -38,6 +39,7 @@ const (
// However, if the committed entries are very heavy to apply, the gap might grow.
// We should stop accepting new proposals if the gap grows past a certain point.
maxGapBetweenApplyAndCommitIndex = 5000
traceThreshold = 100 * time.Millisecond
)
type RaftKV interface {
@ -85,14 +87,29 @@ type Authenticator interface {
}
func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
trace := traceutil.New("range",
s.getLogger(),
traceutil.Field{Key: "range_begin", Value: string(r.Key)},
traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)},
)
ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
var resp *pb.RangeResponse
var err error
defer func(start time.Time) {
warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), start, r, resp, err)
if resp != nil {
trace.AddField(
traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
traceutil.Field{Key: "response_revision", Value: resp.Header.Revision},
)
}
trace.LogIfLong(traceThreshold)
}(time.Now())
if !r.Serializable {
err = s.linearizableReadNotify(ctx)
trace.Step("agreement among raft nodes before linearized reading")
if err != nil {
return nil, err
}
@ -101,7 +118,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
}
get := func() { resp, err = s.applyV3Base.Range(nil, r) }
get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
err = serr
return nil, err
@ -110,6 +127,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
}
func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
if err != nil {
return nil, err
@ -186,7 +204,18 @@ func isTxnReadonly(r *pb.TxnRequest) bool {
}
func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
startTime := time.Now()
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
trace := traceutil.TODO()
if result != nil && result.trace != nil {
trace = result.trace
defer func() {
trace.LogIfLong(traceThreshold)
}()
applyStart := result.trace.GetStartTime()
result.trace.SetStartTime(startTime)
trace.InsertStep(0, applyStart, "process raft request")
}
if r.Physical && result != nil && result.physc != nil {
<-result.physc
// The compaction is done deleting keys; the hash is now settled
@ -195,6 +224,7 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.
// if the compaction resumes. Force the finished compaction to
// commit so it won't resume following a crash.
s.be.ForceCommit()
trace.Step("physically apply compaction")
}
if err != nil {
return nil, err
@ -210,6 +240,7 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.
resp.Header = &pb.ResponseHeader{}
}
resp.Header.Revision = s.kv.Rev()
trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
return resp, nil
}
@ -533,20 +564,25 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque
if result.err != nil {
return nil, result.err
}
if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil {
applyStart := result.trace.GetStartTime()
// The trace object is created in apply. Reset its start time to the
// request start time so the trace also covers the raft request latency
// (the gap between request start and apply start).
result.trace.SetStartTime(startTime)
result.trace.InsertStep(0, applyStart, "process raft request")
result.trace.LogIfLong(traceThreshold)
}
return result.resp, nil
}
func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
for {
resp, err := s.raftRequestOnce(ctx, r)
if err != auth.ErrAuthOldRevision {
return resp, err
}
}
return s.raftRequestOnce(ctx, r)
}
// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
trace := traceutil.Get(ctx)
ai, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return err
@ -558,6 +594,7 @@ func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) e
if err = chk(ai); err != nil {
return err
}
trace.Step("get authentication metadata")
// fetch response for serialized request
get()
// check for stale token revision in case the auth store was updated while

View File

@ -382,7 +382,7 @@ func (srv *Server) loadAutoTLSAssets() error {
fdir := filepath.Join(srv.Member.Etcd.DataDir, "fixtures", "peer")
srv.lg.Info(
"loading client auto TLS assets",
"loading peer auto TLS assets",
zap.String("dir", fdir),
zap.String("endpoint", srv.EtcdClientEndpoint),
)
@ -450,10 +450,10 @@ func (srv *Server) loadAutoTLSAssets() error {
srv.lg.Info(
"loaded client TLS assets",
zap.String("peer-cert-path", certPath),
zap.Int("peer-cert-length", len(certData)),
zap.String("peer-key-path", keyPath),
zap.Int("peer-key-length", len(keyData)),
zap.String("client-cert-path", certPath),
zap.Int("client-cert-length", len(certData)),
zap.String("client-key-path", keyPath),
zap.Int("client-key-length", len(keyData)),
)
}

View File

@ -13,7 +13,7 @@ if ! [[ "${0}" =~ "scripts/docker-local-agent.sh" ]]; then
fi
if [[ -z "${GO_VERSION}" ]]; then
GO_VERSION=1.12.7
GO_VERSION=1.12.12
fi
echo "Running with GO_VERSION:" ${GO_VERSION}

View File

@ -6,7 +6,7 @@ if ! [[ "${0}" =~ "scripts/docker-local-tester.sh" ]]; then
fi
if [[ -z "${GO_VERSION}" ]]; then
GO_VERSION=1.12.7
GO_VERSION=1.12.12
fi
echo "Running with GO_VERSION:" ${GO_VERSION}

go.mod
View File

@ -40,9 +40,10 @@ require (
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25
google.golang.org/grpc v1.23.1
gopkg.in/cheggaaa/pb.v1 v1.0.25
gopkg.in/yaml.v2 v2.2.2
sigs.k8s.io/yaml v1.1.0

go.sum
View File

@ -153,6 +153,8 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -165,6 +167,8 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXav
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
@ -177,8 +181,8 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25 h1:lS/LGci7282xXbzMwFpHD7RKjsfKUK3KYwk34RYtlK0=
google.golang.org/grpc v1.22.1-0.20190805101010-a2bdfb40ff25/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -1,22 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDrjCCApagAwIBAgIUM24Z44NdsHtDQisQRIH+mmhXLHYwDQYJKoZIhvcNAQEL
MIIDrjCCApagAwIBAgIUOl7DCgSvqQKhiihYrZDiBKNpQX4wDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQCsWG1qafiCwfmKEltvpmslNqOlWgMp9H7VIP7cExbhsW4P1L4Jlfcz7rFH
2MFpwktbxppoYI/4umTj5r7dx/K6mttUBtiLY5VwSCo/asZvLaOLFN2QP4cwkpLI
lFDy4Pez2Uu+NnmMF6SLq+M6mOaHSbURNvphP1zWX9SLKo1OV8GT6r/oHYmcR+xy
skWd/+6B73S0pbG/d3ME/WoovZtOXqaZtJn8YIBXE6LGd4NBkSK3Jg9c4QzlErTM
j6ItTs7t9aPjXd2kiq8IY6UN2TrLwssWkGM4Oop+mlp5zcKIDLhDrfsRga5hxx2Z
i0coNWBKNjvVaCO0L7Qn1nIHA1KtAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTdZTAnocY85lHhyR/A7lJkp3t2mjAN
BgkqhkiG9w0BAQsFAAOCAQEABCTckIi6zoE7uSy71uNO93RC/Pcb+YzmRSNQzl60
ngUlUrd+18bjp3O9u8jQ8ikhWT3jfn5e4I1nqLKFqKP6xyMPwk2ZJXF3WeBvtuHW
BonDscbYwMpL6RDgcUU1+2ZtZYlo+NZkeXQdTO0Pa8qoo/EtNXb+Bg1FFqnrLrVI
EhY3Bd5+jvC0WkjJFMFeOUkZDmtKLX24P/901ZP+6HN2bA+MIBKmIDKbctP54J73
tncuOOFBfyWkckIMISM4D+Mi9Ezju2Hq4thV7XJeyWTRiXG8+LhVRWJaz7St1FIw
ViEST3A84CBLjiPyGqzqQCtr+HNhr7su+Tmcq550xU11Ug==
AoIBAQDBNhwKD8oqOwNSDMZR+K6l6ocyXZzZPAIbv7co34xtjt25c8PPKz8FiBSU
M4YeZpzsSp7n7WSSSzVWqFTRBZzvjIrBzLu4CfxMKuUrQX1/BPYgbSxQO+5YKPzO
yaBMhIAEtW+WYsaa6PpWyL65L4giKpVoLS/UFTEBsf+lO6pwFpX2EJnIylLbpwEd
pAXIgVFsodHlP9Zc2tR1TqYetmJ6/A/p5sSZpgLy1y2+Mg4VTMKvs2kNAoh/+lEu
WPe204eMpkBXhukulOiJkVKNdhnCkLslt8ZaMWWqBvD9d94lXycMQ9wnGakPNc4W
5VX3rbLOGOX7xK37BCsh5HGodIrZAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRlB76vjaZyFLrEUGm6DQfyjmN6PjAN
BgkqhkiG9w0BAQsFAAOCAQEAD0cRNBQqOPNAUmKCH9xCr4TZFoE+P5aNePU39Jyp
qpJ1HjKI93zBk9aN5udDGPFhm2/iaKx6DuABbxCz0LwNhLiKP6UbHV8F2fTJJ5bo
crXvD0CEpor+Quh995lbq9bv29+zcDVw+Hw0QainBdHWkdw6RAgmbFnJxETDDz8z
VQ0DET3T736oxpEZ4DKQlbzK5LSgZH2lyPEEvzci4QjTZf5X/nitdx7fAdMFFPQ0
lI4l7nIuge5LTR0isEfWHx7Orx6l8dzkofG3fz5BjHCI4JInVlWq3MNNSybDI4pI
GFxeuE/U8K6kIixT8qCAh6Naq9/xuxFkffLmMKfZXoYLCg==
-----END CERTIFICATE-----

View File

@ -43,6 +43,15 @@ cfssl gencert \
mv server-ip.pem server-ip.crt
mv server-ip-key.pem server-ip.key.insecure
# generate IPv6: [::1], CN: example.com certificates
cfssl gencert \
--ca ./ca.crt \
--ca-key ./ca-key.pem \
--config ./gencert.json \
./server-ca-csr-ipv6.json | cfssljson --bare ./server-ip
mv server-ip.pem server-ipv6.crt
mv server-ip-key.pem server-ipv6.key.insecure
# generate DNS: localhost, IP: 127.0.0.1, CN: example2.com certificates
cfssl gencert \
--ca ./ca.crt \

Binary file not shown.

View File

@ -0,0 +1,19 @@
{
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "San Francisco",
"ST": "California",
"C": "USA"
}
],
"CN": "example.com",
"hosts": [
"::1"
]
}

View File

@ -1,20 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDRzCCAi+gAwIBAgIUMEJ5c+Tt0TyOcLL+dIUuE2nOuukwDQYJKoZIhvcNAQEL
MIIDRzCCAi+gAwIBAgIUKgQJ/CMaFxc4JcwwGyiT/7KpedIwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjO
PQMBBwNCAAS/sa5Guqq+tML3vUYITeBQ7gURu5yJa0gKALVIpQ75AJXG9fp1dMD1
+3M7HiT56p5omwDPqe8zFsCvPSm6TSEPo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAd
PQMBBwNCAARXbc8naiFZ3Y2LujrnDCScVNRks/TR+aXPmnuPGjDxbuHxSSbC8Q2z
iTvCkgsIcsifmUIEQcI4v3Kbkj3qMF1so4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAd
BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNV
HQ4EFgQUOErhH7Ot6qYob29cUsijrKhSGawwHwYDVR0jBBgwFoAU3WUwJ6HGPOZR
4ckfwO5SZKd7dpowGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3
DQEBCwUAA4IBAQBOjMqQ2AGHTOvHiG1eumKDaxGzXGb7znMcnpKYuz0OT97IoZSw
EggwwUbqaK+9DotDcAWaqkReP18P3T9TgzZMfFDFctSKB5rM4EU2iPpAHdA6EEB8
87HutlAeFphjsRlUMRLZ2YvTutR0jVeniEDTmTUB9crhGuUrCbg5H8jsVjvDKDut
si3l6jsm598EWYa2P7ac5/MXQ5/Z9QCMogE/zOPzbnHNuAbf6ZdGFHNM6cgUgHvs
C8L6hnuOCouFfcNDRK+7WjpIde18LNwLC0AwCKXbwFdWErRWJ8W978t6htdBYS9p
cvvxQXBuMRmAykhjKaE+rvjdV3IJqQU8mGLY
HQ4EFgQU3z1DifT82BfoU5DfMe08meeYmSUwHwYDVR0jBBgwFoAUZQe+r42mchS6
xFBpug0H8o5jej4wGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3
DQEBCwUAA4IBAQAE3bhZcJuGrnMGMgebCFMuAXvoF9twYIHXpxNOg6u0HTIWOsMB
njEJW/rfZFE/RAJ6JdOMNE2bq2LbJ8dUA25PX3uz6V4omm9B3EvEG9Hh3J+C77XQ
P+ofiUd+j06SdewoxrmmQmjZZdotpFUQG3EEncs+v94jsamwGNLdq4yWDjFdmyuC
hqzSkD48aGqP2Q93wfv8uIiCEmJS1vITTm2LxssCLfiYGortpCx32/DWme8nUlni
1U/pRTx8Brx00dMeruTGjCCpwb8k453oNV6u0D1LsQ9y5DuyEwmZtBEHBN1kVPro
yYW3/b1jcmZk8W9GXqcXy16LbWmpvJmTHPsj
-----END CERTIFICATE-----

View File

@ -1,5 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEINToOjKwxXFyCQHkiWoL55IPdPoYhm1TFmDylAUIhJWZoAoGCCqGSM49
AwEHoUQDQgAEv7GuRrqqvrTC971GCE3gUO4FEbuciWtICgC1SKUO+QCVxvX6dXTA
9ftzOx4k+eqeaJsAz6nvMxbArz0puk0hDw==
MHcCAQEEIK3K2gimOw2P0pZ4soFAopriuORuqpRptllFXNRhCRV0oAoGCCqGSM49
AwEHoUQDQgAEV23PJ2ohWd2Ni7o65wwknFTUZLP00fmlz5p7jxow8W7h8UkmwvEN
s4k7wpILCHLIn5lCBEHCOL9ym5I96jBdbA==
-----END EC PRIVATE KEY-----

View File

@ -1,24 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEBzCCAu+gAwIBAgIUGZReOLZEaMEZ2PfqR25XMrEdBIMwDQYJKoZIhvcNAQEL
MIIEBzCCAu+gAwIBAgIUSvxuG1lgImYpnaK4sPaCiMAd0lgwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQDPXZtt7WyGbsRRzkKHTKqeqAGaFdVJpeLCiLpq15hGiOGl
RJFFrQ/SxEH2y8CmoKLcY96uKYxzVWHpPStK0wa/3DMTE0sxhdWFixD/eRTNgA/o
ovvSEPTX/ya//DfrgvrKNeSCG/E3hDXitVdiexeUiIB8DZHwdAg82Zg9eJ41ck+G
WD9u//PwUqS8epqs15xXaHMQphjATnkLa/0mIjwo6JPddtGopBQRADaorjvpaoUu
SzL7TQaHzCVuj47szr7BmNK1mwoHXJk7d+BlBJz6SiSGBLLq3h3SmoX9/yDLNU7t
rCi9Yl55/ITJEfY56ZZ6L/BLm83b9r03lv1vYYr/AgMBAAGjgZEwgY4wDgYDVR0P
A4IBDwAwggEKAoIBAQC7mJOiyqWfmNM5ptQZ22plotVfgoBf9fHTzMw/ap2Vl0/0
4V3GEyYCdPt6V87GWzjBSO9GAmlISBQQybMieZTaTm8KKW2066iJDKseBCv9m4nS
mHv0oDqp3SHsZQ2xHis4lbi7ws2thdqpmjw4Dv96SUiCJUjhcBX4kBMRcOGgk1RF
ENIOInTSKlAiwNF1NSnhj8wMNw7mjw90jpAGAuPuuiQ7+AYHJBJqtT9mRikR8ppw
isjEE6kslCCg2RC45AiF4LXNp7A7Xwm6P34XJ6T9PJUh/r3pa0xHRuI2zQLaW8Z/
b6NYkUGMbHR7AY/+2JzOfnnnQcSB8EYC9bHadvHnAgMBAAGjgZEwgY4wDgYDVR0P
AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
Af8EAjAAMB0GA1UdDgQWBBRn6Ksd8Ra9fiY7fCZnmq2OylkQmTAfBgNVHSMEGDAW
gBTdZTAnocY85lHhyR/A7lJkp3t2mjAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3
DQEBCwUAA4IBAQCj+s26SaG99nC/OAHJtXQqxyqDfoKNwO6iwK4UGGzwlKAK9+a7
8ObVOIyAbFtHUFjFJ6cIBMg+Tw+9++bRPyliOcrIDiv9ytNEzMVIQq07oj0Kx7Qw
sSYcIeFRF439ftiHC0LAULFEV/hDBveuTVfdt3t5RnEzp8PiTjXvhSpgFOhkuln8
n3NK4UoolN6gJ/sSCP91Oka90l4xagPYK37mksYfzbTBNmPB88rMgioee5ZU1nCG
09fHiNWg/U8c0R6Iflpjy3lsUlnst3+VZp7HZ1mO+hBp7p0lduAdJqZs0ev9gjhi
odfA+/2O8LPUTTXz6lpdnou2kXl0B4I0jG2v
Af8EAjAAMB0GA1UdDgQWBBSPaFA2Jh7s/IJN/Yw/QFFR4pO3nDAfBgNVHSMEGDAW
gBRlB76vjaZyFLrEUGm6DQfyjmN6PjAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3
DQEBCwUAA4IBAQAO2EnUXDlZAzOJLmkzQQF/d88PjvzspFtBfj/jCGzK6bpjeZwq
oM1fQOkjuFeNvVLA3WHVT0XEpZEM8lwAr/YwnBWMFlNd3Vb2Cho5VaQq0nVfhYoB
tpzoWcf0Qx4cALesQZ3y2EnXePpzky1R4MfHqulYrmZKSBQsERob/7YgSBk+ucV9
OHLzYxm4OvYvDoR54REq+vgZ3ohoDmBrNNv9OmUHLIrUi+nBpBgnww85Dc7cKB27
EEKxqIfCNTeHSemvzfK/1M6manQX6eyGe48nOwQMV/ocfY6SeA7RABT0l/UsbeMp
g/b2RU+liZ3e8FziW4/1VTt1pmFAN/2hnb0v
-----END CERTIFICATE-----


@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAz12bbe1shm7EUc5Ch0yqnqgBmhXVSaXiwoi6ateYRojhpUSR
Ra0P0sRB9svApqCi3GPerimMc1Vh6T0rStMGv9wzExNLMYXVhYsQ/3kUzYAP6KL7
0hD01/8mv/w364L6yjXkghvxN4Q14rVXYnsXlIiAfA2R8HQIPNmYPXieNXJPhlg/
bv/z8FKkvHqarNecV2hzEKYYwE55C2v9JiI8KOiT3XbRqKQUEQA2qK476WqFLksy
+00Gh8wlbo+O7M6+wZjStZsKB1yZO3fgZQSc+kokhgSy6t4d0pqF/f8gyzVO7awo
vWJeefyEyRH2OemWei/wS5vN2/a9N5b9b2GK/wIDAQABAoIBABMnzIHdGtdYSB5e
dVrWRDSfxHYdajSBdG8P/lh8Tf7GCkIgEWNkVz/bDVTlAYji8eh1+U4RXH4S9xZ6
phMlZ0w15Snv8FREzrKlZA6Vesx49f9Bfw2qr1N8qHG3tNq2oMApNlCmkCPWvLuS
kN3yDP2VlnjfMAoMTe6BE0UqbUL2f58h2LBo8x04FYRye5VtroO+lM/9cHUB0xAJ
Lsp7hiu5wWsORyn6pkpC5B/Fw2sqOdEKtImcs9CkimkbZAfQXq97Rd/jAkAoVaEB
j4lKri9bKwiTH7Xi9LyH8Ix445lqC3uT7728yltTiyPHME0O6RsP5AdfZRmGCrEg
cMuplFECgYEA1gUmENbRx6zQdQ0CiwgRloRzceLd3l0lJCxaohqKfX28/keBjMJY
BB/EM+Qv+aBwrOshSAAW6EttBdqKMs3/EIngCpvMQfAKN7flECqfPZpGJX5BV+E/
51E4wjwCrxpmkYlQ8qj7lt8wcNPU55Tw9bfKelEgQ+dfyD6TpWV9pnsCgYEA+ApP
XfGsSkeaFsx6LJIy3ptYxrcd4Xus7ELv6ifTmCKsgi83OeTzrEccHF1niw/9RU6P
FYH3jxUzGe1AxBfcAcgA8u4L6+Z/uHtnfU+y8od0H1kHwzAR90Rgc0/sCLlHo93+
4yDPpS3JsLLrhrn6e0uHwEOTyoecyIXcyEL16E0CgYEAiH5uIY0v633u0MgEWDFE
Lk+45OhAgiG7n09eWkY9Dv3TPATUvbXwtmigFEwywKyvT8kBx86uzWXVWUdgnjg8
tQqJxZpJccAqdBCnWWElf/9VP3I/MFHrFJb7cP0e5RgcVDNUWf6lvjoHxd2DylJ2
PvABhXMZ9dSphKdMOM76jOMCgYEAzPlIKSwj4qZVEe4cMGUIoKjjriN5D/LyLbQL
KweKdjiBMnvuOWuYao/BDTeq72JhPDr1RyLF/3nXZt+HHAVTjC1Ji3doZqNufHeO
SCHqkT2amqUqIwTAdAQPaHttZLAoIaS8k9lzft7dw6W3uPhLpEQAhMPTiBSVXagx
kVS0fikCgYAhgIEyMauBbFeEA4kR8AuvpGfB8nMxJV7wljxGzLV1FJxmMsB+nkcE
BgUUkMLRmqqbqfyTj4p8zNtsnjeNd6ka4Un9uYJoxLpbHhe7NxqGiMGaUMPszaKV
/Q7vs7YissxWZxhhoTCMkd//YNikdHBUeMTYIC3CS6ahgX7+ueydhg==
MIIEowIBAAKCAQEAu5iTosqln5jTOabUGdtqZaLVX4KAX/Xx08zMP2qdlZdP9OFd
xhMmAnT7elfOxls4wUjvRgJpSEgUEMmzInmU2k5vCilttOuoiQyrHgQr/ZuJ0ph7
9KA6qd0h7GUNsR4rOJW4u8LNrYXaqZo8OA7/eklIgiVI4XAV+JATEXDhoJNURRDS
DiJ00ipQIsDRdTUp4Y/MDDcO5o8PdI6QBgLj7rokO/gGByQSarU/ZkYpEfKacIrI
xBOpLJQgoNkQuOQIheC1zaewO18Juj9+Fyek/TyVIf696WtMR0biNs0C2lvGf2+j
WJFBjGx0ewGP/ticzn5550HEgfBGAvWx2nbx5wIDAQABAoIBAB0jBpM7TFwsfWov
6jOV68Gbd+6cs1m0NnpCDdsvsQgh904+jrUMFlQ9XS3UY45Vbsw+isNh7n5Gi69L
1KHfJmp90itO4fY+v++BYzaHSVnbhZ2LB32oQVROv00bKPRAjk/8mTO4fv+bkanU
BdRjJ/UTWsq0BczV/uObZQrJcJHi6+sAMYw4b/kxzTALd+UuvmOP7Z/NoWW6x8Mm
ahHgqaMwA0O1f4DsdKYnSUVMF9DNGsxKCUYSYR6RH93Bq/Eo0q1U2egmLIMcTVW9
7QSWsJoZuXlzkq7Hb7mxGdppa6kSzA/VM26qPNE9Cjg4tCMu1RJSfgkcnv27Y8vZ
fZSq3zkCgYEA68VjIqG6sj43SZSvD+Z+Dfuzc+lO4YBSI0Yru8B4ZZq0vfTVQdM/
uf0Bpk/nMbqec/kfcPMHP8zznLe8rcmfZXNQFIaajOb6rzWhCRSgbX98MeGnUe/y
9sG+zFSRrAPDaVRJZwSYILs6o6Hz4o6DBCvr8iKFfm26SLB7hIjwx8UCgYEAy7EL
dIMdsGDzfmxAYqad3oy/N1KVp96zfdnHEiIC0oiXz3YfI7YLFj54yXxx5rHR2/AK
wOo7b90Rc8R0PgtKedKrz5p/E0Bz723ToTxHjsqgVRZqYaEKUOp8wR2t2DJOF9b9
0C/qp6iUy0IOTBYyu3BCMV0aB5kRW62jXJIsQbsCgYB6uO7mOurUFsBug38wNpjM
rIR3RCz0Afg/NipTe1bwBDwqWEOdFNmp9QEj0ZmU7//EfBsajtXqJsNzgswqZbWb
eA9p77qItz4rby3YbS0oceByknOmmdCNEsI+15JPyFGyBNaEUgbhmrNmM0mgVu/p
fvc8vS1hZro9VeelUCaMxQKBgFDgnXHH1fQAqu4ZwX7qNWj2bb5jtjSPgqmH3Tlf
88rwnYasmjStxb0xVPh7xyYYmQFBUKPE3ZDPMGzNJnK0PQAeHEY0TByyzNXWv98X
djpGTl86pUbakKQMVzi+thZP8x4YKXOOcxfbIimKsu6XKdGvAzlihEFcD75dNa4+
BACdAoGBAJevnrC7M/KyDDGW3ci4sFcn7MxRGqLBulwGoCuM+zecbG7NBvDynoaH
NRGpASiboRJyCEoIQivvkZf+K7L/oB4bL/ThF2ZpJUe471tq0444xnXdHRDLG0Dw
OnBl27e3iAiUctqR51ufXKOUaNEf4gcsS9duELMPBxM70GE2Q/2r
-----END RSA PRIVATE KEY-----


@ -0,0 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEEzCCAvugAwIBAgIUYTkp3oUkde9wFRkJA1LlvwFrZ3MwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQDcRWZxskwCNXhprj8XCtkxj9GP4z9hVgUxgquSBync1hic
or6qNgrUztv6nlALdQdf+TbPKyGEwCgAlKU/hnJK6lAG3+riyShnyM74/ulV1wYS
F3Rkeh0nNCo95TPNq4GLB+sMfzwoSsT0srPX7KzCqpGy+G7sB0JBNwkTZLkCuMZf
dkkmcZJ3zqIiOzJPlcQa4iBa0L1nV3Uuv49kLZLMCLMslg//IZxC09fnmjn+XLcV
4+RpOKIn7AMN1kqPqmaB6gk2aCbYTZZ8aS9+cOJmTERbynyX4y4sRV18ED3dRNvs
HCedgPOp53nqDneSOqOhhg+Mb95tnMQq1on0+TRDAgMBAAGjgZ0wgZowDgYDVR0P
AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
Af8EAjAAMB0GA1UdDgQWBBTFoXLQVq+Yg2AlRIirXj5ho0PMrjAfBgNVHSMEGDAW
gBRlB76vjaZyFLrEUGm6DQfyjmN6PjAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAA
AAABMA0GCSqGSIb3DQEBCwUAA4IBAQB4bl4f8TI7k+nlHe4MhJuHP1BKHB5O5SeG
wrgI2+qV38UrKvTag2Z3OVKw12ANGN1vcOUrDS7cCtIZ8Aar7JpBgWrYvVlhAtc5
3syj74Iapg1Prc0PFRmMQTZ4mahRHEqUTm3rdzkwMjNDekBs9yyBsKa08Qrm9+Cz
Z84k/cQTBc3Bg6Xw3vUiL4EmeRQudBQAvh/vdxj6X+fwKmvLbPpgogXuQS/lHhFQ
/rZ+s22RHLlqzAMuordjxS4Nw91dqYFwdYVvEmsK89ZnSWqwLvFCJ4uNnAe8siS7
53YTpGbpLdNkQKAQJdMQSyvcDbQoQ7FI19a1EtSwpg5qSMOTpQ/C
-----END CERTIFICATE-----


@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA3EVmcbJMAjV4aa4/FwrZMY/Rj+M/YVYFMYKrkgcp3NYYnKK+
qjYK1M7b+p5QC3UHX/k2zyshhMAoAJSlP4ZySupQBt/q4skoZ8jO+P7pVdcGEhd0
ZHodJzQqPeUzzauBiwfrDH88KErE9LKz1+yswqqRsvhu7AdCQTcJE2S5ArjGX3ZJ
JnGSd86iIjsyT5XEGuIgWtC9Z1d1Lr+PZC2SzAizLJYP/yGcQtPX55o5/ly3FePk
aTiiJ+wDDdZKj6pmgeoJNmgm2E2WfGkvfnDiZkxEW8p8l+MuLEVdfBA93UTb7Bwn
nYDzqed56g53kjqjoYYPjG/ebZzEKtaJ9Pk0QwIDAQABAoIBABBdY5gM3BLJ8DFB
zdQjbTF+ct5SztGnd2lPQPnvaE/M5DU27h1tOG7JE5TSEDZZsnuR412O4cWgFRi9
8mz+yxz/vYRVPHku4r6bL61WGvXSrNPJRE92txXDjWPd1HRySoSOyQq7pTeFHo7j
e/MN1WP9EigOxwboHycDNLxpHkmyV1DIlAgNkCZV56//liU/b+4vAVIJrgWfwfGH
NkFd9nkm93oCFOroJ2f30E1wLPlC+ZhIn4ysau+zlWDLYeils0xHwS2GD7gjp/if
i/ibVPgMVW/WPb67olm3nMUsan6CLmKWTiG+yklJT2djoam/iCZWE8/SAZj3qsxy
6W9rafkCgYEA+D8tPM8h0oHlKriFDQZx37EH1dfGJRqxr+SgQiJ03d9pGYEsT+jC
yr/l5ntzTwEEJjp/biIRwCwSWPYQtN4dNqn+11ICQzjhQbfWTfeT6vhSoBNxkeTT
R8tUM0fmoUNrXhPbGZ9XdIxDFgD1pJL96KtyaQGjIRAhyG+khIT7oIUCgYEA4yaM
Mw65KDonnKSVfMiOuG0QNYf70UcIiLSH8USnhbQhzT/c2LG7tNmru9UtQhZtmrpc
vezuOYTkfcAIUjwqm12Ra8Px8WMzwHwKx3C2SrFCLFgjNFyoQ+VIGjtAL1lNKvEx
MObSX7kVIf5+gaO9+KRBEdu55R16yQpW/UVAwCcCgYEA8XdqRkLoED2/Ln3LFW9W
ZpJpH61BlCfR/FhzNcEUUhiUv3UxKA0tJE/ijP05nPhNE+5Es1i6UWXM9vFqMLP4
UIqsUr73anGyUd1CvBX8sEqY/BHNn26nwKbboQHoKKZOknTX4qVmSPyB6K5IQaul
BKN3pwIrreZmJfPKYAiGRY0CgYAYgEbtFvB321X8enA5ZnSmhfUSoRlTaIMOI9Lp
/krHjDd9KR9MLFef2T7B4uufzkWCRAnO3qiPgbsXqUf8fsrluUD/S8JkFBw37elH
u+udwOLvX45kjn4D3M5bLfrtYIeHUz7IFI2qj48s/INuvle2Yxk1sOqrQPPGjZv2
c6rZTwKBgQCHSa+ToxicPJBZ5E7ezgue0LyRGWIMsr2OR16PBL2lPPiCWPH8Ez+l
mTClHll4KVZyqc0VOZDbjMjZBnTiAq/1lb8ZvwsXLi0ue1obkkEYfXLWcxYD3Yne
iBCGhjkqaUA4rESb22j7yqB8WGT83qV0kB9JwElzE9SxnyR9iw2FmA==
-----END RSA PRIVATE KEY-----


@ -1,24 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEEjCCAvqgAwIBAgIUKhJ1faf58PSD3a0vfyrIDojJ/kswDQYJKoZIhvcNAQEL
MIIEEjCCAvqgAwIBAgIUBwoN2+J27JtT6IaqV9sWhsHii2IwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQCczqeCNBDCkgqgJuhfIKlo1uw6NAvAN8tBSoxmYH6t51bM
+XDJzuw8iQm8UuarHI68O2qjUSiVNvEoilYi/xSu78Yj5vRsNc6i4+FBs09WdSH5
fieQ8kGVKRJwBDDNiMUQnG3DrEXH/cd7NLegyFdnO17yjTeAt8Fl482QKIFDuInT
tGLBR+NKYcSM347Vz0tmJIuD+1Ri1DPhOrnrS0KaLKVvaDYpFVdNy0xS2zX+p4Ti
QyznXjqXOXiFIDOU06huNerWJEzr8Z/ylRPz3pA0bGqcJpIXxzJcGJWN6Jfh6izf
ONszTSgchoN9sn7iah8cHitEdYQs4LTKjyqu+9QXAgMBAAGjgZwwgZkwDgYDVR0P
A4IBDwAwggEKAoIBAQDHS/zscOjq013InbTlwsBVwasv8e5+ukZNGDQx5RNaXYxI
NVUM/5Bai4R3CS+DTbr+jBDylKi55gPQ/UIDKlU/NQH+x6UJB050G+aLDWAuRmxi
w8dq7kRw2QJvuMxI+quiZhWk2HYjtvZRZLCUGl//QTL/VCT1smXwXRBU19S2uOfy
g9KgZL/DCkJ9VBUh3+bFVKXBDnIphY4N/0+B/sW71cvRj8zvW3iD0R5T1J+QVEFz
sFRT99/OhV2kUEwMaAYOFv/mMIEO6qc7vf6pB91qdUfEP8AbsOlmiSuOOLuR6X/2
FHUjc8JrFfMuOVHnedRR5quxXbP8o83ilat0tXeVAgMBAAGjgZwwgZkwDgYDVR0P
AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
Af8EAjAAMB0GA1UdDgQWBBTdwPxrHxU94wYrwQBOjp6ma6EAajAfBgNVHSMEGDAW
gBTdZTAnocY85lHhyR/A7lJkp3t2mjAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
AAEwDQYJKoZIhvcNAQELBQADggEBAJB6rG/9kScmQi1TQMFJa0ZAsG+/9m7Rczye
RApBF6pG5nf8FJiCt7sNYT8r8i+kby2H0CLII16dXSZxPG3giRN4TviM++/YXW/j
rW1SueyhS+bxajOQfRVLxTnBk7TVDvacwJdFy/VI28i6hoV8E12g9jslAMiWREWd
nhgk3zIyXFlVuiHIRYqKFWeo75/cEyTZ5XWs06r5Odawzo2L094CT4uxgu8mRCwN
sJKa408ev6CUEW7YXZVtJ8IwtFfJCWAbe5Tsq/9K/m1puHLOiVRwYBl6rCUTLjGO
+iKZYsV3wVf75iENevyv7rQ9OkomJokdWxhi5e+VxC4x+zwbvKU=
Af8EAjAAMB0GA1UdDgQWBBS7gBJSFrjAHryiQpe38OMTzCKH1TAfBgNVHSMEGDAW
gBRlB76vjaZyFLrEUGm6DQfyjmN6PjAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
AAEwDQYJKoZIhvcNAQELBQADggEBAE2tsLHtgF1T3d/anKf543q9uh61tr46HHf0
RrGZF+RJuJY5XIAiCN514Z/I7i2wG/x1TDTKwZUebajbk4GvaI4nEnCXs05jwm/n
wpdyRE1EUy5PkVFfXKCNQd096mpZu6EYXBGnQ2fQjg5zFvZSDnYaIf0vBF1WxE4W
0a4a9na3N77OSamPEljM1RJ1Sk+Zg5yI+nwyKcWWk3OlD0j668Vp6/m5VZKyQEkx
crfSj7kgRJWZRhMeh6li3xa9vDmzdF6ojGkgRN3Qljrs36JnmsTono2ETF8GIc+g
eNByAQNppLJjMn+zsaG9J5pr0gDLubFA7oa8aAJgYgJMM/GecAg=
-----END CERTIFICATE-----


@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAnM6ngjQQwpIKoCboXyCpaNbsOjQLwDfLQUqMZmB+redWzPlw
yc7sPIkJvFLmqxyOvDtqo1EolTbxKIpWIv8Uru/GI+b0bDXOouPhQbNPVnUh+X4n
kPJBlSkScAQwzYjFEJxtw6xFx/3HezS3oMhXZzte8o03gLfBZePNkCiBQ7iJ07Ri
wUfjSmHEjN+O1c9LZiSLg/tUYtQz4Tq560tCmiylb2g2KRVXTctMUts1/qeE4kMs
5146lzl4hSAzlNOobjXq1iRM6/Gf8pUT896QNGxqnCaSF8cyXBiVjeiX4eos3zjb
M00oHIaDfbJ+4mofHB4rRHWELOC0yo8qrvvUFwIDAQABAoIBAE2/MNKf2gd+vYH2
iJRR720p0upwz7q4bzCqM7627VhwMVtj+gY1cG/7SjZzrCb++85cb10WalbBbQS6
wkaLzseF3uoCIwJcE140OoWG0Dl/zh6T7C4zz0yTlq6vhTsDKyI3TT0Nd3PXYka2
Nq2jZ6Kpj0QXvIM3mM5aCpwFWUInwGTAPG2tskhPiIcbnJnajiGdupia4Gsl1DWY
H+L7FGxI1FHtOczb57Z58BQoG+vXFbrnuRUBWCR3TF0bGG+6ywUFPnTqE9TfVrbf
1kORUZzIGbZBOYl0n/ylq5nc/DUXThKY1G6C31kTvZu6hTsgx/A5MeKOxu6eojZ9
3DierQECgYEAwD1EwiKmaxOX9XksmVUa8IvbI/WKhXy1h9cbfJFpjPKUXBzgWAaQ
Si00PzrzKFkItGsuoI8DoglZMWEWswv+ipN+Q8DqT6+RposqpIbO/Kvr6ynMjkNI
+SW5Opfo1ih2YaTTSsKVYOwxI0HyIMnIbqUo/MDsrx0xM5x6ZcWN+lcCgYEA0NDn
huvKY+NDYaQn6G3JjD+uvAqfsTaGSyPZpWpvNb8QBGmB9gmEHfvpO+fs+ANpk1xM
T70piT15SQM8XlX5yt1w9MtCliXNUEj4KIZ60r+fD+LU61a0os3cUfeuLvck090T
/8hg0VIfh5kT4vW+/Qzj3ToGJlbgxPjy4vTIXEECgYBbHS73rKCf45uEdzhSPGoW
VNM6yegruJ35klSux+T1LwxuVCloz0OYRLCcv338Q6bCjIJ/Cwm3xSKHKvMtTaQJ
XjrcNhVYS2iHPTKq5FI6o+mffiI701xE6kJOqS1sQ6VIRmz25B8U3x2zDlCKhdj/
yDwTAAWuQfHN+n5wuh++5QKBgQCij79u3el10dsHDJC09aNON9WENpETmOr7XDF4
AWAKa2/Av5KZQ9dCsHZmWKu2eJGcUuJlwVGwsdbV7Fr25d/M+o+RQxvXYB5yPhxT
ED5WCy99Yo13mMyPYcv/U96aWXSmKxoCqrAUxOYe3iEJM184COzrsNxQxJm/Pj94
SDAjAQKBgHCW9ndreIIEHm7pugis6/WaoZTkdU00nQw+pYourCZ4/zaI9QiUnFwH
HPLkUdpt2fJKKu9h5DMGl1hzYHF4QRuPWJK8xGTP7YTJmZAzLtadiAhRaAq8/nWv
fZJPEXDteRoceSo/Odujl3mDMhRTiGi5QKfdGV6WZPicLbvbRkmk
MIIEpAIBAAKCAQEAx0v87HDo6tNdyJ205cLAVcGrL/HufrpGTRg0MeUTWl2MSDVV
DP+QWouEdwkvg026/owQ8pSoueYD0P1CAypVPzUB/selCQdOdBvmiw1gLkZsYsPH
au5EcNkCb7jMSPqromYVpNh2I7b2UWSwlBpf/0Ey/1Qk9bJl8F0QVNfUtrjn8oPS
oGS/wwpCfVQVId/mxVSlwQ5yKYWODf9Pgf7Fu9XL0Y/M71t4g9EeU9SfkFRBc7BU
U/ffzoVdpFBMDGgGDhb/5jCBDuqnO73+qQfdanVHxD/AG7DpZokrjji7kel/9hR1
I3PCaxXzLjlR53nUUearsV2z/KPN4pWrdLV3lQIDAQABAoIBAQC2y+TVvY51bJ81
lilJIIMnZTauCDqXdCVtKwkcxp8koG89/+Tdwj7WPeenAv7YcWBVf4U/6siDkgzo
EJMOsjJ0ghstZFLkYBY+eyTPX9pbN27MfAQZ+Sc/VlxcuuRs/7aTgwzRIVXi1jtB
Vph7j2GDj3rGJJit3w6PE90Z5MkPOhXwbPD+T2OCIhO0OQCv9YNrdHmQzFZJ8vn/
FuKUjZuoKKnwgXvBVBKsUPvvSdPTWpavNYdA7WQtjpVYVjVHgEHZWtxUwQ43JHzb
pABWqYp/XJNiGhZ+cEXsw5dBBWp/BPxbu1P2iagZTmNr/8EfGCq04fEkKhv22x0y
FbQa+2e1AoGBAOCztIuf1Magca9mFD+3YZHgBv2TA2XSujwYBr/664dLjL/9NQIK
00IBykiNykiWZ0ixcaJI1j+af7fWr5OuSzBVwdXMUZraKUEwrKI3hh764yX8aUYt
JsqpAFhyro7smp3LaUyMCW2ZFVxayp60h8fQXcNepFwmK5o5BnsTsFHHAoGBAOMO
ZooI0Yz/fzBKOEMM1Vdf3PpUqYnjCyJSXag8OeZn/OPgiYkwXWL0idfC49B0ArVZ
/j2zMXJduIrwa3UIfd6tjPf8O24YOiO2SenkVkcsUwJgsB1nM1QlOamGw8BB+nbT
O5V44r7vy3HldHHQgbPvjs0z5de3b3eBBTZC/2vDAoGAVNroSnYAV0YNyIwHB4zL
9tegLDBRbylmFP2JxwQN39ji/Tm0w+Gsp9efOUj6Y/EQbf48iGlzJy/EHXugcGe4
kzc/bOqswoqyW6DzAItxRc++6gBpDQxOAuhRbhVY4DZvqTlAuZyEjvPpgifzLn3E
bOu+DOJ3tSjg/Guei+oCgs8CgYAUfwxKkZk4/SdiGJETnGj1xjWQc2wKgnBS3NSP
h0BCyEhP2ckQlUkY0bJPw8wE2TQVYtZMg4yHImayRBmvKuER5ODA0ggbXByDdMUf
U/ll215y7H95aAN+KQ4Xe47YIByX9WF/kLYHPmZDFc95JrVOpOVjKLgqzOhHBWKP
D2U3OQKBgQDbmwsNr0mopOYiAp60KsKJmICUQO27RyL87UfdFysDrTZ+K1Pc6X1e
HOFtma4zNftDym9Xjzz2eOXT6flHeJNu1qZwvurNV1g0JZdXnY83q4C130bAFJLt
I0+I3vDpJt9wznYnC3jDI24gCbEJ2D//8dpeDNUPKk94rjsEjXxDFg==
-----END RSA PRIVATE KEY-----


@ -1,25 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIELDCCAxSgAwIBAgIUHqSMAwCTIo4rAMwp1Dp8tBqn3eMwDQYJKoZIhvcNAQEL
MIIELDCCAxSgAwIBAgIUEQuXXKtjueOgUpZjzr1ORDG/zHwwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQC8QxRZGCUocS7zTrqiOCIuM0HYoojgOZTK91GX8EooGHMs
76bZrzTUncEvvdXxOOOAkSWzOz69937sP0G0QhPBw95UqZ4+vAcL+ByJ3PG7cKQ5
HYEo5FyETLihD3mLXydXsSudv9sCGp8aexJ2Y6KOzFzY3v7oTc5evPeYKL7qnMri
SJUUMr3W8cK1jpU/C+MddzYvC9mNe9QQ3vvRoNYLe6oKyHfV0XhR3pl8katZcf/8
091KBzx35ksHiGJ2zmaR2IDs+HN+KMzKJiizPQC3VYBvqiSHJ/2ANDKk7pRwCvdc
zyIafk679p2aInXEReOvfFbMqNHfpDAMIqG0As9TAgMBAAGjgbYwgbMwDgYDVR0P
A4IBDwAwggEKAoIBAQDPAGzCcM5JSlitl/iHLYJ6eGO8MJ3S6R1qzAvmdB9+KsGD
F99gWVTrJRzz/hJyo9Lt8GvNj9Ll3iT2QnXyyaSOX1+uT4cxBM2MFBBfERDh0WUY
43uLQKY45H4zrS4tJTOuSGKM/LlK2ZMj++pQBqHsONrNG+nOhqe3qLqPDV3yBfmD
PXfjASNvHINgxb9AwQWJydgjfGDiAwWHnKbnVScYBFgWfMG0Gm1wa8EfRfWD0NPd
L61XwQwgb5VsYAs7XH7bxVbPm6E+/oQTOJXQHMzQYve9DFPy62KFSIkfvNwVRctL
NE+k3HnyviDzbs387ys56ZjPG1/XpbFmeQuDRndJAgMBAAGjgbYwgbMwDgYDVR0P
AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
Af8EAjAAMB0GA1UdDgQWBBSJjfpommIxJQ3q67KG/4NQe0g2yjAfBgNVHSMEGDAW
gBTdZTAnocY85lHhyR/A7lJkp3t2mjA0BgNVHREELTArggwqLmV0Y2QubG9jYWyC
Af8EAjAAMB0GA1UdDgQWBBRyAdJmiDFhC3CYXPLW8kufz4zp6DAfBgNVHSMEGDAW
gBRlB76vjaZyFLrEUGm6DQfyjmN6PjA0BgNVHREELTArggwqLmV0Y2QubG9jYWyC
CmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
d0BLO+3K4SfbHfn+rpNH2xCcXVyLZx2p/HhXo8ivzAQ/t+pkWBgxIkJ/qxHfQjZO
O0/b5DC+No6UhKNNyunHxv/gwsmzz4rzPmCA0OD6om/yZoXtz3kf0ktmbKbO8RWc
nQPZRC26D+cshebDAAm9XoCGErsdI1Xekx5Y66c1t1tUVZaijDB7FvnaRC5Rbzn3
5WKRHkF+ahJDqkpTWUsFDuyRWF771riiA0kD5wwmaw5W5/knr1BiOePCUFaTZPXx
r9LgDqIG7w1A1kbi5zLm43pN/S5qpif3DcSYYaCjQZC7yTVaRb1JBvcTY/g5whJx
nyHrKafioCOq8CwHpbvZlg==
sc0JnS1udx4HFStcPZXY+kyVwYPuRDv3GisO+TTrxzdupQQl4gPmuGa/cik4tKxQ
o//XFgCBQCapO2cY+JerjMSOLHtt4YmdyYDSXeMjsRG0sP36njH3nHIYsFAoEyvg
nJQX7iDWj/9bzMT4dU2ac3t9RgCtyABRoT4G/MNhWMlJt9XkwVTN2Pqf4TMV0GlV
54GOScsWAIwoIDPOCoO8Q40jtFSSnehrlrW7x6B37gY/EbbYpZNrIDNckNfLJMvl
dak9P/ovtjLk8GM11gE4s2ANWA3o5bIm17b1x7Fw122sB4Rxptbc/BDpv2GIY5Zm
tqDculro2C7Ib5GyEBEl3Q==
-----END CERTIFICATE-----


@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAvEMUWRglKHEu8066ojgiLjNB2KKI4DmUyvdRl/BKKBhzLO+m
2a801J3BL73V8TjjgJElszs+vfd+7D9BtEITwcPeVKmePrwHC/gcidzxu3CkOR2B
KORchEy4oQ95i18nV7Ernb/bAhqfGnsSdmOijsxc2N7+6E3OXrz3mCi+6pzK4kiV
FDK91vHCtY6VPwvjHXc2LwvZjXvUEN770aDWC3uqCsh31dF4Ud6ZfJGrWXH//NPd
Sgc8d+ZLB4hids5mkdiA7PhzfijMyiYosz0At1WAb6okhyf9gDQypO6UcAr3XM8i
Gn5Ou/admiJ1xEXjr3xWzKjR36QwDCKhtALPUwIDAQABAoIBACE9KrOMSss4KJT2
uob3qDlF/YM1Nzt3jyjCv9o2p7Eql/NAINJgV9CORdSbDNH5PFmvxts9Q5egwf0O
MhiUbg6Q+YkzJWhoJEpdtRQ2OUvh2GzpI6vHnfoR3as6IziTRUv7UPsaK53Ue3De
d+UFNGdH98CmppT3X89kR8bKsuKTHLd0bL3gPt1kDqyMGEEZ3XUB0l4PrakRv466
WfYuT9suHjhdsNQn0bHzr93OyWT0KFffrwA6l4cKsGgf3eW6CKZCcqzTRHfe4Iip
Oa2l0O4uM1Qp3Eo4KPBsxk2i6dlLrAEhDUUlnvg5PCIResWV9MbBERKA/+BEi3CB
azUiyEECgYEA0Wk3AWWzPZnOVdGMltiArllkSODuwekuZN5VNQKYs5Qfi3fHcm9n
rXN6/wSOai9Mt6oL8wGHODyfChmllgtU67jV7RKQUy0WqRN81KbHrQynqmG8DAgS
aR/np8pIYoyeL1sELH9CvRSmLfGWxa4j6J2wuUzlCPFeYhmniSecxjMCgYEA5iVY
5rUXY8uILYkVBme8YEPbiqfTmgaXcsVhB9H331o5BX8pFXc2n0+igbkQ1ZomDN/u
5qJ3ZLk/ya77BixVV7U1+UPJM3knBBBbzB6zLGsXSpMuLf/cGOM8nnJ2EFfl0QnB
nTcqwH59iigUHBLXnO/nijdZtnN5P/l/U/ZDcmECgYEAojt01axIVCzX4TxaQnKw
HmI6gwtfbPKNcq+cK1k7m8PhPFcrOMh99a5FV1PcUP8b3B3s6/H/I94zB8wesENP
It8rPGLpVMbVi18Bkm0yvCnVqvXUjS2jtbV17lOUCGUQF3fXn/FnbryUkXtcZwEl
6Ixh4Oxlc6wqhq2BUYxStGkCgYEA0cKwaHYdP2O+VmJeu1vJIaQ3cQTNo9DmMEEd
0tbYqMW+uvfPJjVln4Yhg0J9yXGZxJpFUAg78Z22Ocg2GsZFco9DBlF2DGgb22Rd
holknNNugxXqPRq6LCTQl0mTugmi+Qd/ZB9n49Jl8Ynd6khyJCO0URFpvxU4Kcro
9km554ECgYEAyqC8orPs7VkJJ4CCaJXGNQycFxlklv9vodV2b5dhOsmX3gzs2Ssn
xhMphgGR35YEsMWxpKofiMbqpI/VZEpjH8Zcb8IklVCGKi61SQvBgJo7/545Gr4+
ZdJ9TBLTq21LhleqLMFsWMGR3v+5Yuo1NoGf6jxJ29J1PjK2gooqIw0=
MIIEpAIBAAKCAQEAzwBswnDOSUpYrZf4hy2CenhjvDCd0ukdaswL5nQffirBgxff
YFlU6yUc8/4ScqPS7fBrzY/S5d4k9kJ18smkjl9frk+HMQTNjBQQXxEQ4dFlGON7
i0CmOOR+M60uLSUzrkhijPy5StmTI/vqUAah7DjazRvpzoant6i6jw1d8gX5gz13
4wEjbxyDYMW/QMEFicnYI3xg4gMFh5ym51UnGARYFnzBtBptcGvBH0X1g9DT3S+t
V8EMIG+VbGALO1x+28VWz5uhPv6EEziV0BzM0GL3vQxT8utihUiJH7zcFUXLSzRP
pNx58r4g827N/O8rOemYzxtf16WxZnkLg0Z3SQIDAQABAoIBAQCd5u0PxY0WSygq
A2sJcqW9Vmh9/XfmkvxloxDQ0nPTgjnrDiLPFFW6qazUUlMwL9eOuX8CZ1uxDSuU
zk26ziZAlHAgP3oY4lkJKaTzX8lI+Lntqllrd/1UGLhMIya+OUqa/4xtj7qoZh/f
qyKpuOV7lEMTgt9vMzhs2MC2rrOjEZxcpuwpnZLKvpuwBMcxD1ccRdCA1zHvKdQ9
ukPTRVjz9WUEOgANRkndHTZKWMz2p4QC9Id35HlksZi0/M6oboz2Eg1mtZEpbgUX
loMv1CPtWP1uj9PFWiOmnBC2/v/2MVGg2fJ1Lf4c72ZVFEIU4l3YNiV4IqFb38F+
GJVcmiGhAoGBAOvrTjYYl5vodK57gRRT0UsaU6x64/IK2i0vbBGTATywuijJ313X
vwZBU9I2rLZqr7FZ27g5ANorw8dUKn92otr/TVS/c/VZOSw/+gTM9Rl4ZGji+qKt
4zY/dA38jlDJNWmFwK/9KNOfXNS+WLsA2QJlONgUfkFPb3yXJUGLsU8nAoGBAOCf
AUcyDHjGwtYsLc/4aiKtQUIdeX0v6jCWtl9EI9cZ/o414iapE05sOGb724itSFN/
EI4biQGw8CaMcaqMaRJ8+xVQQJ7qkXItzZEFVGqz0PKwiwYAwFp6raXuio9y+cTw
savJIM8IDijph9ezRCalef4Qj6I0zFI8H7PmiwwPAoGABWvY1kFmanzDAadw5eiv
LIykU5hXWJ6LOPKYBydbpethu8I30c49Y4VoybHb8i0tcGPiOq+Ep37N9uymNVui
jmnDeykTHxY3zB6EPkv/beBoXkio/cgFKp/2qMOe+ZhGE/Cw5tpob8R/u5vMKi/w
zK9KyRxfclzC8RgAESuGnY0CgYEAlHC/+Xrbvx0rOTps9Blomo4AqF6uIMr/ayjO
UNrJDKfDD9wQHhhyB8uA4p3ikMpjF7rLB/6uZg22RuNdYqXz8iHiFE26xsqhX+Fh
DkuFZBZ9KUT+OvNYKvMTuqqPqwkCguHFqI78PZVHNkZOXX+8tAV7PylWoo1d0aKm
GM9saIUCgYBXO9TtUTiaoxIVTe+r+Abt5iwasAwxai/RvymdykwYFwq2NOEl02oi
fU3gbqDV3oirHAsAKJipnrASc70hTn4SM9hUKTQrD3fNIABch811ZDd8vaHEzLZG
pp9yKam09sPvQo6O4E7TJPccrddV286jZq+qO9YNIsRlYJiCnjzihw==
-----END RSA PRIVATE KEY-----


@ -1,24 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEEjCCAvqgAwIBAgIUDJK/Om7oMHhIrvfvj/NMLa2PbkIwDQYJKoZIhvcNAQEL
MIIEEjCCAvqgAwIBAgIUdUecO/Un2wfrJXsZRGkX2zFkK7wwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQC20alHsyfXrOesg+du8zV5OyzCkwSfXrtDG+eXcPd/y6g8
uw0MQq4A3aoV2mrpLA5BOJbfaXQEoTutM2xwsNubJl5jWUr7FtFDlnvv0uv1jRkU
OG/hEwpam2otW6yFGT7YnVtfrCemvzk66siFooofmSWM2aw1r/U5irqtsR3S6dMY
76YI0ZqdPYU82l7aRp+mEKPMjt7S4renIuK5Y9GUwJAWoCanq6z7rU3qZAWUkZJ0
cHAimHhXG2bBSWbCS3AedmosWmBrowACeVh7TCwvxd9tz9WMFUbMHV9mSgViJIlS
p4hdL24+5gX3QQJgswNlSdJVKBeYevHWRxp2pkElAgMBAAGjgZwwgZkwDgYDVR0P
A4IBDwAwggEKAoIBAQCgCUX0Mjoo9cckyzK+OdFaUXWyB62pk+tRi74OCPeym56n
IF+eQd0llbBJ4AfK6udhc76B+eGFu/tgaroZd3PXJqBCV5n+EcUSYxChWZwbbmVv
mv6fr9kdhJNaF0LJ5IjrN7bVWHwIkEQsaxs+50M3yPxsjC+OcSGR+uSFz9gf2e6+
bzYvumxkv0HuIHnPJ1fPJn5Dyu6odjPjoqKjY3opVIuMUyrtAlr/XsODXxWLhoQ8
KkEGZA+ErDiVp29rNH5AfKytqF+iJPzKcHk5ftINYRx482NgJOWraY2j+KrKkv+T
bv9ZVQ3GEEHvdtxljUPXeFwY/qDoxQFDWOvbS/8JAgMBAAGjgZwwgZkwDgYDVR0P
AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
Af8EAjAAMB0GA1UdDgQWBBSFhLSU/LEPJNAzk1lir5Gekdm7sDAfBgNVHSMEGDAW
gBTdZTAnocY85lHhyR/A7lJkp3t2mjAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
AAEwDQYJKoZIhvcNAQELBQADggEBACgZtM7ygZSzujOQdPy9lpC/9TJkGqNN1MBg
+SJGalygPY5cSaVRS6ppWNnLgvy+mIGTBXfxH+2eVO5frSwzTCB+seXJPnzwxjhO
yj89MKtPmy1YQAPlBvi4IOKJJS4cvYKhp3Dj1ca2JDOJ4Zb/FYRn+pjy5L6Ash1Q
0pqlt5Xty7EU9EeUgf+2wHtrsCb4O9eR0qPr500ZS/7NA66vfu3vv1vnetIuAhFP
zeJzL/9DBh73lwxoYroq/M9AslLLJUhBBRosxJGU8b3e2ylM6RRkAInI/rNYPlI+
nRA658kmyYMM0W9hUB6mTYy19z9nZv/KzF+cYm7VyEmiGSHCD9I=
Af8EAjAAMB0GA1UdDgQWBBQtNVqbrSLEx7TA7E4KhsH4h8NRhTAfBgNVHSMEGDAW
gBRlB76vjaZyFLrEUGm6DQfyjmN6PjAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A
AAEwDQYJKoZIhvcNAQELBQADggEBAFdeFLblwKDjQ/aTUe0XhsjI7IdKc+tQujrV
uU15FtRzA977ntJCYhaYyNpTBjOo65C/UOyZK/ZD6vI6vH2ENRvCgZUIInmfKw2X
w7TbTHKTDZqbnKVKqORx2+S8NBaE7KSPae+Q0zQbHLyv5btBcRGv8MECGsi+pjP8
EiJM4VMQ9obziCtWl0j/BOY+YSgjIAR9huyJEA6RibCoNhjCaSSSzNqIEzpCh3zh
6zR0NzQbC3z+B6IGWzvnPBbrc2LH+eqywSlswVy1Thp2btU8b21WcjIqlcuL4Q1u
NmHmq8K2zLLvYQ3voLYzxbeMlsuZjCI2/UJSNKOSGlmlNswHy18=
-----END CERTIFICATE-----


@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAttGpR7Mn16znrIPnbvM1eTsswpMEn167Qxvnl3D3f8uoPLsN
DEKuAN2qFdpq6SwOQTiW32l0BKE7rTNscLDbmyZeY1lK+xbRQ5Z779Lr9Y0ZFDhv
4RMKWptqLVushRk+2J1bX6wnpr85OurIhaKKH5kljNmsNa/1OYq6rbEd0unTGO+m
CNGanT2FPNpe2kafphCjzI7e0uK3pyLiuWPRlMCQFqAmp6us+61N6mQFlJGSdHBw
Iph4VxtmwUlmwktwHnZqLFpga6MAAnlYe0wsL8Xfbc/VjBVGzB1fZkoFYiSJUqeI
XS9uPuYF90ECYLMDZUnSVSgXmHrx1kcadqZBJQIDAQABAoIBAEehUmBDc+LvXj6b
1/wC0vrTErCSBgejiF+8+Tq/ClpKI3rYFz6siZzRPSke0TDXECbB02tp2AlQWx97
vItS4Fij6eunOteykv+WYK0QyV8Gj8L6ceuQGEh0CGAJQUlNIPCihwCvZUHSPsw0
3ahujQvgZ3QOfYjzjnOjRY10jijqxxsOfGJUjgFsD1ltMMLCrHBLnBk/KXgWfEv0
pJ79RDhQhZHcB8T5FmAc8cPnXj+uZG8rZJ+VHQq6CM8/clwTb0SBk0gdWuO0h8/L
0k4qQzZTLnB3ASS0TMbyLtdnIuCnIVgXYT5cc0THPNI+/2gdklUl5kjUviNG9gXT
PFujFcECgYEAz1tFvIs7AFS9Y1usxmzd5rLReninnyIiGORmFMDT5vg4eQW3yzT/
4NLVn1L61WKxrckpb4C9J4rDVVrDbV4RkO3CM7Fkeko/bvPwJj5h/aM6pvguMD1v
UqMZGr5QcaYakabFt1ELUTURGlkkvg6fsvoac8F96Axeu8QhEIPLZRkCgYEA4bTL
ebP5I5rjiq921JWm8N6/qnxT2cnWIXpeXdYWnSvtYfQ9LrbVt17xyJ+niWmZMAD/
dmdWbyrTTBGjsAUT+U+uX2sup+3pXSG4nFrRZ09n+vNQZa8ZliyTg4x1fyjJmoXj
/yEuFjT5pwghE2DroWy30cIxWB+cG8h6/4gbEe0CgYAX0xreIPk0fogMJHpjihqs
6RrcgYRw6lEUnxmDhOxT+20xqpCFjp9fy5mz5qrfXamgmB5kq1wQcQckhXsy074c
8tR/cABldKZ5LxoGquBDbj/M83MLI+PokMjPo9JGXJls5saM24j7WaePaXKpS7DZ
tZbYegDnapRv6ocKCLqT8QKBgEINMbrFA3T6//n3DXR68ybsWPTge31xxrt3XyRg
4a9PSqHu4vTHxtVp8KIArvvUrmLQ9/HDnhTcWIebZea+JbFBM0tzR20Xf8Kkeq0v
Grb8Evbqu612R6ueEfFeaogy/IS/CBECucT/7cuG4n8UTwCnm0fEZ8JdRccPrYcY
YqulAoGAWyNwkmFOooU48FPFkt7OKcXD/fLzzcXTeW2fcje9+bX3Kmkg9BE2B1Wg
MDCMpBOKkUB/COtqS17iC7zdViyDRO0rxybBLFNAVEq23K7/0zVsxuWCEIPnjeE4
nbh4EH5L/heT1/HrkiJtXvNd3hpYDXiVD0b06iYBVO19p3uDuO0=
MIIEowIBAAKCAQEAoAlF9DI6KPXHJMsyvjnRWlF1sgetqZPrUYu+Dgj3spuepyBf
nkHdJZWwSeAHyurnYXO+gfnhhbv7YGq6GXdz1yagQleZ/hHFEmMQoVmcG25lb5r+
n6/ZHYSTWhdCyeSI6ze21Vh8CJBELGsbPudDN8j8bIwvjnEhkfrkhc/YH9nuvm82
L7psZL9B7iB5zydXzyZ+Q8ruqHYz46Kio2N6KVSLjFMq7QJa/17Dg18Vi4aEPCpB
BmQPhKw4ladvazR+QHysrahfoiT8ynB5OX7SDWEcePNjYCTlq2mNo/iqypL/k27/
WVUNxhBB73bcZY1D13hcGP6g6MUBQ1jr20v/CQIDAQABAoIBAD5l/zmMj/LCiehF
tj5HauJtWpeUuNiizSDZfLwaMQIZ/U0qqT2abrCl4bucN02eM6Nirsgc9xrexc+9
LVyan7cm31uernNK2G0n5ScUOnLTo4dVhqwas2v38kAxS6BOlDgqXAZpXssz/PDY
viHTp/jLS+jC7BP89lrl2U53UMxx3em+FIdWmZ54wNcLrxD585Ibcu2ajRS2PQT9
szh8W0fEkyKCk1MOhCMFvXOuO34BqO7kMev1QTQFQ5QeriQU/XB1MWFwKldX9C3b
0l5c4zJ1msTXpm/IyD21JtYfCJiH3Ny3Ojl864Ud5pdG0xQ2d4qHE6fvSRvzz9aG
wAntmFUCgYEAxW4BZctTwrNMZwrXV4kNKBhkh6zl4BMxMexB0ze0N3aJmPEAcJUb
7uoT6YiN3XA7DBRYOf7MmcpcLyqJi5Yt9T24xGIbU50jgxMw1KMIHf0lbGKQxDJN
mdvLKM56O137qvnhaRYOX73pq2+DWD4UT1C/SWF+dkMTuG5aRz94h78CgYEAz4Nh
8IRZMET+7dg66qpGqZ37Y2igANrxvMwKmY9VlAbHRZu3fpt8WwQ2Vs4CFk/rfmbI
5H6mAUVu8pKYvi910wWvLlPPh/34Sxoj4TJ6QRlP0pD3Um8O/PYa/0joNlf/rZjw
6O/7UkXjzubOUHqcEnYWEm+oym2e0Av/8yLWazcCgYAvZgej3rrPRaiUHIAync6w
z5pjEFloAHORHr127iqwHh9Ovp1yafn40+3P5V7ZyPYEImZEFi4cxf53vGilQHrs
I9NWIo+Y9WLvNw5EHpf2Sy5O5SMIV2NWCvStaVTjJ98h3zgEuKzew4N0CyOnbdAG
csZZ4bQwxE3Zu3SlIlHXlQKBgQCabEj79WFxvEaBtMHTU7eWDcy/o2I+gLAYMTdK
IxIqQAkW0dRxUT/vc2kEm/WNqRe0TsT81QqwM31m4pTsIuFpkfdVYGU17FdTfDZr
JWc4/p8aMWr7W04qDPL2Oskjd8T66K+OiNfb18q5c6Tg2v0998ZhHdrcGUtvwx5L
TweFbQKBgFOC+y+2mVhlKyUxibiGmN8LAmnFYmaFyHgvQDCOOIpo0VAILf8LyEQf
EYQteM3us92jBUOuZ8NMUaI2sBCzaLPpZTkL02iIr13Y0IYtu0LgIfI157kXjoow
ByRl9TClp9RAkmbJVCxDsgR9nJVW1L1cCw8M1p5+n9SQMJFUQghW
-----END RSA PRIVATE KEY-----


@ -1,24 +1,24 @@
-----BEGIN CERTIFICATE-----
MIIEEzCCAvugAwIBAgIUA4E1YyD1y228iRlwJgN2TpMYiXMwDQYJKoZIhvcNAQEL
MIIEEzCCAvugAwIBAgIUCHfFkPZDhLT2oK0fhu2TP4xZtcwwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMHkxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEVMBMGA1UEAxMMZXhhbXBsZTIuY29tMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEA3Jo8Uwuzyj5BU4Rx6eBA0bkIccRgruUWpZDQ1e0fO9Au
WgYcRSE5O/EiJW6MpHKhlT3jhBw9bmzDrcAF+Hre/1hCfRPP3udDerYAEbpHEQ4a
y1c/FlsRqaOPWS3b+Yp+RQCvyDQGiJ7TPifm0vU4opeeM+MUFmnFOdDxS0FxJOWD
JCdCckr4Srghn56srwQPJISzS0q73YT5hv1lgg4yr3PHtEzBYvzw2yRxY8nEE6ZM
8hGnOwjDn9FNRMPr13wd05w94hq8akDHqvRp4yHKXfsw11fW7jUj+1D2GsHL4VVT
mafx+RJsHy33/Lou5eJ5HIOeYsjX3PDs2KS7MlZX8wIDAQABo4GcMIGZMA4GA1Ud
AAOCAQ8AMIIBCgKCAQEAv6FwE/OeAwAmZutb+DTP+y7FKpONm0+aHtESlrmTRmDo
iLGuoEXjYcFOCHr6gQJTYKEAivBK3TIN0dRirNBqLQxssDlsbU4ZXG++OciH/OoR
8E+VmMsbqof/E0nhVFYDumnuoS+waX8elzuDjDX4u7F+d1/gIb8aYU1VDjtZxF2b
vqaPyroOn5HvBs4MW+BpAB4guHfDXpK/oAnJDsq9JTUZqoG1xOZfHNhA/iVBSAJv
hO6aBxDCjzwO1gT5kTbNELrBCJ0V2NXlHFcBOPhxyl4+DIPwa0Q8oExUEOzxnHFu
U3GPoklbLp0RNNiqHHKfD4yaLI1rf8dH5AZG/QjK4wIDAQABo4GcMIGZMA4GA1Ud
DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T
AQH/BAIwADAdBgNVHQ4EFgQUj+ruFZbJNhjHRC90urHmkegftsgwHwYDVR0jBBgw
FoAU3WUwJ6HGPOZR4ckfwO5SZKd7dpowGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/
AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCcn0Y/zym6tla+ku/sMiAMrvB1NZNVd1eE
HxPns99H7qdlGQ9bnxf1D1WxKu9HztmG3DhfdYUxYA3Z772+rrx3AhVQYquZAvvK
kdZ6ixHynY90Q5PX48BlgZvzd884RZFDjwSCoGkbdYqTtD9FfuGS/fdYcHZJvVAJ
1oRXH0D0e3I1piUAzfFkLTXqgmBHh1zgNHqq3CVcjA/4CVGn2foI5r7yH9TIO+Dt
hegJjObxJQJjobDJ5ijXbkPLt+LC7OFEDGbn8IKBIMmLYXULGIJR8wLKpiaBLV6G
zoME6J9TZe2g2VQYcPxwshesIx8MAcqNVjOBI6pBey5t5Lqhbj4Z
AQH/BAIwADAdBgNVHQ4EFgQUS6HUgF/GO55DtrxLN2dcyGACgnQwHwYDVR0jBBgw
FoAUZQe+r42mchS6xFBpug0H8o5jej4wGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/
AAABMA0GCSqGSIb3DQEBCwUAA4IBAQC2pcpQZVkvZO4TC1zluGhG6Yhz+fzZuNlp
jHa0U+WIdTPerqZZ98iU1hCm3F6voNjAQ0eHRV3z5q/rDph8AR7cVl9cT2rLO/zA
F6M4QDMetddj7EIUq2/B1CWzHkOvwcJFgc0OfBWzJYAShFv/B7Ir1WpdtixOvyOH
kWsWoy1WmatQvBQ2jDrvdGRWhqsPmg2uGbJrUjABeYtc5whQr0zscy+jEIrDpqPT
VVuUu19/ALvdv2kOC+ayhH+vTAvEA38P6wlavDlgsv/M902ORWahdLQ/H0XX+gVP
QDe3MyrBR6QkjAfKJvnMv/5x8mj+AFWfnALnRb9j3/q1UOwITWN2
-----END CERTIFICATE-----


@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA3Jo8Uwuzyj5BU4Rx6eBA0bkIccRgruUWpZDQ1e0fO9AuWgYc
RSE5O/EiJW6MpHKhlT3jhBw9bmzDrcAF+Hre/1hCfRPP3udDerYAEbpHEQ4ay1c/
FlsRqaOPWS3b+Yp+RQCvyDQGiJ7TPifm0vU4opeeM+MUFmnFOdDxS0FxJOWDJCdC
ckr4Srghn56srwQPJISzS0q73YT5hv1lgg4yr3PHtEzBYvzw2yRxY8nEE6ZM8hGn
OwjDn9FNRMPr13wd05w94hq8akDHqvRp4yHKXfsw11fW7jUj+1D2GsHL4VVTmafx
+RJsHy33/Lou5eJ5HIOeYsjX3PDs2KS7MlZX8wIDAQABAoIBAAurkSNnjBRX5MbR
S+Fufp9ZpYu3MtItxlvt5E5c4/kbE0ip6Bb0If38zKykQ4Zzlf66Lm4PvGQ6FsP7
U5WIkoF5ntLVGHsXSueT9z741sedMmetbuX03WNXBr9WALwbyoBGAAacUfgqRsyT
+c8RL/TSBomCKs7gu5FKb2lmxeCItpeyxGPp35AnXR1B5Q9GK7IoFnYlpJO5Vqbs
t1cpOikotKTFrylq6hxGbRBtzBnrOLh51UneXR0r83oGzSrpHm5+BLMG5orYnK10
UKBge1HijLjG9EDpCLdR+T/IJyz3oQyFCPRVFrIq5/76C8LsMSSjKdM6apCMc9d7
NMnHPYkCgYEA6AWosramFnuAjihEkaBgJdpFRHfco+FlpglcS5cvaORmkW7dLl6R
SZaO/mRJuhtC3j+LBXdGaP6rMdW7it9wRXs160e5mfSrEb7H66bgD0FfIG+iaEPV
8OXESgrSUI69neddRgL2QXZeY++hSzaMNGef2lPpFd+irgkud+bltSUCgYEA82Z1
/VVubW/lcnz2jBehssXL5yqNa/ySVvEWHdcKG6b0RNsoQCmOUZnBIwqDLMT4+62v
e3bYNi/y44oJi741ifT6prPPQqv+J3tsEAQ0dlcscs71q4TXJEpLIoSlM+lQAsYA
IhOkeLA5YKr2RzjnD9m2d+awErK6GGtCQqGbqTcCgYBHzgiJVlFMP+hnFjsyPknD
LSumptmXthe5LMhtdFptwdGkTIUS5p1cAsY6IFtYxzsIgO3LQUB/GeFtSNIDhma+
egUTzVy5MqkGkt5YJYrN7dM8vI+saOH67YCz0WmJGMPB7GpHUn9XfwRzNSPbnQQC
69biwHkwFcfIyHqjDMgmKQKBgFWsTwOouoHSzRSLX8zX1Ja0gJ0RoU3NTUVE/t/p
/SjWj0xdR8Gt3uZiFFVdMebkPi94ZzgyENCh+ACXadzCSt4QWNmsmNuC5qbHDrZk
hILTFFYk+twwmfmwHNo9jljDWuJfB1T3TQEeJlQcWSugn9Q4cb2qeXdbaZ2Gw3/o
mn3pAoGAbrBBySSjr8AnCmW5ipJ+ys1f8Q5Sg3Skvvx72KNH640AFY9c94glumjZ
7Ryz1BcVIVszaJt9q21xQZeCpQs6VBHQB04AKlJLq/wOccg2Qb4qD8++zNyK5Ndm
Bht4nGrX+sGDD4EwLKUCTNgxBgQju6ZQrT3b05xlTsRG0fA7SQQ=
MIIEpAIBAAKCAQEAv6FwE/OeAwAmZutb+DTP+y7FKpONm0+aHtESlrmTRmDoiLGu
oEXjYcFOCHr6gQJTYKEAivBK3TIN0dRirNBqLQxssDlsbU4ZXG++OciH/OoR8E+V
mMsbqof/E0nhVFYDumnuoS+waX8elzuDjDX4u7F+d1/gIb8aYU1VDjtZxF2bvqaP
yroOn5HvBs4MW+BpAB4guHfDXpK/oAnJDsq9JTUZqoG1xOZfHNhA/iVBSAJvhO6a
BxDCjzwO1gT5kTbNELrBCJ0V2NXlHFcBOPhxyl4+DIPwa0Q8oExUEOzxnHFuU3GP
oklbLp0RNNiqHHKfD4yaLI1rf8dH5AZG/QjK4wIDAQABAoIBAE5QTHxq4BV71zXS
U7ig5KpTV9JpkMJ7CpIzgTRFzNFDQ2SxsJrhVOabWCeREpTsfWSNB6rAPugcz5cE
A/t6BRo57KUsIoqdEzI6nHQC5shOZFxgOdPClaDgiTa5x7Nun4FsT1BiK+dBQyAs
+zqux+L0y6k/blp8Peyr7OmvCaV8osB4/JLLH/WHt2wWgqFWisyIT7/D/gQlQn81
Hvv84BAL9y8iyCmCzWhQL0YisLPyaFkGkb7DK4wznWxfQn3jRAkZDQMH675o/OHj
8nL0NSdCA/MGLEtPAXegM7kMPCf68JwZV3gPZDyEK0JES1oT1z+op7JHuatlhgdL
WTA10fkCgYEA1QXRRpOeZvHzGVMzrXrrgS5GeaR+XgjUalBGgu0w8nSET195oXu6
Y8dVco4FlEZ0Wq7evA4M9XVJyKQkkEGR7Nkv922p8RhG+U73ajODANAQURIwqOPJ
01IrfMIK2mkXDZwzkAxPaOnny2OMZtUznmZnNdJ/vLd7U0sScNPVQp8CgYEA5krD
ImQ8U9/S4VOK78i3FMWMoutffXpW71lEc9tsz1YWUPf170raujjF5mqtBBXup9ko
37CmVk6mOO2TdXLg1feMaVBsoblL6iPoBZot/fLdzgmICccpimst2yrUZEHwJpdk
9k95xEZUQhN73eY/Ih6b5HZZ/ygxfAVvhDFzNT0CgYEAru5aDvUGbU9e7HsQwvNg
FfMkWJwmUZ46oRtO7BFP0qqwRGYJAf0S8QEuQCY0mrDIt/dGXXPEXIV2k9eHVxch
eDhaVXuuxJfFINIiBwpKGA7Ed27Smr6EbI7bu1W1h+oozjppdW9GfscmXDVhhMir
3PYG54H298hM8/eAKzsps80CgYEAzk5Vp76ySNVv3spv4kYmtaYQSnef8RIjRYLs
DvqY7NmLXnf0y618a22m5LfWTZ20Uov50QM40ILe6Ir1GjeS8jw1frc8yljsiFIo
brRj1We4ivcA9vmD3mwMBZbF9RcZJAlmuj4SsOHsY9F+mxjEoDVZpP7duvbv9dIM
yBlgw2UCgYB27Zwd3ta3x9mx2wRHaFMlcnwh+ERRGO6yAKndsr3L80Gkb2HxHlR3
cA3dIg/YAGocjROOIO8MO5rclZm30qM5rlv0SH6gQ4nW16PZgHSuakgeX5fVHb4v
nT8TKt0PY9r5e5cdHzpwlrDD1Q8GiAHvn0oH6oioGxlIV190YAHvGQ==
-----END RSA PRIVATE KEY-----


@ -1,24 +1,24 @@
-----BEGIN CERTIFICATE-----
MIID/DCCAuSgAwIBAgIUXO0yz2xs67pa/mT7ro2mx5d9+8gwDQYJKoZIhvcNAQEL
MIID/DCCAuSgAwIBAgIUMgfWSv3FrD0dRcHmPNxLr7p9BXcwDQYJKoZIhvcNAQEL
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTAxMjExNDQwMDBaFw0yOTAxMTgxNDQw
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDgyMTE5MDBaFw0yOTEwMDUyMTE5
MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf9KzBo3JFZ
1Zi8lH4t6MSCx/de1MyHSR4S0sD+rl/GcdkdPLOLGKikHZLMU5Z9u1y5HK23QWBy
KcdLFDkQckoUBDxAR+sxdVX5RgVTkMR9zdv/4HVertakKNutrno0lUdqhCtV05YR
HeU8/tPMciyE4cBbeSLlvxGe49xW2cl0iZ4Fqu9xhCS1hlSLDNNN1Azw1hwptGkP
3voTVWPzU95EXhqtnDhf7QO4QpcIZ44Zm0pWMoyip9TE16bTwjltiAdOs2shK+9h
oQ9b2OW69cWpvtwnWsWAACveQHFUZ7drIDc7cv3OPvo7aonx4CHKqxEqnmlIxipP
t8nkKtyHr10CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFPc/GrBA
B2mcJnSACyKgkNt5GWvrMB8GA1UdIwQYMBaAFN1lMCehxjzmUeHJH8DuUmSne3aa
ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALKKf9zhwcgu
S53Vd92PLqHtDNpTbl4nM/5qYy7nHV5FHW7uolQHFA0BjJwxz7/LC39G950PpSIs
K7Y99P7aMPVqVH5FGL+uoDbHMzt/gIUGgWj51J38+x6zN/9vIvAVIBClBhzEuB2k
WJU6KyB1V5G+1wnnKRXLB9QC0f/7vqd9f21O7sJmWeVhGJuEUwwAp1p5WDGM2Tn8
Fjy57O9f1nT4WVqWhB5EbvYGDF2Z5DWyKz90EWOwVw30ThcQHF57X6WJdlNiQOrY
KIWO475QKPwbUSpRkvw1jwvllU8s4l6pB1hKTAcUK3UsWrzpf2+m8v8ou35uFD/c
pd2bBx5MVlkCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI
KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFAcMdmvX
8NC4V4Z67Jb/pNUwai7cMB8GA1UdIwQYMBaAFGUHvq+NpnIUusRQaboNB/KOY3o+
MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA
F30eIP+5u2gZItB9VpohOFjKoW2Ts+TIGT+nE2/b70kpX1JXuOHvJSFgYxn1N9KK
tJ03T52ENwhFw7ES9sYmpAKjt/Z+2W1DnY1n29fWvGYKReOt3TjjPQcC26v9l+Kf
oNSFQEac2rdzKZEzkPDRpvlwVQuACGNbLpbpFmV4G1lavt0ecRuhohHgCtO03nx+
UOQ1Enla3gA002qw2OI71cmrxYzpS3tgZCGR3QsOOibls+/1kBQE/F4HIyhLIImo
dGC98Q1hLYVS8oKwK+DifK4mDdRSbXE7ek/5+vMOsKWdcs2dTcn9WoijJV6YVX/e
HU7ocaMm/c5SQ0yndcCU2w==
btQjJEWHD/0gYsNLFg3tDxZ64U/HfNlh4USGOK02VL2LteMcV8AoYlZ3jwmp4+33
D3HlqLclJNABax2pOvTHVnQlf25TSNwJRtmzOvcg+6xYbPdgRoeEVsWbmgbpX7Vi
P8FYelYCiYTPezjqZgPG1gmq0Uf/drlTrjwsG2njEcuK7hip+LdJnIrtpIrabpIk
lZRa7Y/JBM3gP/rR1fu9lhzJ97s3NabuHzPwyouSTTknaaiGwSV8F5frh9NGcFhd
G7giCLZLKklQB4IUTOFcVFSZmeAGy6KBqyT10N2kkBrsrcWhyMKIU9X0+6hh3Tlc
JEla9as6qFvt1dFGp+qeww==
-----END CERTIFICATE-----


@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAt/0rMGjckVnVmLyUfi3oxILH917UzIdJHhLSwP6uX8Zx2R08
s4sYqKQdksxTln27XLkcrbdBYHIpx0sUORByShQEPEBH6zF1VflGBVOQxH3N2//g
dV6u1qQo262uejSVR2qEK1XTlhEd5Tz+08xyLIThwFt5IuW/EZ7j3FbZyXSJngWq
73GEJLWGVIsM003UDPDWHCm0aQ/e+hNVY/NT3kReGq2cOF/tA7hClwhnjhmbSlYy
jKKn1MTXptPCOW2IB06zayEr72GhD1vY5br1xam+3CdaxYAAK95AcVRnt2sgNzty
/c4++jtqifHgIcqrESqeaUjGKk+3yeQq3IevXQIDAQABAoIBACc8fjFcq2zz03E3
BhPChpkhqrM+LfNQBm+7Y+Z0aYtTLoQ2j/NZ/nA4T3Y2zLyTI5mqgEsrgW2n6vDt
OerRbw4NJroSm0O00Gj9N8l7AKxmh7ga4SsmffKYH355k3PMMul/9Z+oNe8Sx2VT
lKRmPRLxY5M57qNai1yencknXFpxetSzHwusfDFFNiStN8rTDeIgJtn9FKxhMAcJ
P0D8rHH+x4/8XGSVtjANJr22cj1win0dce4tVVUQp60pBAFhGIY+uPOM8vmxkJYx
xQTmR6bh7S5KrYT1BNK3cpo6gQGE6HJsPzTuijN6hqvJ7ufTl/lq2T0ETVEemHOf
B5NKOwECgYEAxigkRZeaCsvvUcPWEYrnIhZt6SfCx5iKlkVQsG/uYps3aWohkBAY
Ws6rr55qkH7hsye9ikeyhT9VDnWHeAbTr2SKIsa8CAc+gQoPzYfY4GVYzbNP+/rj
d1jS/s5/LyVryi3ln3EJN1fUHv+ZzvEWORSa/WVHkBuxMxoeHX8PhSkCgYEA7bJK
inBlLXtUy+c/q5DigYLIg0MquQ9slcfJapcwgswmbLnagtW9oQxlsM/uSiqR4br+
9SKD7TNLNhwJmByG6HlPyjCF2FGPhUg4pIWuRckPqJ6o2SgONupD3mZdu/CyzsuO
pyCqq0d2yeLXW77MxidMUeNfPsAA9lvRRulhCxUCgYEAwbM1ma2TD+DABP5ZQHa2
b3TbZeHPHgr31eLV+FLCBTPTG8F6I3gIRqPl4dsKMktFVzqOpiBl2qjI/URX8zVB
Mh8mhM4duf9S0xLB1dhoYRnQj+srUZazSdPTFO9IFg8PaegpoQz+xFGfcdnLQSYb
4hpJU0/wf2cCdYCfVZgB1NkCgYB9K/a1EJs3aEsvVYfiAVpGeWi+NxC4g7ba6WrY
BuY0+u0BNJ4taAGEXdLvWZBS3jgUdzTsQlDXCLwCsqEayWsB4WBzSToywECkH3Q0
r3EmrsrgMS0Zrk5N/O/gnmeeIRMIc4mb2UgHCoszpZFjbwbHEsrOFL6DfPkEwzVh
8mR4QQKBgQC88WRxSGnF0AyoM5tB8/x80O9a4CLsSwfFVzEyJcdh3uL9+JzfNVta
YsXy5ah2SyBrI7xQd/LXhv3/7GY4eZIQuwcOH74hQNHdm2u3bWfmg39jH+HgWK8m
mqYvWYE9qLXIVwBzsCzQtxq4z90KMiZ2qXV0YMFPGjZt+bBkbjJTOw==
MIIEpAIBAAKCAQEAsop/3OHByC5LndV33Y8uoe0M2lNuXicz/mpjLucdXkUdbu6i
VAcUDQGMnDHPv8sLf0b3nQ+lIiwrtj30/tow9WpUfkUYv66gNsczO3+AhQaBaPnU
nfz7HrM3/28i8BUgEKUGHMS4HaRYlTorIHVXkb7XCecpFcsH1ALR//u+p31/bU7u
wmZZ5WEYm4RTDACnWnlYMYzZOfwWPLns71/WdPhZWpaEHkRu9gYMXZnkNbIrP3QR
Y7BXDfROFxAcXntfpYl2U2JA6tgohY7jvlAo/BtRKlGS/DWPC+WVTyziXqkHWEpM
BxQrdSxavOl/b6by/yi7fm4UP9yl3ZsHHkxWWQIDAQABAoIBAQCVyfrCDqlsT+Li
1UBOIp0l/uIEnXCAD3XgodL6e6249FVgR1brFlEtJDqapHO+XhQUQS7ml0ScqeA2
cj6EPfxLOV0P3tqHnnMN4gvKhAsID9AsiUVnEuJ//C4j4FK4h5CyRjEdm7E4NTSY
ZgfeoHPKdAinZ0eh4Ad+SKt0jvmCPDD1L+6bxpJ6E258xPDxB71rHTCgnwZZmXrN
rHDg07tVVU6lYtXEsZAsyIBIxXV/RaCt4xSijM1C7kuSsUE3+CCw+pzQUvHDxkuk
FPxE5hONzkUaCSBKTv4L6gVaiYa30Jo9THuTRWvzDJmcnNwlYgRLeIR8PJY0eHqv
FYJbLdQBAoGBAOeiW1aHpNO+K4BqVMzm7fvz0a3DBkml/wIKSvw3dbTYnyIXFvDB
Be7OZLhiWPwaE/58aQWh+/OGCr88yyCLAt2mOQ5aMalKpdSCaY5swEnzt923pY9z
jt3DnhX8aXggPbqM6eJjaxJY7jIMSDNKUQZKJeSesr/EzFo0CRkkepyBAoGBAMVS
Z2nIY+G2+P1VSUEbI3dbaZ3ciMEDLc+rZQs0fx8+xQCD71eCU9ggxf+O0R1/0smm
9so65KrmKl3yOt8OQW3YgUpQqIQdJPHnfuTnsU7y/+zRl6k119gV89LLDlCM7nfW
5/ey/iJLXQxfC7OoFF/hQM0odmorA8jBuqKDWy3ZAoGAZL6gq0njzpRvpzKYH2Zx
K5woHkMsgOvJtcF0S65za2ysCc+xEpVhVzQ9alScD0noWE8T/nctdgVetz5huo27
eVvKhQuFffQRnBP8hQ2XtJJj7fLp9zJzeNCT+UwHM1ASiQiw0N4cu6YiM3JUFLrF
8s5dHMpJRE778l+fdWgATAECgYEAmJ3osE+2uUCtCjvpwbp8zvdcFCYbe7W6vBGj
wGvlGsSQ2JozB2sc8GBA5C2RHhDcdu11meq9LFWDVVBiKl27S3uWXGVQQYbNKXDU
m7V8VUTrnz5o4A5uGIq6IEK/mpu2YehNWC8QEnRZzpTA1z7cK2Bsn4F5PRpx/deh
Q8r3PdkCgYBd2rT+S4/51C5AnIhgMF/PYl0+DYMFD8HAfsx7VTaIUmFQ4devHMOz
J6lbqRyEITZtXgna1n35LkyBDcPwsEtntjJOP+xneCtKzozdzhXyoAw2xQR3gqvV
7YtV3miYQiOTqjOefViMhR/XiOV2zng3OId1AQObfOUZODJPfSN26g==
-----END RSA PRIVATE KEY-----


@ -27,6 +27,7 @@ import (
"go.etcd.io/etcd/mvcc"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/testutil"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
@ -169,11 +170,11 @@ func TestV3CorruptAlarm(t *testing.T) {
clus.Members[0].Stop(t)
fp := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
be := backend.NewDefaultBackend(fp)
s := mvcc.NewStore(zap.NewExample(), be, nil, &fakeConsistentIndex{13})
s := mvcc.NewStore(zap.NewExample(), be, nil, &fakeConsistentIndex{13}, mvcc.StoreConfig{})
// NOTE: cluster_proxy mode with namespacing won't set 'k', but namespace/'k'.
s.Put([]byte("abc"), []byte("def"), 0)
s.Put([]byte("xyz"), []byte("123"), 0)
s.Compact(5)
s.Compact(traceutil.TODO(), 5)
s.Commit()
s.Close()
be.Close()
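
Two mvcc signature changes surface in this hunk: NewStore now takes a StoreConfig (the zero value falls back to the default compaction batch limit introduced later in this diff), and Compact threads a *traceutil.Trace. A minimal sketch of the updated call sites, assuming the same test fixtures:

// StoreConfig{} keeps defaults; setting CompactionBatchLimit caps how many
// revisions each compaction transaction deletes at once.
s := mvcc.NewStore(zap.NewExample(), be, nil, &fakeConsistentIndex{13},
	mvcc.StoreConfig{CompactionBatchLimit: 100})
if _, err := s.Compact(traceutil.TODO(), 5); err != nil {
	t.Fatal(err)
}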


@ -357,6 +357,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
}
func TestV3AuthOldRevConcurrent(t *testing.T) {
t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed.
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)


@ -291,14 +291,14 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
}
le.leaseMap[id] = l
item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()}
le.leaseExpiredNotifier.RegisterOrUpdate(item)
l.persistTo(le.b)
leaseTotalTTLs.Observe(float64(l.ttl))
leaseGranted.Inc()
if le.isPrimary() {
item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()}
le.leaseExpiredNotifier.RegisterOrUpdate(item)
le.scheduleCheckpointIfNeeded(l)
}
@ -505,6 +505,7 @@ func (le *lessor) Demote() {
}
le.clearScheduledLeasesCheckpoints()
le.clearLeaseExpiredNotifier()
if le.demotec != nil {
close(le.demotec)
@ -648,6 +649,10 @@ func (le *lessor) clearScheduledLeasesCheckpoints() {
le.leaseCheckpointHeap = make(LeaseQueue, 0)
}
func (le *lessor) clearLeaseExpiredNotifier() {
le.leaseExpiredNotifier = newLeaseExpiredNotifier()
}
// expireExists returns true if an expired item exists.
// It pops an item only when one exists.
// "next" is true to indicate that an item may exist on the next attempt.


@ -369,13 +369,27 @@ func (b *backend) defrag() error {
b.batchTx.tx = nil
tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
// Create a temporary file to ensure we start with a clean slate.
// Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
dir := filepath.Dir(b.db.Path())
temp, err := ioutil.TempFile(dir, "db.tmp.*")
if err != nil {
return err
}
options := bolt.Options{}
if boltOpenOptions != nil {
options = *boltOpenOptions
}
options.OpenFile = func(path string, i int, mode os.FileMode) (file *os.File, err error) {
return temp, nil
}
tdbp := temp.Name()
tmpdb, err := bolt.Open(tdbp, 0600, &options)
if err != nil {
return err
}
dbp := b.db.Path()
tdbp := tmpdb.Path()
size1, sizeInUse1 := b.Size(), b.SizeInUse()
if b.lg != nil {
b.lg.Info(
@ -387,11 +401,17 @@ func (b *backend) defrag() error {
zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
)
}
// gofail: var defragBeforeCopy struct{}
err = defragdb(b.db, tmpdb, defragLimit)
if err != nil {
tmpdb.Close()
os.RemoveAll(tmpdb.Path())
if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
if b.lg != nil {
b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr))
} else {
plog.Fatalf("failed to remove db.tmp after defragmentation completed: %v", rmErr)
}
}
return err
}
@ -411,6 +431,7 @@ func (b *backend) defrag() error {
plog.Fatalf("cannot close database (%s)", err)
}
}
// gofail: var defragBeforeRename struct{}
err = os.Rename(tdbp, dbp)
if err != nil {
if b.lg != nil {
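
The defrag rewrite above stops letting bolt create an anonymous .tmp database next to the live one; instead it pre-creates a predictable "db.tmp.*" file with ioutil.TempFile and hands it to bolt through the Options.OpenFile hook, which is what lets Snapshotter.cleanupSnapdir garbage-collect leftovers found during startup. A minimal sketch of that hook pattern, assuming the go.etcd.io/bbolt import path:

package main

import (
	"io/ioutil"
	"os"

	bolt "go.etcd.io/bbolt"
)

// openTempDB opens a bolt DB backed by a pre-created "db.tmp.*" file so
// startup cleanup code can later glob for strays.
func openTempDB(dir string) (*bolt.DB, error) {
	temp, err := ioutil.TempFile(dir, "db.tmp.*")
	if err != nil {
		return nil, err
	}
	opts := bolt.Options{
		OpenFile: func(path string, flag int, mode os.FileMode) (*os.File, error) {
			return temp, nil // ignore bolt's requested path; reuse our file
		},
	}
	return bolt.Open(temp.Name(), 0600, &opts)
}

func main() {}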


@ -18,6 +18,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/traceutil"
)
type RangeOptions struct {
@ -102,10 +103,10 @@ type KV interface {
WriteView
// Read creates a read transaction.
Read() TxnRead
Read(trace *traceutil.Trace) TxnRead
// Write creates a write transaction.
Write() TxnWrite
Write(trace *traceutil.Trace) TxnWrite
// Hash computes the hash of the KV's backend.
Hash() (hash uint32, revision int64, err error)
@ -114,7 +115,7 @@ type KV interface {
HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
// Compact frees all superseded keys with revisions less than rev.
Compact(rev int64) (<-chan struct{}, error)
Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error)
// Commit commits outstanding txns into the underlying backend.
Commit()
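
Read, Write, and Compact now each accept a *traceutil.Trace so slow requests can be attributed step by step; call sites without a live trace pass the traceutil.TODO() placeholder, as the test and view changes in this diff do. A minimal fragment of the updated usage, assuming an initialized KV value kv:

tr := traceutil.TODO() // placeholder for call sites not yet instrumented
txn := kv.Read(tr)
r, err := txn.Range([]byte("foo"), nil, mvcc.RangeOptions{})
txn.End()
if err == nil {
	fmt.Printf("rev=%d count=%d\n", r.Rev, r.Count)
}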


@ -25,6 +25,7 @@ import (
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/testutil"
"go.etcd.io/etcd/pkg/traceutil"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
@ -47,7 +48,7 @@ var (
return kv.Range(key, end, ro)
}
txnRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) {
txn := kv.Read()
txn := kv.Read(traceutil.TODO())
defer txn.End()
return txn.Range(key, end, ro)
}
@ -56,7 +57,7 @@ var (
return kv.Put(key, value, lease)
}
txnPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 {
txn := kv.Write()
txn := kv.Write(traceutil.TODO())
defer txn.End()
return txn.Put(key, value, lease)
}
@ -65,7 +66,7 @@ var (
return kv.DeleteRange(key, end)
}
txnDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) {
txn := kv.Write()
txn := kv.Write(traceutil.TODO())
defer txn.End()
return txn.DeleteRange(key, end)
}
@ -76,7 +77,7 @@ func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
func testKVRange(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -142,7 +143,7 @@ func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
func testKVRangeRev(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -178,11 +179,11 @@ func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
func testKVRangeBadRev(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
put3TestKVs(s)
if _, err := s.Compact(4); err != nil {
if _, err := s.Compact(traceutil.TODO(), 4); err != nil {
t.Fatalf("compact error (%v)", err)
}
@ -211,7 +212,7 @@ func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
func testKVRangeLimit(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -252,7 +253,7 @@ func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutF
func testKVPutMultipleTimes(t *testing.T, f putFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -314,7 +315,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
for i, tt := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
@ -334,7 +335,7 @@ func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, t
func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -355,7 +356,7 @@ func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
// test that range, put, delete on single key in sequence repeatedly works correctly.
func TestKVOperationInSequence(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -402,14 +403,14 @@ func TestKVOperationInSequence(t *testing.T) {
func TestKVTxnBlockWriteOperations(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
tests := []func(){
func() { s.Put([]byte("foo"), nil, lease.NoLease) },
func() { s.DeleteRange([]byte("foo"), nil) },
}
for i, tt := range tests {
txn := s.Write()
txn := s.Write(traceutil.TODO())
done := make(chan struct{}, 1)
go func() {
tt()
@ -435,10 +436,10 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
func TestKVTxnNonBlockRange(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
txn := s.Write()
txn := s.Write(traceutil.TODO())
defer txn.End()
donec := make(chan struct{})
@ -456,11 +457,11 @@ func TestKVTxnNonBlockRange(t *testing.T) {
// test that txn range, put, delete on single key in sequence repeatedly works correctly.
func TestKVTxnOperationInSequence(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
txn := s.Write()
txn := s.Write(traceutil.TODO())
base := int64(i + 1)
// put foo
@ -506,7 +507,7 @@ func TestKVTxnOperationInSequence(t *testing.T) {
func TestKVCompactReserveLastValue(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar0"), 1)
@ -544,7 +545,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
},
}
for i, tt := range tests {
_, err := s.Compact(tt.rev)
_, err := s.Compact(traceutil.TODO(), tt.rev)
if err != nil {
t.Errorf("#%d: unexpect compact error %v", i, err)
}
@ -560,7 +561,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
func TestKVCompactBad(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
@ -580,7 +581,7 @@ func TestKVCompactBad(t *testing.T) {
{100, ErrFutureRev},
}
for i, tt := range tests {
_, err := s.Compact(tt.rev)
_, err := s.Compact(traceutil.TODO(), tt.rev)
if err != tt.werr {
t.Errorf("#%d: compact error = %v, want %v", i, err, tt.werr)
}
@ -593,7 +594,7 @@ func TestKVHash(t *testing.T) {
for i := 0; i < len(hashes); i++ {
var err error
b, tmpPath := backend.NewDefaultTmpBackend()
kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
hashes[i], _, err = kv.Hash()
@ -626,12 +627,12 @@ func TestKVRestore(t *testing.T) {
func(kv KV) {
kv.Put([]byte("foo"), []byte("bar0"), 1)
kv.Put([]byte("foo"), []byte("bar1"), 2)
kv.Compact(1)
kv.Compact(traceutil.TODO(), 1)
},
}
for i, tt := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
tt(s)
var kvss [][]mvccpb.KeyValue
for k := int64(0); k < 10; k++ {
@ -643,7 +644,7 @@ func TestKVRestore(t *testing.T) {
s.Close()
// ns should recover the previous state from backend.
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
@ -675,7 +676,7 @@ func readGaugeInt(g prometheus.Gauge) int {
func TestKVSnapshot(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
wkvs := put3TestKVs(s)
@ -695,7 +696,7 @@ func TestKVSnapshot(t *testing.T) {
}
f.Close()
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer ns.Close()
r, err := ns.Range([]byte("a"), []byte("z"), RangeOptions{})
if err != nil {
@ -711,7 +712,7 @@ func TestKVSnapshot(t *testing.T) {
func TestWatchableKVWatch(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()


@ -14,24 +14,27 @@
package mvcc
import "go.etcd.io/etcd/lease"
import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/pkg/traceutil"
)
type readView struct{ kv KV }
func (rv *readView) FirstRev() int64 {
tr := rv.kv.Read()
tr := rv.kv.Read(traceutil.TODO())
defer tr.End()
return tr.FirstRev()
}
func (rv *readView) Rev() int64 {
tr := rv.kv.Read()
tr := rv.kv.Read(traceutil.TODO())
defer tr.End()
return tr.Rev()
}
func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
tr := rv.kv.Read()
tr := rv.kv.Read(traceutil.TODO())
defer tr.End()
return tr.Range(key, end, ro)
}
@ -39,13 +42,13 @@ func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err
type writeView struct{ kv KV }
func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
tw := wv.kv.Write()
tw := wv.kv.Write(traceutil.TODO())
defer tw.End()
return tw.DeleteRange(key, end)
}
func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
tw := wv.kv.Write()
tw := wv.kv.Write(traceutil.TODO())
defer tw.End()
return tw.Put(key, value, lease)
}
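
The view wrappers keep the one-shot convenience API source-compatible: each standalone call still opens and ends its own transaction, now stamped with the traceutil.TODO() placeholder. For example, given a store s as constructed elsewhere in this diff:

rev := s.Put([]byte("k"), []byte("v"), lease.NoLease) // writeView: Write -> Put -> End
n, drev := s.DeleteRange([]byte("k"), nil)            // same open/op/End shape
fmt.Println(rev, n, drev)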


@ -29,6 +29,7 @@ import (
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/schedule"
"go.etcd.io/etcd/pkg/traceutil"
"github.com/coreos/pkg/capnslog"
"go.uber.org/zap"
@ -60,6 +61,7 @@ const (
)
var restoreChunkKeys = 10000 // non-const for testing
var defaultCompactBatchLimit = 1000
// ConsistentIndexGetter is an interface that wraps the ConsistentIndex method.
// Consistent index is the offset of an entry in a consistent replicated log.
@ -68,6 +70,10 @@ type ConsistentIndexGetter interface {
ConsistentIndex() uint64
}
type StoreConfig struct {
CompactionBatchLimit int
}
type store struct {
ReadView
WriteView
@ -76,6 +82,8 @@ type store struct {
// through atomics so must be 64-bit aligned.
consistentIndex uint64
cfg StoreConfig
// mu read locks for txns and write locks for non-txn store changes.
mu sync.RWMutex
@ -108,8 +116,12 @@ type store struct {
// NewStore returns a new store. It is useful for creating a store inside
// the mvcc package; externally it should only be used for testing.
func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) *store {
if cfg.CompactionBatchLimit == 0 {
cfg.CompactionBatchLimit = defaultCompactBatchLimit
}
s := &store{
cfg: cfg,
b: b,
ig: ig,
kvindex: newTreeIndex(lg),
@ -129,7 +141,7 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentI
s.ReadView = &readView{s}
s.WriteView = &writeView{s}
if s.le != nil {
s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
}
tx := s.b.BatchTx()
@ -258,15 +270,16 @@ func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) {
return nil, nil
}
func (s *store) compact(rev int64) (<-chan struct{}, error) {
start := time.Now()
keep := s.kvindex.Compact(rev)
func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
ch := make(chan struct{})
var j = func(ctx context.Context) {
if ctx.Err() != nil {
s.compactBarrier(ctx, ch)
return
}
start := time.Now()
keep := s.kvindex.Compact(rev)
indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
if !s.scheduleCompaction(rev, keep) {
s.compactBarrier(nil, ch)
return
@ -275,8 +288,7 @@ func (s *store) compact(rev int64) (<-chan struct{}, error) {
}
s.fifoSched.Schedule(j)
indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
trace.Step("schedule compaction")
return ch, nil
}
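
This hunk is the kvstore fix from the commit list ("compact take too long and blocks other requests"): the treeIndex compaction and its pause metric move from the Compact caller into the scheduled closure, so Compact returns as soon as the job is queued. A generic, self-contained sketch of that pattern (channel stand-in, not etcd's schedule package):

package main

import (
	"fmt"
	"time"
)

func main() {
	jobs := make(chan func(), 16) // stand-in for the FIFO scheduler
	go func() {
		for j := range jobs {
			j()
		}
	}()

	start := time.Now()
	jobs <- func() {
		time.Sleep(50 * time.Millisecond) // stand-in for kvindex.Compact(rev)
		fmt.Println("index compaction done")
	}
	fmt.Println("Compact returned after", time.Since(start)) // returns immediately
	time.Sleep(100 * time.Millisecond) // let the worker finish before exit
}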
@ -286,21 +298,21 @@ func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
return ch, err
}
return s.compact(rev)
return s.compact(traceutil.TODO(), rev)
}
func (s *store) Compact(rev int64) (<-chan struct{}, error) {
func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
s.mu.Lock()
ch, err := s.updateCompactRev(rev)
trace.Step("check and update compact revision")
if err != nil {
s.mu.Unlock()
return ch, err
}
s.mu.Unlock()
return s.compact(rev)
return s.compact(trace, rev)
}
// DefaultIgnores is a map of keys to ignore in hash checking.
@ -344,19 +356,7 @@ func (s *store) Restore(b backend.Backend) error {
}
func (s *store) restore() error {
b := s.b
reportDbTotalSizeInBytesMu.Lock()
reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
reportDbTotalSizeInBytesMu.Unlock()
reportDbTotalSizeInBytesDebugMu.Lock()
reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) }
reportDbTotalSizeInBytesDebugMu.Unlock()
reportDbTotalSizeInUseInBytesMu.Lock()
reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
reportDbTotalSizeInUseInBytesMu.Unlock()
reportDbOpenReadTxNMu.Lock()
reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
reportDbOpenReadTxNMu.Unlock()
s.setupMetricsReporter()
min, max := newRevBytes(), newRevBytes()
revToBytes(revision{main: 1}, min)
@ -568,6 +568,36 @@ func (s *store) ConsistentIndex() uint64 {
return v
}
func (s *store) setupMetricsReporter() {
b := s.b
reportDbTotalSizeInBytesMu.Lock()
reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
reportDbTotalSizeInBytesMu.Unlock()
reportDbTotalSizeInBytesDebugMu.Lock()
reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) }
reportDbTotalSizeInBytesDebugMu.Unlock()
reportDbTotalSizeInUseInBytesMu.Lock()
reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
reportDbTotalSizeInUseInBytesMu.Unlock()
reportDbOpenReadTxNMu.Lock()
reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
reportDbOpenReadTxNMu.Unlock()
reportCurrentRevMu.Lock()
reportCurrentRev = func() float64 {
s.revMu.RLock()
defer s.revMu.RUnlock()
return float64(s.currentRev)
}
reportCurrentRevMu.Unlock()
reportCompactRevMu.Lock()
reportCompactRev = func() float64 {
s.revMu.RLock()
defer s.revMu.RUnlock()
return float64(s.compactMainRev)
}
reportCompactRevMu.Unlock()
}
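
setupMetricsReporter keeps the pattern restore() used before: package-level gauge functions read through mutex-guarded closures that the store overrides once it exists, now including the current and compact revisions. A minimal standalone sketch of that late-binding pattern, with illustrative names rather than etcd's:

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	reportSizeMu sync.RWMutex
	reportSize   = func() float64 { return 0 } // default until a store exists

	sizeGauge = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "example_db_size_bytes",
		Help: "Illustrative gauge backed by a swappable closure.",
	}, func() float64 {
		reportSizeMu.RLock()
		defer reportSizeMu.RUnlock()
		return reportSize()
	})
)

func main() {
	prometheus.MustRegister(sizeGauge)
	// Later, once the backend exists, swap in the live reader.
	reportSizeMu.Lock()
	reportSize = func() float64 { return 42 }
	reportSizeMu.Unlock()
	fmt.Println("gauge now reads the live value")
}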
// appendMarkTombstone appends tombstone mark to normal revision bytes.
func appendMarkTombstone(lg *zap.Logger, b []byte) []byte {
if len(b) != revBytesLen {


@ -20,6 +20,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
@ -33,7 +34,7 @@ func (i *fakeConsistentIndex) ConsistentIndex() uint64 {
func BenchmarkStorePut(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -53,7 +54,7 @@ func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
func benchmarkStoreRange(b *testing.B, n int) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
defer cleanup(s, be, tmpPath)
// 64 byte key/val
@ -81,7 +82,7 @@ func benchmarkStoreRange(b *testing.B, n int) {
func BenchmarkConsistentIndex(b *testing.B) {
fci := fakeConsistentIndex(10)
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &fci)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &fci, StoreConfig{})
defer cleanup(s, be, tmpPath)
tx := s.b.BatchTx()
@ -100,7 +101,7 @@ func BenchmarkConsistentIndex(b *testing.B) {
func BenchmarkStorePutUpdate(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -119,7 +120,7 @@ func BenchmarkStorePutUpdate(b *testing.B) {
func BenchmarkStoreTxnPut(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -130,7 +131,7 @@ func BenchmarkStoreTxnPut(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
txn := s.Write()
txn := s.Write(traceutil.TODO())
txn.Put(keys[i], vals[i], lease.NoLease)
txn.End()
}
@ -140,7 +141,7 @@ func BenchmarkStoreTxnPut(b *testing.B) {
func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
// use closure to capture 's' to pick up the reassignment
defer func() { cleanup(s, be, tmpPath) }()
@ -151,7 +152,7 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
for i := 0; i < b.N; i++ {
for j := 0; j < revsPerKey; j++ {
txn := s.Write()
txn := s.Write(traceutil.TODO())
txn.Put(keys[i], vals[i], lease.NoLease)
txn.End()
}
@ -160,7 +161,7 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
}
func BenchmarkStoreRestoreRevs1(b *testing.B) {


@ -30,16 +30,15 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
end := make([]byte, 8)
binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
batchsize := int64(10000)
last := make([]byte, 8+1+8)
for {
var rev revision
start := time.Now()
tx := s.b.BatchTx()
tx.Lock()
keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize)
keys, _ := tx.UnsafeRange(keyBucketName, last, end, int64(s.cfg.CompactionBatchLimit))
for _, key := range keys {
rev = bytesToRev(key)
if _, ok := keep[rev]; !ok {
@ -48,7 +47,7 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
}
}
if len(keys) < int(batchsize) {
if len(keys) < s.cfg.CompactionBatchLimit {
rbytes := make([]byte, 8+1+8)
revToBytes(revision{main: compactMainRev}, rbytes)
tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes)
@ -60,7 +59,7 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
zap.Duration("took", time.Since(totalStart)),
)
} else {
plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
plog.Infof("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
}
return true
}
@ -68,10 +67,12 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
// update last
revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last)
tx.Unlock()
// Immediately commit the compaction deletes instead of letting them accumulate in the write buffer
s.b.ForceCommit()
dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
select {
case <-time.After(100 * time.Millisecond):
case <-time.After(10 * time.Millisecond):
case <-s.stopc:
return false
}
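
Two pacing changes land here: each batch of deletes is committed immediately via ForceCommit instead of accumulating in the write buffer, and the loop now deletes at most s.cfg.CompactionBatchLimit keys per pass before yielding for 10ms (down from 100ms) so regular requests can take the backend lock. A standalone sketch of the batch-commit-pause loop, with no bolt dependency and illustrative names:

package main

import (
	"fmt"
	"time"
)

// compactInBatches deletes work in fixed-size batches, pausing between
// batches so other lock holders can make progress; stopc aborts early.
func compactInBatches(items []int, batchLimit int, stopc <-chan struct{}) bool {
	for len(items) > 0 {
		n := batchLimit
		if n > len(items) {
			n = len(items)
		}
		// ... delete items[:n] and force-commit the batch here ...
		items = items[n:]
		if len(items) == 0 {
			return true // finished compaction
		}
		select {
		case <-time.After(10 * time.Millisecond): // yield to other requests
		case <-stopc:
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(compactInBatches(make([]int, 25000), 1000, nil))
}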


@ -22,6 +22,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
@ -65,7 +66,7 @@ func TestScheduleCompaction(t *testing.T) {
}
for i, tt := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
tx := s.b.BatchTx()
tx.Lock()
@ -99,7 +100,7 @@ func TestScheduleCompaction(t *testing.T) {
func TestCompactAllAndRestore(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -109,7 +110,7 @@ func TestCompactAllAndRestore(t *testing.T) {
rev := s0.Rev()
// compact all keys
done, err := s0.Compact(rev)
done, err := s0.Compact(traceutil.TODO(), rev)
if err != nil {
t.Fatal(err)
}
@ -125,7 +126,7 @@ func TestCompactAllAndRestore(t *testing.T) {
t.Fatal(err)
}
s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
if s1.Rev() != rev {
t.Errorf("rev = %v, want %v", s1.Rev(), rev)
}


@ -34,13 +34,14 @@ import (
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/schedule"
"go.etcd.io/etcd/pkg/testutil"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
func TestStoreRev(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer s.Close()
defer os.Remove(tmpPath)
@ -331,7 +332,7 @@ func TestStoreCompact(t *testing.T) {
key2 := newTestKeyBytes(revision{2, 0}, false)
b.tx.rangeRespc <- rangeResp{[][]byte{key1, key2}, nil}
s.Compact(3)
s.Compact(traceutil.TODO(), 3)
s.fifoSched.WaitFinish(1)
if s.compactMainRev != 3 {
@ -424,7 +425,7 @@ func TestRestoreDelete(t *testing.T) {
defer func() { restoreChunkKeys = oldChunk }()
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
keys := make(map[string]struct{})
@ -450,7 +451,7 @@ func TestRestoreDelete(t *testing.T) {
}
s.Close()
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer s.Close()
for i := 0; i < 20; i++ {
ks := fmt.Sprintf("foo-%d", i)
@ -472,7 +473,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
tests := []string{"recreate", "restore"}
for _, test := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -492,7 +493,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
var s *store
switch test {
case "recreate":
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
case "restore":
s0.Restore(b)
s = s0
@ -534,7 +535,7 @@ type hashKVResult struct {
// TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
func TestHashKVWhenCompacting(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
rev := 10000
@ -582,7 +583,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
go func() {
defer wg.Done()
for i := 100; i >= 0; i-- {
_, err := s.Compact(int64(rev - 1 - i))
_, err := s.Compact(traceutil.TODO(), int64(rev-1-i))
if err != nil {
t.Error(err)
}
@ -602,14 +603,14 @@ func TestHashKVWhenCompacting(t *testing.T) {
// correct hash value with latest revision.
func TestHashKVZeroRevision(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
rev := 1000
rev := 10000
for i := 2; i <= rev; i++ {
s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease)
}
if _, err := s.Compact(int64(rev / 2)); err != nil {
if _, err := s.Compact(traceutil.TODO(), int64(rev/2)); err != nil {
t.Fatal(err)
}
@ -635,11 +636,11 @@ func TestTxnPut(t *testing.T) {
vals := createBytesSlice(bytesN, sliceN)
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < sliceN; i++ {
txn := s.Write()
txn := s.Write(traceutil.TODO())
base := int64(i + 2)
if rev := txn.Put(keys[i], vals[i], lease.NoLease); rev != base {
t.Errorf("#%d: rev = %d, want %d", i, rev, base)
@ -651,14 +652,14 @@ func TestTxnPut(t *testing.T) {
// TestConcurrentReadNotBlockingWrite ensures Read does not block Write after its creation
func TestConcurrentReadNotBlockingWrite(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
// write something to read later
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
// readTx simulates a long read request
readTx1 := s.Read()
readTx1 := s.Read(traceutil.TODO())
// write should not be blocked by reads
done := make(chan struct{})
@ -673,7 +674,7 @@ func TestConcurrentReadNotBlockingWrite(t *testing.T) {
}
// readTx2 simulates a short read request
readTx2 := s.Read()
readTx2 := s.Read(traceutil.TODO())
ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
ret, err := readTx2.Range([]byte("foo"), nil, ro)
if err != nil {
@ -720,7 +721,7 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
mu sync.Mutex // mu protects committedKVs
)
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer os.Remove(tmpPath)
var wg sync.WaitGroup
@ -730,7 +731,7 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
defer wg.Done()
time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time
tx := s.Write()
tx := s.Write(traceutil.TODO())
numOfPuts := mrand.Intn(maxNumOfPutsPerWrite) + 1
var pendingKvs kvs
for j := 0; j < numOfPuts; j++ {
@ -756,7 +757,7 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
mu.Lock()
wKVs := make(kvs, len(committedKVs))
copy(wKVs, committedKVs)
tx := s.Read()
tx := s.Read(traceutil.TODO())
mu.Unlock()
// get all keys in backend store, and compare with wKVs
ret, err := tx.Range([]byte("\x00000000"), []byte("\xffffffff"), RangeOptions{})
@ -846,6 +847,7 @@ func newFakeStore() *store {
indexCompactRespc: make(chan map[revision]struct{}, 1),
}
s := &store{
cfg: StoreConfig{CompactionBatchLimit: 10000},
b: b,
le: &lease.FakeLessor{},
kvindex: fi,


@ -18,6 +18,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
@ -27,9 +28,11 @@ type storeTxnRead struct {
firstRev int64
rev int64
trace *traceutil.Trace
}
func (s *store) Read() TxnRead {
func (s *store) Read(trace *traceutil.Trace) TxnRead {
s.mu.RLock()
s.revMu.RLock()
// backend holds b.readTx.RLock() only when creating the concurrentReadTx. After
@ -38,7 +41,7 @@ func (s *store) Read() TxnRead {
tx.RLock() // RLock is a no-op. concurrentReadTx does not need to be locked after it is created.
firstRev, rev := s.compactMainRev, s.currentRev
s.revMu.RUnlock()
return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev})
return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev, trace})
}
func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
@ -61,12 +64,12 @@ type storeTxnWrite struct {
changes []mvccpb.KeyValue
}
func (s *store) Write() TxnWrite {
func (s *store) Write(trace *traceutil.Trace) TxnWrite {
s.mu.RLock()
tx := s.b.BatchTx()
tx.Lock()
tw := &storeTxnWrite{
storeTxnRead: storeTxnRead{s, tx, 0, 0},
storeTxnRead: storeTxnRead{s, tx, 0, 0, trace},
tx: tx,
beginRev: s.currentRev,
changes: make([]mvccpb.KeyValue, 0, 4),
@ -124,6 +127,7 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions
}
revpairs := tr.s.kvindex.Revisions(key, end, rev)
tr.trace.Step("range keys from in-memory index tree")
if len(revpairs) == 0 {
return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
}
@ -163,6 +167,7 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions
}
}
}
tr.trace.Step("range keys from bolt db")
return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
}
@ -178,7 +183,7 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
c = created.main
oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
}
tw.trace.Step("get key's previous created_revision and leaseID")
ibytes := newRevBytes()
idxRev := revision{main: rev, sub: int64(len(tw.changes))}
revToBytes(idxRev, ibytes)
@ -205,9 +210,11 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
}
}
tw.trace.Step("marshal mvccpb.KeyValue")
tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
tw.s.kvindex.Put(key, idxRev)
tw.changes = append(tw.changes, kv)
tw.trace.Step("store kv pair into bolt db")
if oldLease != lease.NoLease {
if tw.s.le == nil {
@ -234,6 +241,7 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
panic("unexpected error from lease Attach")
}
}
tw.trace.Step("attach lease to kv pair")
}
func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {


@ -264,6 +264,46 @@ var (
// highest bucket start of 0.01 sec * 2^14 == 163.84 sec
Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
})
currentRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: "etcd_debugging",
Subsystem: "mvcc",
Name: "current_revision",
Help: "The current revision of store.",
},
func() float64 {
reportCurrentRevMu.RLock()
defer reportCurrentRevMu.RUnlock()
return reportCurrentRev()
},
)
// overridden by mvcc initialization
reportCurrentRevMu sync.RWMutex
reportCurrentRev = func() float64 { return 0 }
compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Namespace: "etcd_debugging",
Subsystem: "mvcc",
Name: "compact_revision",
Help: "The revision of the last compaction in store.",
},
func() float64 {
reportCompactRevMu.RLock()
defer reportCompactRevMu.RUnlock()
return reportCompactRev()
},
)
// overridden by mvcc initialization
reportCompactRevMu sync.RWMutex
reportCompactRev = func() float64 { return 0 }
totalPutSizeGauge = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "etcd_debugging",
Subsystem: "mvcc",
Name: "total_put_size_in_bytes",
Help: "The total size of put kv pairs seen by this member.",
})
)
func init() {
@ -291,6 +331,9 @@ func init() {
prometheus.MustRegister(dbOpenReadTxN)
prometheus.MustRegister(hashSec)
prometheus.MustRegister(hashRevSec)
prometheus.MustRegister(currentRev)
prometheus.MustRegister(compactRev)
prometheus.MustRegister(totalPutSizeGauge)
}
// ReportEventReceived reports that an event is received.


@ -21,14 +21,15 @@ type metricsTxnWrite struct {
ranges uint
puts uint
deletes uint
putSize int64
}
func newMetricsTxnRead(tr TxnRead) TxnRead {
return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0}
return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0}
}
func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
return &metricsTxnWrite{tw, 0, 0, 0}
return &metricsTxnWrite{tw, 0, 0, 0, 0}
}
func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) {
@ -43,6 +44,8 @@ func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {
func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
tw.puts++
size := int64(len(key) + len(value))
tw.putSize += size
return tw.TxnWrite.Put(key, value, lease)
}
@ -60,6 +63,7 @@ func (tw *metricsTxnWrite) End() {
puts := float64(tw.puts)
putCounter.Add(puts)
putCounterDebug.Add(puts) // TODO: remove in 3.5 release
totalPutSizeGauge.Add(float64(tw.putSize))
deletes := float64(tw.deletes)
deleteCounter.Add(deletes)
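
The new putSize field accumulates key/value bytes per transaction and is flushed into totalPutSizeGauge once in End(), one metric update per txn rather than per Put. A standalone sketch of the same decorator shape, using illustrative types rather than etcd's interfaces:

package main

import "fmt"

type Txn interface {
	Put(key, val []byte)
	End()
}

type baseTxn struct{}

func (baseTxn) Put(key, val []byte) {}
func (baseTxn) End()                {}

// metricsTxn decorates a Txn, deferring metric updates to End().
type metricsTxn struct {
	Txn
	putSize int64
}

func (t *metricsTxn) Put(key, val []byte) {
	t.putSize += int64(len(key) + len(val))
	t.Txn.Put(key, val)
}

func (t *metricsTxn) End() {
	fmt.Printf("add %d to total_put_size_in_bytes\n", t.putSize)
	t.Txn.End()
}

func main() {
	txn := &metricsTxn{Txn: baseTxn{}}
	txn.Put([]byte("foo"), []byte("bar"))
	txn.End()
}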


@ -21,6 +21,7 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
@ -68,13 +69,13 @@ type watchableStore struct {
// cancel operations.
type cancelFunc func()
func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV {
return newWatchableStore(lg, b, le, ig)
func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) ConsistentWatchableKV {
return newWatchableStore(lg, b, le, ig, cfg)
}
func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore {
func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) *watchableStore {
s := &watchableStore{
store: NewStore(lg, b, le, ig),
store: NewStore(lg, b, le, ig, cfg),
victimc: make(chan struct{}, 1),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
@ -84,7 +85,7 @@ func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig Co
s.store.WriteView = &writeView{s}
if s.le != nil {
// use this store as the deleter so revokes trigger watch events
s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
}
s.wg.Add(2)
go s.syncWatchersLoop()


@ -21,13 +21,14 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
func BenchmarkWatchableStorePut(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := New(zap.NewExample(), be, &lease.FakeLessor{}, nil)
s := New(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -48,7 +49,7 @@ func BenchmarkWatchableStorePut(b *testing.B) {
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := New(zap.NewExample(), be, &lease.FakeLessor{}, &i)
s := New(zap.NewExample(), be, &lease.FakeLessor{}, &i, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -59,7 +60,7 @@ func BenchmarkWatchableStoreTxnPut(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
txn := s.Write()
txn := s.Write(traceutil.TODO())
txn.Put(keys[i], vals[i], lease.NoLease)
txn.End()
}
@ -79,7 +80,7 @@ func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, be, tmpPath)
k := []byte("testkey")
@ -122,7 +123,7 @@ func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
// we should put to simulate the real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
// manually create watchableStore instead of newWatchableStore
// because newWatchableStore periodically calls syncWatchersLoop
@ -179,7 +180,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()


@ -26,12 +26,13 @@ import (
"go.etcd.io/etcd/lease"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/traceutil"
"go.uber.org/zap"
)
func TestWatch(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()
@ -53,7 +54,7 @@ func TestWatch(t *testing.T) {
func TestNewWatcherCancel(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()
@ -85,7 +86,7 @@ func TestCancelUnsynced(t *testing.T) {
// method to sync watchers in unsynced map. We want to keep watchers
// in unsynced to test if syncWatchers works as expected.
s := &watchableStore{
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
unsynced: newWatcherGroup(),
// to keep the test from crashing when assigning to a nil map.
@ -140,7 +141,7 @@ func TestSyncWatchers(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := &watchableStore{
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
}
@ -223,7 +224,7 @@ func TestSyncWatchers(t *testing.T) {
// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()
@ -237,7 +238,7 @@ func TestWatchCompacted(t *testing.T) {
for i := 0; i < maxRev; i++ {
s.Put(testKey, testValue, lease.NoLease)
}
_, err := s.Compact(compactRev)
_, err := s.Compact(traceutil.TODO(), compactRev)
if err != nil {
t.Fatalf("failed to compact kv (%v)", err)
}
@ -260,7 +261,7 @@ func TestWatchCompacted(t *testing.T) {
func TestWatchFutureRev(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()
@ -301,7 +302,7 @@ func TestWatchRestore(t *testing.T) {
test := func(delay time.Duration) func(t *testing.T) {
return func(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s, b, tmpPath)
testKey := []byte("foo")
@ -309,7 +310,7 @@ func TestWatchRestore(t *testing.T) {
rev := s.Put(testKey, testValue, lease.NoLease)
newBackend, newPath := backend.NewDefaultTmpBackend()
newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, nil)
newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(newStore, newBackend, newPath)
w := newStore.NewWatchStream()
@ -347,11 +348,11 @@ func TestWatchRestore(t *testing.T) {
// 5. choose the watcher from step 1, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
b1, b1Path := backend.NewDefaultTmpBackend()
s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, nil)
s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s1, b1, b1Path)
b2, b2Path := backend.NewDefaultTmpBackend()
s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, nil)
s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(s2, b2, b2Path)
testKey, testValue := []byte("foo"), []byte("bar")
@ -398,7 +399,7 @@ func TestWatchRestoreSyncedWatcher(t *testing.T) {
// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
oldMaxRevs := watchBatchMaxRevs
defer func() {
@ -532,7 +533,7 @@ func TestWatchVictims(t *testing.T) {
oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()
@ -610,7 +611,7 @@ func TestWatchVictims(t *testing.T) {
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()


@ -14,7 +14,10 @@
package mvcc
import "go.etcd.io/etcd/mvcc/mvccpb"
import (
"go.etcd.io/etcd/mvcc/mvccpb"
"go.etcd.io/etcd/pkg/traceutil"
)
func (tw *watchableStoreTxnWrite) End() {
changes := tw.Changes()
@ -48,4 +51,6 @@ type watchableStoreTxnWrite struct {
s *watchableStore
}
func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} }
func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite {
return &watchableStoreTxnWrite{s.store.Write(trace), s}
}
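
watchableStore.Write simply forwards the trace into the inner store txn. Call sites without a request-scoped trace pass traceutil.TODO(), which returns a non-nil empty Trace, so downstream code can call trace.Step without nil checks. A tiny standalone sketch of that null-object convention (stand-in types, not etcd's):

package main

import "fmt"

// Trace is a stand-in for traceutil.Trace.
type Trace struct{ steps []string }

// TODO returns a non-nil, empty trace; with no logger attached,
// logging it is effectively a no-op.
func TODO() *Trace { return &Trace{} }

func (t *Trace) Step(msg string) { t.steps = append(t.steps, msg) }

func write(trace *Trace) {
	trace.Step("store kv pair into bolt db") // safe even for TODO()
}

func main() {
	write(TODO())
	fmt.Println("no nil checks needed at any call site")
}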


@ -26,7 +26,7 @@ import (
func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil, StoreConfig{})
defer cleanup(watchable, be, tmpPath)


@ -32,7 +32,7 @@ import (
// and the watched event attaches the correct watchID.
func TestWatcherWatchID(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -82,7 +82,7 @@ func TestWatcherWatchID(t *testing.T) {
func TestWatcherRequestsCustomID(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -119,7 +119,7 @@ func TestWatcherRequestsCustomID(t *testing.T) {
// and returns events with matching prefixes.
func TestWatcherWatchPrefix(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -193,7 +193,7 @@ func TestWatcherWatchPrefix(t *testing.T) {
// does not create watcher, which panics when canceling in range tree.
func TestWatcherWatchWrongRange(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -213,7 +213,7 @@ func TestWatcherWatchWrongRange(t *testing.T) {
func TestWatchDeleteRange(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{})
defer func() {
s.store.Close()
@ -252,7 +252,7 @@ func TestWatchDeleteRange(t *testing.T) {
// with given id inside watchStream.
func TestWatchStreamCancelWatcherByID(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -295,7 +295,7 @@ func TestWatcherRequestProgress(t *testing.T) {
// method to sync watchers in unsynced map. We want to keep watchers
// in unsynced to test if syncWatchers works as expected.
s := &watchableStore{
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
}
@ -344,7 +344,7 @@ func TestWatcherRequestProgress(t *testing.T) {
func TestWatcherWatchWithFilter(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()


@ -25,13 +25,23 @@ import (
)
func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
return purgeFile(lg, dirname, suffix, max, interval, stop, nil)
return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil)
}
func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
doneC := make(chan struct{})
errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC)
return doneC, errC
}
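
A sketch of how a caller might use the new done channel to wait for the purge goroutine to exit during shutdown; the directory, suffix, retention count, and interval below are illustrative:

package main

import (
	"time"

	"go.etcd.io/etcd/pkg/fileutil"
	"go.uber.org/zap"
)

func main() {
	lg, _ := zap.NewProduction()
	stop := make(chan struct{})

	// Keep the 5 most recent *.snap files, checking every 30s.
	donec, errc := fileutil.PurgeFileWithDoneNotify(lg, "/var/lib/etcd/member/snap", "snap", 5, 30*time.Second, stop)
	go func() {
		if err := <-errc; err != nil {
			lg.Warn("purge loop failed", zap.Error(err))
		}
	}()

	// On shutdown: signal stop, then wait for the goroutine to finish
	// so no purge races with teardown.
	close(stop)
	<-donec
}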
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
// if donec is non-nil, the function closes it to signal that the purge goroutine has exited.
func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
errC := make(chan error, 1)
go func() {
if donec != nil {
defer close(donec)
}
for {
fnames, err := ReadDir(dirname)
if err != nil {


@ -45,7 +45,7 @@ func TestPurgeFile(t *testing.T) {
stop, purgec := make(chan struct{}), make(chan string, 10)
// keep 3 most recent files
errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec)
errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec, nil)
select {
case f := <-purgec:
t.Errorf("unexpected purge on %q", f)
@ -116,7 +116,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
}
stop, purgec := make(chan struct{}), make(chan string, 10)
errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec)
errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec, nil)
for i := 0; i < 5; i++ {
select {


@ -95,12 +95,23 @@ func (pw *PageWriter) Write(p []byte) (n int, err error) {
return n, werr
}
// Flush flushes buffered data.
func (pw *PageWriter) Flush() error {
if pw.bufferedBytes == 0 {
return nil
}
_, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
pw.bufferedBytes = 0
_, err := pw.flush()
return err
}
// FlushN flushes buffered data and returns the number of written bytes.
func (pw *PageWriter) FlushN() (int, error) {
return pw.flush()
}
func (pw *PageWriter) flush() (int, error) {
if pw.bufferedBytes == 0 {
return 0, nil
}
n, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
pw.bufferedBytes = 0
return n, err
}
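
Flush keeps its old behavior while FlushN exposes the byte count from the shared flush helper, so callers can account for the bytes actually written (for example, feeding a write-size counter). A short usage sketch, assuming the package's NewPageWriter constructor:

package main

import (
	"bytes"
	"fmt"

	"go.etcd.io/etcd/pkg/ioutil"
)

func main() {
	var buf bytes.Buffer
	pw := ioutil.NewPageWriter(&buf, 4096, 0)
	if _, err := pw.Write([]byte("hello wal")); err != nil {
		panic(err)
	}
	// FlushN behaves like Flush but also reports how many bytes
	// reached the underlying writer.
	n, err := pw.FlushN()
	if err != nil {
		panic(err)
	}
	fmt.Printf("flushed %d bytes\n", n)
}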


@ -53,15 +53,12 @@ var DefaultZapLoggerConfig = zap.Config{
ErrorOutputPaths: []string{"stderr"},
}
// AddOutputPaths adds output paths to the existing output paths, resolving conflicts.
func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
// MergeOutputPaths merges logging output paths, resolving conflicts.
func MergeOutputPaths(cfg zap.Config) zap.Config {
outputs := make(map[string]struct{})
for _, v := range cfg.OutputPaths {
outputs[v] = struct{}{}
}
for _, v := range outputPaths {
outputs[v] = struct{}{}
}
outputSlice := make([]string, 0)
if _, ok := outputs["/dev/null"]; ok {
// "/dev/null" to discard all
@ -78,9 +75,6 @@ func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.
for _, v := range cfg.ErrorOutputPaths {
errOutputs[v] = struct{}{}
}
for _, v := range errorOutputPaths {
errOutputs[v] = struct{}{}
}
errOutputSlice := make([]string, 0)
if _, ok := errOutputs["/dev/null"]; ok {
// "/dev/null" to discard all

pkg/traceutil/trace.go (new file, 172 lines)

@ -0,0 +1,172 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package traceutil implements tracing utilities using "context".
package traceutil
import (
"bytes"
"context"
"fmt"
"math/rand"
"time"
"go.uber.org/zap"
)
const (
TraceKey = "trace"
StartTimeKey = "startTime"
)
// Field is a kv pair to record additional details of the trace.
type Field struct {
Key string
Value interface{}
}
func (f *Field) format() string {
return fmt.Sprintf("%s:%v; ", f.Key, f.Value)
}
func writeFields(fields []Field) string {
if len(fields) == 0 {
return ""
}
var buf bytes.Buffer
buf.WriteString("{")
for _, f := range fields {
buf.WriteString(f.format())
}
buf.WriteString("}")
return buf.String()
}
type Trace struct {
operation string
lg *zap.Logger
fields []Field
startTime time.Time
steps []step
stepDisabled bool
}
type step struct {
time time.Time
msg string
fields []Field
}
func New(op string, lg *zap.Logger, fields ...Field) *Trace {
return &Trace{operation: op, lg: lg, startTime: time.Now(), fields: fields}
}
// TODO returns a non-nil, empty Trace
func TODO() *Trace {
return &Trace{}
}
func Get(ctx context.Context) *Trace {
if trace, ok := ctx.Value(TraceKey).(*Trace); ok && trace != nil {
return trace
}
return TODO()
}
func (t *Trace) GetStartTime() time.Time {
return t.startTime
}
func (t *Trace) SetStartTime(time time.Time) {
t.startTime = time
}
func (t *Trace) InsertStep(at int, time time.Time, msg string, fields ...Field) {
newStep := step{time, msg, fields}
if at < len(t.steps) {
t.steps = append(t.steps[:at+1], t.steps[at:]...)
t.steps[at] = newStep
} else {
t.steps = append(t.steps, newStep)
}
}
// Step adds a step to the trace
func (t *Trace) Step(msg string, fields ...Field) {
if !t.stepDisabled {
t.steps = append(t.steps, step{time: time.Now(), msg: msg, fields: fields})
}
}
// DisableStep sets the flag to prevent the trace from adding steps
func (t *Trace) DisableStep() {
t.stepDisabled = true
}
// EnableStep re-enables the trace to add steps
func (t *Trace) EnableStep() {
t.stepDisabled = false
}
func (t *Trace) AddField(fields ...Field) {
t.fields = append(t.fields, fields...)
}
// Log dumps all steps in the Trace
func (t *Trace) Log() {
t.LogWithStepThreshold(0)
}
// LogIfLong dumps logs if the duration is longer than threshold
func (t *Trace) LogIfLong(threshold time.Duration) {
if time.Since(t.startTime) > threshold {
stepThreshold := threshold / time.Duration(len(t.steps)+1)
t.LogWithStepThreshold(stepThreshold)
}
}
// LogWithStepThreshold only dumps steps whose duration is longer than the step threshold
func (t *Trace) LogWithStepThreshold(threshold time.Duration) {
msg, fs := t.logInfo(threshold)
if t.lg != nil {
t.lg.Info(msg, fs...)
}
}
func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) {
endTime := time.Now()
totalDuration := endTime.Sub(t.startTime)
traceNum := rand.Int31()
msg := fmt.Sprintf("trace[%d] %s", traceNum, t.operation)
var steps []string
lastStepTime := t.startTime
for _, step := range t.steps {
stepDuration := step.time.Sub(lastStepTime)
if stepDuration > threshold {
steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)",
traceNum, step.msg, writeFields(step.fields), stepDuration))
}
lastStepTime = step.time
}
fs := []zap.Field{zap.String("detail", writeFields(t.fields)),
zap.Duration("duration", totalDuration),
zap.Time("start", t.startTime),
zap.Time("end", endTime),
zap.Strings("steps", steps)}
return msg, fs
}
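
Putting the new package together, a short usage sketch of the API defined above; the operation name, field values, sleeps, and thresholds are arbitrary:

package main

import (
	"time"

	"go.etcd.io/etcd/pkg/traceutil"
	"go.uber.org/zap"
)

func main() {
	lg, _ := zap.NewProduction()
	trace := traceutil.New("range", lg, traceutil.Field{Key: "key", Value: "foo"})

	time.Sleep(5 * time.Millisecond) // ... index lookup ...
	trace.Step("range keys from in-memory index tree")

	time.Sleep(20 * time.Millisecond) // ... bolt read ...
	trace.Step("range keys from bolt db")

	// Logs only if the whole operation exceeded 10ms; steps faster
	// than the derived per-step threshold are omitted from the output.
	trace.LogIfLong(10 * time.Millisecond)
}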

pkg/traceutil/trace_test.go (new file, 262 lines)

@ -0,0 +1,262 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package traceutil
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"go.uber.org/zap"
)
func TestGet(t *testing.T) {
traceForTest := &Trace{operation: "test"}
tests := []struct {
name string
inputCtx context.Context
outputTrace *Trace
}{
{
name: "When the context does not have trace",
inputCtx: context.TODO(),
outputTrace: TODO(),
},
{
name: "When the context has trace",
inputCtx: context.WithValue(context.Background(), TraceKey, traceForTest),
outputTrace: traceForTest,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
trace := Get(tt.inputCtx)
if trace == nil {
t.Errorf("Expected %v; Got nil", tt.outputTrace)
}
if trace.operation != tt.outputTrace.operation {
t.Errorf("Expected %v; Got %v", tt.outputTrace, trace)
}
})
}
}
func TestCreate(t *testing.T) {
var (
op = "Test"
steps = []string{"Step1", "Step2"}
fields = []Field{
{"traceKey1", "traceValue1"},
{"traceKey2", "traceValue2"},
}
stepFields = []Field{
{"stepKey1", "stepValue2"},
{"stepKey2", "stepValue2"},
}
)
trace := New(op, nil, fields[0], fields[1])
if trace.operation != op {
t.Errorf("Expected %v; Got %v", op, trace.operation)
}
for i, f := range trace.fields {
if f.Key != fields[i].Key {
t.Errorf("Expected %v; Got %v", fields[i].Key, f.Key)
}
if f.Value != fields[i].Value {
t.Errorf("Expected %v; Got %v", fields[i].Value, f.Value)
}
}
for i, v := range steps {
trace.Step(v, stepFields[i])
}
for i, v := range trace.steps {
if steps[i] != v.msg {
t.Errorf("Expected %v; Got %v", steps[i], v.msg)
}
if stepFields[i].Key != v.fields[0].Key {
t.Errorf("Expected %v; Got %v", stepFields[i].Key, v.fields[0].Key)
}
if stepFields[i].Value != v.fields[0].Value {
t.Errorf("Expected %v; Got %v", stepFields[i].Value, v.fields[0].Value)
}
}
}
func TestLog(t *testing.T) {
tests := []struct {
name string
trace *Trace
fields []Field
expectedMsg []string
}{
{
name: "When dump all logs",
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
steps: []step{
{time: time.Now().Add(-80 * time.Millisecond), msg: "msg1"},
{time: time.Now().Add(-50 * time.Millisecond), msg: "msg2"},
},
},
expectedMsg: []string{
"msg1", "msg2",
},
},
{
name: "When trace has fields",
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
steps: []step{
{
time: time.Now().Add(-80 * time.Millisecond),
msg: "msg1",
fields: []Field{{"stepKey1", "stepValue1"}},
},
{
time: time.Now().Add(-50 * time.Millisecond),
msg: "msg2",
fields: []Field{{"stepKey2", "stepValue2"}},
},
},
},
fields: []Field{
{"traceKey1", "traceValue1"},
{"count", 1},
},
expectedMsg: []string{
"Test",
"msg1", "msg2",
"traceKey1:traceValue1", "count:1",
"stepKey1:stepValue1", "stepKey2:stepValue2",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
defer os.RemoveAll(logPath)
lcfg := zap.NewProductionConfig()
lcfg.OutputPaths = []string{logPath}
lcfg.ErrorOutputPaths = []string{logPath}
lg, _ := lcfg.Build()
for _, f := range tt.fields {
tt.trace.AddField(f)
}
tt.trace.lg = lg
tt.trace.Log()
data, err := ioutil.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
for _, msg := range tt.expectedMsg {
if !bytes.Contains(data, []byte(msg)) {
t.Errorf("Expected to find %v in log", msg)
}
}
})
}
}
func TestLogIfLong(t *testing.T) {
tests := []struct {
name string
threshold time.Duration
trace *Trace
expectedMsg []string
}{
{
name: "When the duration is smaller than threshold",
threshold: time.Duration(200 * time.Millisecond),
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
steps: []step{
{time: time.Now().Add(-50 * time.Millisecond), msg: "msg1"},
{time: time.Now(), msg: "msg2"},
},
},
expectedMsg: []string{},
},
{
name: "When the duration is longer than threshold",
threshold: time.Duration(50 * time.Millisecond),
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
steps: []step{
{time: time.Now().Add(-50 * time.Millisecond), msg: "msg1"},
{time: time.Now(), msg: "msg2"},
},
},
expectedMsg: []string{
"msg1", "msg2",
},
},
{
name: "When not all steps are longer than step threshold",
threshold: time.Duration(50 * time.Millisecond),
trace: &Trace{
operation: "Test",
startTime: time.Now().Add(-100 * time.Millisecond),
steps: []step{
{time: time.Now(), msg: "msg1"},
{time: time.Now(), msg: "msg2"},
},
},
expectedMsg: []string{
"msg1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
defer os.RemoveAll(logPath)
lcfg := zap.NewProductionConfig()
lcfg.OutputPaths = []string{logPath}
lcfg.ErrorOutputPaths = []string{logPath}
lg, _ := lcfg.Build()
tt.trace.lg = lg
tt.trace.LogIfLong(tt.threshold)
data, err := ioutil.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
for _, msg := range tt.expectedMsg {
if !bytes.Contains(data, []byte(msg)) {
t.Errorf("Expected to find %v in log", msg)
}
}
})
}
}

Some files were not shown because too many files have changed in this diff.