Compare commits

...

137 Commits

Author SHA1 Message Date
fbaef05885 *: bump to v2.0.10 2015-04-22 15:21:38 -07:00
31a94d28e3 etcdctl: add extended as output format
extended wasn't documented in the help as one of the output formats, fix
this!

Conflicts:
	etcdctl/main.go
2015-04-22 15:11:06 -07:00
88660a303f snap: load should only return ErrNoSnapshot
If there is no available snapshot, load should return
ErrNoSnapshot. etcdserver might recover from that error
if it still has complete WAL files.
2015-04-22 15:09:38 -07:00
53c74dbd0b etcdserver: prevExist=true + condition is compareAndSwap
PrevExist indicates the key should exist. Condition compares with
an existing key. So PrevExist+condition = CompareAndSwap not Update.
2015-04-22 15:09:28 -07:00
8a8af60fad etcdctl: backup tool should use the new layout 2015-04-22 15:09:15 -07:00
7de19fefe8 etcdserver: fix minor bug in EtcdServer.send
It seems to be nothing serious.
after deleted peers, the log may output:
"etcdserver: send message to unknown receiver %s"
2015-04-22 15:09:04 -07:00
7750f387b0 wal: better log msg 2015-04-22 15:08:50 -07:00
e33ab24442 wal: never leave a corrupted wal file
If the process dies during wal.cut(), it might leave a corrupted wal
file. This commit solves the problem by creating a temp wal file first,
then atomically rename it to a wal file when we are sure it is vaild.

Conflicts:
	wal/wal.go
2015-04-22 15:08:42 -07:00
fce2c1eeaf discovery: drop trailing . from srv target 2015-04-22 15:06:20 -07:00
6a3bb93305 discovery: add a test case for srv
During srv discovery, it should try to match local member with
resolved addr and return unresolved hostnames for the cluster.

Conflicts:
	discovery/srv_test.go
2015-04-22 15:06:03 -07:00
21455d2f3b *: stop using resolved tcp addr
We start to resolve host into tcp addrs since we generate
tcp based initial-cluster during srv discovery. However it
creates problems around tls and cluster verification. The
srv discovery only needs to use the resolved tcp addr to
find the local node. It does not have to resolve everything
and use the resolved addrs.

This fixes #2488 and #2226
2015-04-22 14:59:07 -07:00
51bb4220c5 Clarify that it is the proxy doing the shuffle. 2015-04-22 14:58:54 -07:00
d8c506923f proxy: shuffle endpoints
Shuffle endpoints to avoid being "stuck" to a single cluster member.
2015-04-22 14:58:40 -07:00
5d778f85ca *: bump to v2.0.9+git 2015-04-07 15:18:50 -07:00
02697ca725 *: bump to v2.0.9 2015-04-07 15:18:29 -07:00
bd693c7069 etcdctl: refactor message in import command 2015-04-07 15:16:13 -07:00
52c90cdcfb etcdctl: import hidden keys 2015-04-07 14:49:40 -07:00
a88b22ac0a store: fix watcher removal 2015-04-07 14:46:10 -07:00
e93f8b8a12 *: bump to v2.0.8+git 2015-03-31 14:29:38 -07:00
86e616c6e9 *: bump to v2.0.8 2015-03-31 14:29:13 -07:00
5ae55a2c0d etcdctl: fix import typos 2015-03-31 13:48:18 -07:00
62ce6eef7b etcdctl: main routine of import command should wait for goroutine exiting 2015-03-31 13:26:15 -07:00
7df4f5c804 build: do not build internal debugging tool
We are still playing around with the dump-log tool.
Stop building it publicly until we are happy with its
ux and functionality.
2015-03-31 13:26:05 -07:00
461c24e899 etcdct: adopt new client port by default
etcdserver uses both 4001 and 2379 for serving client requests by
default. etcdctl supports both ports by default.
2015-03-31 13:25:56 -07:00
6d90d03bf0 etcdctl: add migratesnap command 2015-03-31 13:25:39 -07:00
9995e80a2c Revert "etcdhttp: add internalVersion"
This reverts commit a77bf97c14.

Conflicts:
	version/version.go

Conflicts:
	version/version.go
2015-03-31 13:25:22 -07:00
229405f113 *: remove upgrading related stuff 2015-03-31 13:24:28 -07:00
b3f2a998d4 docs: add clarity about the 1000 events history
When talking about missing events on a particular key, the 1000 event history
limit can be understood as being per key, instead of etcd-wide events. Make it
clear that it is across all etcd keys.
2015-03-31 13:24:19 -07:00
8436e901e9 etcdserver: loose member validation for joining existing cluster 2015-03-31 13:24:07 -07:00
c03f5cb941 *: bump to v2.0.7+git 2015-03-24 23:14:38 -07:00
0cb90e4bea *: bump to v2.0.7 2015-03-24 23:07:57 -07:00
df83b1b34e wal: fix missing import 2015-03-24 23:00:04 -07:00
f2bef04009 wal: releaseTo should work with large release index 2015-03-24 22:51:02 -07:00
02198336f6 version: not return err NotExist in Detect 2015-03-24 22:50:44 -07:00
0c9a226e0e etcdserver: print out extra files in data dir instead of erroring 2015-03-24 22:50:33 -07:00
5bd1d420bb etcdserver: add join-existing check 2015-03-24 22:49:41 -07:00
a1cb5cb768 etcdmain: print error when non-flag args remain 2015-03-24 22:49:31 -07:00
acba49fe81 *: bump to v2.0.6+git 2015-03-23 14:05:08 -07:00
e3c902228b *: bump to v2.0.6 2015-03-23 13:52:00 -07:00
52a2d143d2 migrate: remove starter code
It has been moved to github.com/coreos/etcd-starter.
2015-03-21 11:15:26 -07:00
f53d550a79 store: fixed clone error for store stats. 2015-03-21 11:14:06 -07:00
63b799b891 migrate: detect version 2.0.1
Without this code a second start will crash:

```
$ ./bin/etcd -name foobar --data-dir=foobar
2015/03/18 18:06:28 starter: detect etcd version 2.0.1 in foobar
2015/03/18 18:06:28 starter: unhandled etcd version in foobar
panic: starter: unhandled etcd version in foobar

goroutine 1 [running]:
log.Panicf(0x594770, 0x25, 0x208927c70, 0x1, 0x1)
	/usr/local/go/src/log/log.go:314 +0xd0
github.com/coreos/etcd/migrate/starter.checkInternalVersion(0x20889a480, 0x0, 0x0)
	/Users/philips/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/migrate/starter/starter.go:160 +0xf2f
github.com/coreos/etcd/migrate/starter.StartDesiredVersion(0x20884a010, 0x3, 0x3)
	/Users/philips/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/migrate/starter/starter.go:77 +0x2a9
main.main()
	/Users/philips/src/github.com/coreos/etcd/gopath/src/github.com/coreos/etcd/main.go:46 +0x25e

goroutine 9 [syscall]:
os/signal.loop()
	/usr/local/go/src/os/signal/signal_unix.go:21 +0x1f
created by os/signal.init·1
	/usr/local/go/src/os/signal/signal_unix.go:27 +0x35
```
2015-03-21 11:13:55 -07:00
697883fb8c etcdmain: let user provide a name w/o initial-cluster update
Currently this doesn't work if a user wants to try out a single machine
cluster but change the name for whatever reason. This is because the
name is always "default" and the

```
./bin/etcd -name 'baz'
```

This solves our problem on CoreOS where the default is `ETCD_NAME=%m`.
2015-03-21 11:13:42 -07:00
f794f87f26 Documentation: fixup grammar around the unsafe flags 2015-03-21 11:13:28 -07:00
0847986d4a etcdmain: identify data dir type 2015-03-21 11:12:18 -07:00
9ea80c6ac1 raft: fix godoc about starting a node 2015-03-21 11:11:21 -07:00
02fb648abf etcdmain: verify heartbeat and election flag 2015-03-21 11:11:09 -07:00
4c9e1686b1 pkg/flags: Add support for IPv6 addresses
Support IPv6 address for ETCD_ADDR and ETCD_PEER_ADDR

pkg/flags: Support IPv6 address for ETCD_ADDR and ETCD_PEER_ADDR

pkg/flags: tests for IPv6 addr and bind-addr flags

pkg/flags: IPAddressPort.Host: do not enclose IPv6 address in square brackets

pkg/flags: set default bind address to [::] instead of 0.0.0.0

pkg/flags: we don't need fmt any more

also, one minor fix: net.JoinHostPort takes string as a port value

pkg/flags: fix ipv6 tests

pkg/flags: test both IPv4 and IPv6 addresses in TestIPAddressPortString

etcdmain: test: use [::] instead of 0.0.0.0
2015-03-21 11:05:20 -07:00
0fb9362c5c *: bump to v2.0.5+git 2015-03-11 17:00:51 -07:00
9481945228 *: bump to v2.0.5 2015-03-11 11:33:43 -07:00
e13b09e4d9 wal: fix ReleaseLockTo
ReleaseLockTo should not release the lock on the WAL
segment that is right before the given index. When
restarting etcd, etcd needs to read from the WAL segment
that has a smaller index than the snapshot index.

The correct behavior is that ReleaseLockTo releases
the locks w is holding so that w only holds one lock
that has an index smaller than the given index.
2015-03-10 09:45:46 -07:00
78e0149f41 raft: do not reset vote if term is not changed
raft MUST keep the voting information for the same term. reset
should not reset vote if term is not changed.
2015-03-10 09:42:45 -07:00
4c86ab4868 pkg/transport: fix downgrade https to http bug in transport
If the TLS config is empty, etcd downgrades https to http without a warning.
This commit avoids the downgrade and stops etcd from bootstrapping if it cannot
listen on TLS.
2015-03-10 09:39:01 -07:00
59327bab47 pkg/transport: set the maxIdleConnsPerHost to -1
for transport that are using timeout connections, we set the
maxIdleConnsPerHost to -1. The default transport does not clear
the timeout for the connections it sets to be idle. So the connections
with timeout cannot be reused.
2015-03-10 09:38:39 -07:00
62ed1ebf03 Documentation: fix "Missing infra1="
Documentation: fix "Missing infra1="
2015-03-10 09:38:27 -07:00
cea3448438 *: bump to v2.0.4+git 2015-02-27 12:25:50 -08:00
1a2c6d3f2f *: bump to v2.0.4 2015-02-26 22:01:24 -08:00
ecf7c27697 Merge pull request #2374 from wellbehavedsoftware/fix-2373
etcdctl: fix etcdctl cluster-health ignores SSL settings
2015-02-25 07:44:10 -08:00
05ecdbc617 etcdctl: fix etcdctl cluster-health ignores SSL settings
etcdctl reconnects to the leader, but was not picking up ssl settings in this
case, which causes it to show unhealthy when this is not the case.

Fixes #2373
2015-02-25 13:19:07 +01:00
6648b7e302 Merge pull request #2363 from yichengq/329
migrate/starter: fix v2 data dir checking
2015-02-24 22:44:10 -08:00
194105e02c Merge pull request #2369 from jonsyu1/master
Documentation fixes for proxy
2015-02-24 21:39:20 -08:00
31bfffaa48 Documentation: standardize on url over URL
url and URL both appear in this doc. Choose url due to higher frequency
2015-02-24 16:26:27 -05:00
1fbaf9dbb7 Documentation: fix discovery flag for proxy docs
It seems that the -discovery flag used to be -discovery-url. Updated this to use
the currently documented and supported -discovery flag.
2015-02-24 16:25:18 -05:00
3fd9136740 migrate/starter: fix v2 data dir checking 2015-02-24 11:47:56 -08:00
a560c52815 Merge pull request #2354 from xiang90/wait_time
pkg/wait: add WaitTime
2015-02-23 14:29:39 -08:00
53d20a8a29 pkg/wait: add WaitTime
WaitTime waits on deadline instead of id.
2015-02-23 14:26:42 -08:00
4b72095bd3 Merge pull request #2350 from jonsyu1/master
Fixed sample command flags in proxy docs
2015-02-23 09:19:15 -08:00
28e150e50e Documentation: fix sample command flags for proxy
The docs mention the listen-client-urls flag, but the examples use
client-listen-urls, which is an invalid flag.
2015-02-23 11:15:42 -05:00
4d0472029a Merge pull request #2348 from yichengq/326
etcdserver: fix cluster fallback recovery
2015-02-21 12:16:08 -08:00
e54fdfd9cc Merge pull request #2349 from yichengq/327
rafthttp: fix panic on receiving empty ents
2015-02-20 15:15:43 -08:00
ca390560f9 rafthttp: fix panic on receiving empty ents
2.0 rc may send empty ents. Fix it for backward compatibility.
2015-02-20 15:07:27 -08:00
cff005777a etcdserver: fix cluster fallback recovery
Cluster and transport may recover to old states when new node joins
the cluster. Record cluster last modified index to avoid this.
2015-02-20 14:30:00 -08:00
d57e07dcde Merge pull request #2347 from bdarnell/fix-nyet-test
Fix test for existence of go-nyet.
2015-02-20 14:07:55 -05:00
79bc3f4774 Fix test for existence of go-nyet.
When the file is not found, `which` returns an empty string,
which passes the -f test. `command -v` is the most portable alternative
to `which` per
http://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script/677212#677212
2015-02-20 14:02:43 -05:00
d2b0dd2419 Merge pull request #2345 from bdarnell/normal-entry-formatter
Only use the EntryFormatter for normal entries.
2015-02-20 11:00:12 -08:00
b53dc0826e Only use the EntryFormatter for normal entries.
ConfChange entries also have a Data field but the application-supplied
formatter won't know what to do with them.
2015-02-20 13:51:14 -05:00
0ea2173a7e Merge pull request #2343 from xiang90/fix_kill
osutil: pid 1 should exit directly instead of trying to kill itself
2015-02-20 09:01:49 -08:00
7ae94f2bf0 osutil: pid 1 should exit directly instead of trying to kill itself 2015-02-19 20:27:50 -08:00
4228c703a7 Merge pull request #2341 from yichengq/326
migrate/starter: fix flag parsing
2015-02-19 11:02:07 -08:00
10629c40e1 migrate/starter: fix flag parsing 2015-02-18 23:47:52 -08:00
e2928cd97a Merge pull request #2242 from barakmich/acl_doc
docs: Add v2 ACL RFC
2015-02-18 23:31:26 -08:00
40365c4f8d docs: add Security RFC
docs: Add v2 ACL RFC

Add workflow, fix terminology, make the API JSON, and general cleanup

fixes from xiang90s comments

add permissions struct

update regarding glob matches

rename file
2015-02-18 14:34:00 -05:00
88994f9ec8 Merge pull request #2335 from xiang90/dump-tool
tool: dump tool supports index
2015-02-18 09:35:49 -08:00
d6f8a30f7c tool: dump tool supports index 2015-02-18 09:13:47 -08:00
7c65857283 Merge pull request #2327 from barakmich/remove_shadowing
*: remove shadowing of variables from etcd and add travis test
2015-02-17 17:46:41 -05:00
92dca0af0f *: remove shadowing of variables from etcd and add travis test
We've been bitten by this enough times that I wrote a tool so that
it never happens again.
2015-02-17 16:31:42 -05:00
0a5707420b Merge pull request #2326 from yichengq/325
migrate/functional: fix `go build` failure
2015-02-17 10:46:39 -08:00
90b06f874d migrate/functional: fix go build failure 2015-02-17 10:35:30 -08:00
66199afb25 Merge pull request #2322 from kelseyhightower/add-etcd-docker-guide
doc: add etcd docker guide
2015-02-16 12:43:17 -08:00
217a1f0730 doc: add etcd docker guide
Fixes #2253
2015-02-16 11:44:41 -08:00
def62071f0 Merge pull request #2320 from xiang90/fix_error
etcdserver: fix error message when validating the discovery cluster
2015-02-16 09:53:24 -08:00
beb44ef6ba etcdserver: fix error message when validating the discovery cluster 2015-02-16 09:53:01 -08:00
d1ed54b734 Merge pull request #2317 from zhangbaitong/master
docs:small fix
2015-02-16 08:28:37 -08:00
518eb9fa2f docs:small fix
Signed-off-by: zhangbaitong <zhangbaitong@163.com>
2015-02-16 17:54:24 +08:00
73e67628d9 Merge pull request #2313 from xiang90/cluster_mu
etcdserver: move the mutex before what it guards
2015-02-14 23:05:53 -08:00
04bd06d20b etcdserver: move the mutex before what it guards 2015-02-14 22:26:12 -08:00
29f05bb217 Merge pull request #2307 from xiang90/refactor_cluster
etcdserver: getOtherPeerURLs -> getRemotePeerURLs
2015-02-14 20:59:38 -08:00
c5ca1218f3 etcdserver: GetClusterFromPeers -> GetClusterFromRemotePeers 2015-02-13 19:05:29 -08:00
f7540912d6 etcdserver: getOtherPeerURLs -> getRemotePeerURLs 2015-02-13 18:56:45 -08:00
0fcbadc10b Merge pull request #2305 from xiang90/fix_win
osutil: fix win build
2015-02-13 16:39:07 -08:00
e44dc0f3fe osutil: fix win build 2015-02-13 16:33:39 -08:00
4d728cc8c4 *: bump to v2.0.3 2015-02-13 15:27:24 -08:00
f7998bb2db Merge pull request #2304 from xiang90/fix_discovery_validation
etcdserver: validate discovery cluster
2015-02-13 14:41:09 -08:00
cfa7ab6074 etcdserver: validate discovery cluster 2015-02-13 14:32:24 -08:00
b59390c9c3 Merge pull request #2293 from barakmich/etcd_underscore
migrate: stop deleting _etcd
2015-02-13 17:10:14 -05:00
fdebf2b109 fix parent references 2015-02-13 16:54:15 -05:00
e9f4be498d migrate: decrease memory usage (only duplicate machines) 2015-02-13 15:26:54 -05:00
6d9d7b4497 Merge pull request #2302 from xiang90/fix_travis
integration: wait for slow travis
2015-02-13 11:49:37 -08:00
163ea3f5c5 integration: wait for slow travis 2015-02-13 11:41:03 -08:00
ea1e54b2a1 Merge pull request #2291 from ArtfulCoder/master
Added go build flag '-installsuffix cgo' to create a static library for etcd and etcdctl
2015-02-13 11:23:03 -08:00
b31109cfd7 Merge pull request #2290 from xiang90/fix_transport
etcdserver: recover transport when recovering from a snapshot
2015-02-13 10:23:29 -08:00
7a909c3950 Merge pull request #2282 from matishsiao/patch-1
add etcd-console tool to tools list
2015-02-13 10:20:31 -08:00
c16cc3a6a3 etcdserver: recover transport when recovering from a snapshot 2015-02-13 10:16:28 -08:00
d7840b75c3 Merge pull request #2301 from xiang90/fix_snap
integration: fix test
2015-02-13 10:03:45 -08:00
aed2c82e44 integration: fix test 2015-02-13 10:02:42 -08:00
39ee85470f Merge pull request #2300 from xiang90/fix_snap
etcdserver: fix snapshot
2015-02-13 09:56:19 -08:00
fbc4c8efb5 etcdserver: fix snapshot 2015-02-13 09:54:25 -08:00
12999ba083 Merge pull request #2298 from barakmich/issue2295
etcdserver: Unmask the snapshotter. Fixes #2295
2015-02-13 09:38:58 -08:00
a0e3bc9cbd etcdserver: Unmask the snapshotter. Fixes #2295 2015-02-13 11:56:00 -05:00
b06e43b803 Merge pull request #2289 from fabxc/feature/graceful_shutdown
main: shutdown gracefully.
2015-02-13 07:34:07 -08:00
8bf795dc3c etcdmain/osutil: shutdown gracefully, interrupt handling
The functionality in pkg/osutil ensures that all interrupt handlers finish
and the process kills itself with the proper signal.
Test for interrupt handling added.
The server shutsdown gracefully by stopping on interrupt (Issue #2277.)
2015-02-13 10:28:53 +01:00
02c52f175f migrate: stop deleting etcd 2015-02-12 19:35:33 -05:00
daf1a913bb Merge pull request #2287 from Amit-PivotalLabs/master
rafthttp/transport.go: Fix nil pointer dereference in RemovePeer
2015-02-12 14:49:12 -08:00
317e57a8a8 rafthttp: Panic informatively when removing unknown peer ID 2015-02-12 14:43:44 -08:00
5c0d3889f8 Added go build flag '-installsuffix cgo' to create a static library. This is needed when go 1.4 is used to build. 2015-02-12 14:08:02 -08:00
a71184424a *: bump to v2.0.2+git 2015-02-12 11:41:48 -08:00
409daceb73 *: bump to v2.0.2 2015-02-12 11:14:50 -08:00
c6cc276ef0 Merge pull request #2286 from barakmich/fix_migrations
etcdserver: Canonicalize migrations
2015-02-12 12:53:33 -05:00
cd50f0e058 etcdserver: Create MemberDir() and base {Snap,WAL}Dir() thereon. Audit DataDir. 2015-02-12 12:45:19 -05:00
fade9b6065 etcdserver: Refactor 2.0.1 directory rename into a proper migration
fix all instances

fix detection test
2015-02-12 11:53:19 -05:00
590205b8c0 Merge pull request #2284 from xiang90/cleanup
Cleanup
2015-02-11 16:21:10 -08:00
163f0f09f6 etcdserver: cleanup cluster_util 2015-02-11 16:20:38 -08:00
20497f1f85 etcdserver: move remote cluster retrieval to cluster_util.go 2015-02-11 14:03:14 -08:00
4a0887ef7a Merge pull request #2283 from xiang90/etcd-dump
etcd dump
2015-02-11 11:24:05 -08:00
161b1d2e2e tools: etcd-dump-logs tool support dump from a given snapshot file 2015-02-11 10:50:04 -08:00
71bed48916 snap: add Read function 2015-02-11 10:21:19 -08:00
fd90ec6c26 add etcd-console tool to tools list
i add etcd-console tool to tools list for reference
2015-02-11 10:43:21 +08:00
92 changed files with 2185 additions and 2218 deletions

View File

@ -6,6 +6,7 @@ go:
install:
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/vet
- go get github.com/barakmich/go-nyet
script:
- INTEGRATION=y ./test

View File

@ -1,120 +0,0 @@
## Allow-legacy mode
Allow-legacy is a special mode in etcd that contains logic to enable a running etcd cluster to smoothly transition between major versions of etcd. For example, the internal API versions between etcd 0.4 (internal v1) and etcd 2.0 (internal v2) aren't compatible and the cluster needs to be updated all at once to make the switch. To minimize downtime, allow-legacy coordinates with all of the members of the cluster to shut down, migrate data, and restart onto the new version.
Allow-legacy helps users upgrade v0.4 etcd clusters easily, and allows your etcd cluster to have a minimal amount of downtime -- less than 1 minute for clusters storing less than 50 MB.
It supports upgrading from internal v1 to internal v2 now.
### Setup
This mode is enabled if `ETCD_ALLOW_LEGACY_MODE` is set to true, or etcd is running in CoreOS system.
It treats `ETCD_BINARY_DIR` as the directory for etcd binaries, which is organized in this way:
```
ETCD_BINARY_DIR
|
-- 1
|
-- 2
```
`1` is etcd with internal v1 protocol. You should use etcd v0.4.7 here. `2` is etcd with internal v2 protocol, which is etcd v2.x.
The default value for `ETCD_BINARY_DIR` is `/usr/libexec/etcd/internal_versions/`.
### Upgrading a Cluster
When starting etcd with a v1 data directory and v1 flags, etcd executes the v0.4.7 binary and runs exactly the same as before. To start the migration, follow the steps below:
![Migration Steps](etcd-migration-steps.png)
#### 1. Check the Cluster Health
Before upgrading, you should check the health of the cluster to double check that everything is working perfectly. Check the health by running:
```
$ etcdctl cluster-health
cluster is healthy
member 6e3bd23ae5f1eae0 is healthy
member 924e2e83e93f2560 is healthy
member a8266ecf031671f3 is healthy
```
If the cluster and all members are healthy, you can start the upgrading process. If not, check the unhealthy machines and repair them using [admin guide](./admin_guide.md).
#### 2. Trigger the Upgrade
When you're ready, use the `etcdctl upgrade` command to start upgrading the etcd cluster to 2.0:
```
# Defaults work on a CoreOS machine running etcd
$ etcdctl upgrade
```
```
# Advanced example specifying a peer url
$ etcdctl upgrade --old-version=1 --new-version=2 --peer-url=$PEER_URL
```
`PEER_URL` can be any accessible peer url of the cluster.
Once triggered, all peer-mode members will print out:
```
detected next internal version 2, exit after 10 seconds.
```
#### Parallel Coordinated Upgrade
As part of the upgrade, etcd does internal coordination within the cluster for a brief period and then exits. Clusters storing 50 MB should be unavailable for less than 1 minute.
#### Restart etcd Processes
After the etcd processes exit, they need to be restarted. You can do this manually or configure your unit system to do this automatically. On CoreOS, etcd is already configured to start automatically with systemd.
When restarted, the data directory of each member is upgraded, and afterwards etcd v2.0 will be running and servicing requests. The upgrade is now complete!
Standby-mode members are a special case &mdash; they will be upgraded into proxy mode (a new feature in etcd 2.0) upon restarting. When the upgrade is triggered, any standbys will exit with the message:
```
Detect the cluster has been upgraded to internal API v2. Exit now.
```
Once restarted, standbys run in v2.0 proxy mode, which proxy user requests to the etcd cluster.
#### 3. Check the Cluster Health
After the upgrade process, you can run the health check again to verify the upgrade. If the cluster is unhealthy or there is an unhealthy member, please refer to start [failure recovery](#failure-recovery).
### Downgrade
If the upgrading fails due to disk/network issues, you still can restart the upgrading process manually. However, once you upgrade etcd to internal v2 protocol, you CANNOT downgrade it back to internal v1 protocol. If you want to downgrade etcd in the future, please backup your v1 data dir beforehand.
### Upgrade Process on CoreOS
When running on a CoreOS system, allow-legacy mode is enabled by default and an automatic update will set up everything needed to execute the upgrade. The `etcd.service` on CoreOS is already configured to restart automatically. All you need to do is run `etcdctl upgrade` when you're ready, as described above.
### Internal Details
etcd v0.4.7 registers versions of available etcd binaries in its local machine into the key space at bootstrap stage. When the upgrade command is executed, etcdctl checks whether each member has internal-version-v2 etcd binary around. If that is true, each member is asked to record the fact that it needs to be upgraded the next time it reboots, and exits after 10 seconds.
Once restarted, etcd v2.0 sees the upgrade flag recorded. It upgrades the data directory, and executes etcd v2.0.
### Failure Recovery
If `etcdctl cluster-health` says that the cluster is unhealthy, the upgrade process fails, which may happen if the network is broken, or the disk cannot work.
The way to recover it is to manually upgrade the whole cluster to v2.0:
- Log into machines that ran v0.4 peer-mode etcd
- Stop all etcd services
- Remove the `member` directory under the etcd data-dir
- Start etcd service using [2.0 flags](configuration.md). An example for this is:
```
$ etcd --data-dir=$DATA_DIR --listen-peer-urls http://$LISTEN_PEER_ADDR \
--advertise-client-urls http://$ADVERTISE_CLIENT_ADDR \
--listen-client-urls http://$LISTEN_CLIENT_ADDR
```
- When this is done, v2.0 etcd cluster should work now.

View File

@ -287,7 +287,7 @@ curl 'http://127.0.0.1:2379/v2/keys/foo?wait=true&waitIndex=7'
The watch command returns immediately with the same response as previously.
**Note**: etcd only keeps the responses of the most recent 1000 events.
**Note**: etcd only keeps the responses of the most recent 1000 events across all etcd keys.
It is recommended to send the response to another thread to process immediately
instead of blocking the watch while processing the result.

View File

@ -30,7 +30,7 @@ ETCD_INITIAL_CLUSTER_STATE=new
```
```
-initial-cluster infra0=http://10.0.1.10:2380,http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
-initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380 \
-initial-cluster-state new
```

View File

@ -135,7 +135,9 @@ The security flags help to [build a secure etcd cluster][security].
### Unsafe Flags
Be CAUTIOUS to use unsafe flags because it will break the guarantee given by consensus protocol. For example, it may panic if other members in the cluster are still alive. Follow the instructions when using these falgs.
Please be CAUTIOUS when using unsafe flags because it will break the guarantees given by the consensus protocol.
For example, it may panic if other members in the cluster are still alive.
Follow the instructions when using these flags.
##### -force-new-cluster
+ Force to create a new one-member cluster. It commits configuration changes in force to remove all existing members in the cluster and add itself. It needs to be set to [restore a backup][restore].

View File

@ -0,0 +1,88 @@
# Running etcd under Docker
The following guide will show you how to run etcd under Docker using the [static bootstrap process](clustering.md#static).
## Running etcd in standalone mode
In order to expose the etcd API to clients outside of the Docker host you'll need to use the host IP address when configuring etcd.
```
export HostIP="192.168.12.50"
```
The following `docker run` command will expose the etcd client API over ports 4001 and 2379, and expose the peer port over 2380.
```
docker run -d -p 4001:4001 -p 2380:2380 -p 2379:2379 --name etcd quay.io/coreos/etcd:v2.0.3 \
-name etcd0 \
-advertise-client-urls http://${HostIP}:2379,http://${HostIP}:4001 \
-listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \
-initial-advertise-peer-urls http://${HostIP}:2380 \
-listen-peer-urls http://0.0.0.0:2380 \
-initial-cluster-token etcd-cluster-1 \
-initial-cluster etcd0=http://${HostIP}:2380 \
-initial-cluster-state new
```
Configure etcd clients to use the Docker host IP and one of the listening ports from above.
```
etcdctl -C http://192.168.12.50:2379 member list
```
```
etcdctl -C http://192.168.12.50:4001 member list
```
## Running a 3 node etcd cluster
Using Docker to setup a multi-node cluster is very similar to the standalone mode configuration.
The main difference being the value used for the `-initial-cluster` flag, which must contain the peer urls for each etcd member in the cluster.
### etcd0
```
docker run -d -p 4001:4001 -p 2380:2380 -p 2379:2379 --name etcd quay.io/coreos/etcd:v2.0.3 \
-name etcd0 \
-advertise-client-urls http://192.168.12.50:2379,http://192.168.12.50:4001 \
-listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \
-initial-advertise-peer-urls http://192.168.12.50:2380 \
-listen-peer-urls http://0.0.0.0:2380 \
-initial-cluster-token etcd-cluster-1 \
-initial-cluster etcd0=http://192.168.12.50:2380,etcd1=http://192.168.12.51:2380,etcd2=http://192.168.12.52:2380 \
-initial-cluster-state new
```
### etcd1
```
docker run -d -p 4001:4001 -p 2380:2380 -p 2379:2379 --name etcd quay.io/coreos/etcd:v2.0.3 \
-name etcd1 \
-advertise-client-urls http://192.168.12.51:2379,http://192.168.12.51:4001 \
-listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \
-initial-advertise-peer-urls http://192.168.12.51:2380 \
-listen-peer-urls http://0.0.0.0:2380 \
-initial-cluster-token etcd-cluster-1 \
-initial-cluster etcd0=http://192.168.12.50:2380,etcd1=http://192.168.12.51:2380,etcd2=http://192.168.12.52:2380 \
-initial-cluster-state new
```
### etcd2
```
docker run -d -p 4001:4001 -p 2380:2380 -p 2379:2379 --name etcd quay.io/coreos/etcd:v2.0.3 \
-name etcd2 \
-advertise-client-urls http://192.168.12.52:2379,http://192.168.12.52:4001 \
-listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \
-initial-advertise-peer-urls http://192.168.12.52:2380 \
-listen-peer-urls http://0.0.0.0:2380 \
-initial-cluster-token etcd-cluster-1 \
-initial-cluster etcd0=http://192.168.12.50:2380,etcd1=http://192.168.12.51:2380,etcd2=http://192.168.12.52:2380 \
-initial-cluster-state new
```
Once the cluster has been bootstrapped etcd clients can be configured with a list of etcd members:
```
etcdctl -C http://192.168.12.50:2379,http://192.168.12.51:2379,http://192.168.12.52:2379 member list
```

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.9 KiB

View File

@ -8,6 +8,7 @@
- [etcd-fs](https://github.com/xetorthio/etcd-fs) - FUSE filesystem for etcd
- [etcd-browser](https://github.com/henszey/etcd-browser) - A web-based key/value editor for etcd using AngularJS
- [etcd-lock](https://github.com/datawisesystems/etcd-lock) - A lock implementation for etcd
- [etcd-console](https://github.com/matishsiao/etcd-console) - A web-based key/value editor for etcd using PHP
**Go libraries**

View File

@ -99,7 +99,7 @@ curl http://10.0.0.10:2379/v2/members/272e204152 -XDELETE
## Change the peer urls of a member
Change the peer urls of a given mamber. The member ID must be a hex-encoded uint64. Returns 204 with empty content when successful. Returns a string describing the failure condition when unsuccessful.
Change the peer urls of a given member. The member ID must be a hex-encoded uint64. Returns 204 with empty content when successful. Returns a string describing the failure condition when unsuccessful.
If the POST body is malformed an HTTP 400 will be returned. If the member does not exist in the cluster an HTTP 404 will be returned. If any of the given peerURLs exists in the cluster an HTTP 409 will be returned. If the cluster fails to process the request within timeout an HTTP 500 will be returned, though the request may be processed later.

View File

@ -4,29 +4,31 @@ etcd can now run as a transparent proxy. Running etcd as a proxy allows for easi
etcd currently supports two proxy modes: `readwrite` and `readonly`. The default mode is `readwrite`, which forwards both read and write requests to the etcd cluster. A `readonly` etcd proxy only forwards read requests to the etcd cluster, and returns `HTTP 501` to all write requests.
The proxy will shuffle the list of cluster members periodically to avoid sending all connections to a single member.
### Using an etcd proxy
To start etcd in proxy mode, you need to provide three flags: `proxy`, `listen-client-urls`, and `initial-cluster` (or `discovery-url`).
To start etcd in proxy mode, you need to provide three flags: `proxy`, `listen-client-urls`, and `initial-cluster` (or `discovery`).
To start a readwrite proxy, set `-proxy on`; To start a readonly proxy, set `-proxy readonly`.
The proxy will be listening on `listen-client-urls` and forward requests to the etcd cluster discovered from in `initial-cluster` or `discovery url`.
The proxy will be listening on `listen-client-urls` and forward requests to the etcd cluster discovered from in `initial-cluster` or `discovery` url.
#### Start an etcd proxy with a static configuration
To start a proxy that will connect to a statically defined etcd cluster, specify the `initial-cluster` flag:
```
etcd -proxy on -client-listen-urls 127.0.0.1:8080 -initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380
etcd -proxy on -listen-client-urls 127.0.0.1:8080 -initial-cluster infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380,infra2=http://10.0.1.12:2380
```
#### Start an etcd proxy with the discovery service
If you bootstrap an etcd cluster using the [discovery service][discovery-service], you can also start the proxy with the same `discovery-url`.
If you bootstrap an etcd cluster using the [discovery service][discovery-service], you can also start the proxy with the same `discovery`.
To start a proxy using the discovery service, specify the `discovery-url` flag. The proxy will wait until the etcd cluster defined at the `discovery-url` finishes bootstrapping, and then start to forward the requests.
To start a proxy using the discovery service, specify the `discovery` flag. The proxy will wait until the etcd cluster defined at the `discovery` url finishes bootstrapping, and then start to forward the requests.
```
etcd -proxy on -client-listen-urls 127.0.0.1:8080 -discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
etcd -proxy on -listen-client-urls 127.0.0.1:8080 -discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de
```
#### Fallback to proxy mode with discovery service
If you bootstrap a etcd cluster using [discovery service][discovery-service] with more than the expected number of etcd members, the extra etcd processes will fall back to being `readwrite` proxies by default. They will forward the requests to the cluster as described above. For example, if you create a discovery url with `size=5`, and start ten etcd processes using that same discovery URL, the result will be a cluster with five etcd members and five proxies. Note that this behaviour can be disabled with the `proxy-fallback` flag.
If you bootstrap a etcd cluster using [discovery service][discovery-service] with more than the expected number of etcd members, the extra etcd processes will fall back to being `readwrite` proxies by default. They will forward the requests to the cluster as described above. For example, if you create a discovery url with `size=5`, and start ten etcd processes using that same discovery url, the result will be a cluster with five etcd members and five proxies. Note that this behaviour can be disabled with the `proxy-fallback` flag.
[discovery-service]: https://github.com/coreos/etcd/blob/master/Documentation/clustering.md#discovery

View File

@ -0,0 +1,470 @@
# v2 Auth and Security
## etcd Resources
There are three types of resources in etcd
1. user resources: users and roles in the user store
2. key-value resources: key-value pairs in the key-value store
3. settings resources: security settings, auth settings, and dynamic etcd cluster settings (election/heartbeat)
### User Resources
#### Users
A user is an identity to be authenticated. Each user can have multiple roles. The user has a capability on the resource if one of the roles has that capability.
The special static `root` user has a ROOT role. (Caps for visual aid throughout)
#### Role
Each role has exactly one associated Permission List. A permission list exists for each permission on key-value resources. A role with `manage` permission of a key-value resource can grant/revoke capability of that key-value to other roles.
The special static ROOT role has full permissions on all key-value resources, plus the permission to manage user resources and settings resources. Only the ROOT role has the permission to manage user resources and modify settings resources.
#### Permissions
There are two types of permissions, `read` and `write`. All management stems from the ROOT user.
A Permission List is a list of allowed patterns for that particular permission (read or write). Only ALLOW prefixes (incidentally, this is what Amazon S3 does). DENY becomes more complicated and is TBD.
### Key-Value Resources
A key-value resource is a key-value pair in the store. Given a list of matching patterns, permission for any given key in a request is granted if any of the patterns in the list match.
The glob match rules are as follows:
* `*` and `\` are special characters, representing "greedy match" and "escape" respectively.
* As a corollary, `\*` and `\\` are the corresponding literal matches.
* All other bytes match exactly their bytes, starting always from the *first byte*. (For regex fans, `re.match` in Python)
* Examples:
* `/foo` matches only the single key/directory of `/foo`
* `/foo*` matches the prefix `/foo`, and all subdirectories/keys
* `/foo/*/bar` matches the keys bar in any (recursive) subdirectory of `/foo`.
### Settings Resources
Specific settings for the cluster as a whole. This can include adding and removing cluster members, enabling or disabling security, replacing certificates, and any other dynamic configuration by the administrator.
## v2 Auth
### Basic Auth
We only support [Basic Auth](http://en.wikipedia.org/wiki/Basic_access_authentication) for the first version. Client needs to attach the basic auth to the HTTP Authorization Header.
### Authorization field for operations
Added to requests to /v2/keys, /v2/security
Add code 403 Forbidden to the set of responses from the v2 API
Authorization: Basic {encoded string}
### Future Work
Other types of auth can be considered for the future (eg, signed certs, public keys) but the `Authorization:` header allows for other such types
### Things out of Scope for etcd Permissions
* Pluggable AUTH backends like LDAP (other Authorization tokens generated by LDAP et al may be a possibility)
* Very fine-grained access controls (eg: users modifying keys outside work hours)
## API endpoints
An Error JSON corresponds to:
{
"name": "ErrErrorName",
"description" : "The longer helpful description of the error."
}
#### Users
The User JSON object is formed as follows:
```
{
"user": "userName"
"password": "password"
"roles": [
"role1",
"role2"
],
"grant": [],
"revoke": [],
"lastModified": "2006-01-02Z04:05:07"
}
```
Password is only passed when necessary. Last Modified is set by the server and ignored in all client posts.
**Get a list of users**
GET/HEAD /v2/security/user
Sent Headers:
Authorization: Basic <BasicAuthString>
Possible Status Codes:
200 OK
403 Forbidden
200 Headers:
ETag: "<hash of list of users>"
Content-type: application/json
200 Body:
{
"users": ["alice", "bob", "eve"]
}
**Get User Details**
GET/HEAD /v2/security/users/alice
Sent Headers:
Authorization: Basic <BasicAuthString>
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
200 Headers:
ETag: "users/alice:<lastModified>"
Content-type: application/json
200 Body:
{
"user" : "alice"
"roles" : ["fleet", "etcd"]
"lastModified": "2015-02-05Z18:00:00"
}
**Create A User**
A user can be created with initial roles, if filled in. However, no roles are required; only the username and password fields
PUT /v2/security/users/charlie
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
JSON struct, above, matching the appropriate name and with starting roles.
Possible Status Codes:
200 OK
403 Forbidden
409 Conflict (if exists)
200 Headers:
ETag: "users/charlie:<tzNow>"
200 Body: (empty)
**Remove A User**
DELETE /v2/security/users/charlie
Sent Headers:
Authorization: Basic <BasicAuthString>
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
200 Headers:
200 Body: (empty)
**Grant a Role(s) to a User**
PUT /v2/security/users/charlie/grant
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
{ "grantRoles" : ["fleet", "etcd"], (extra JSON data for checking OK) }
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
409 Conflict
200 Headers:
ETag: "users/charlie:<tzNow>"
200 Body:
JSON user struct, updated. "roles" now contains the grants, and "grantRoles" is empty. If there is an error in the set of roles to be added, for example, a non-existent role, then 409 is returned, with an error JSON stating why.
**Revoke a Role(s) from a User**
PUT /v2/security/users/charlie/revoke
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
{ "revokeRoles" : ["fleet"], (extra JSON data for checking OK) }
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
409 Conflict
200 Headers:
ETag: "users/charlie:<tzNow>"
200 Body:
JSON user struct, updated. "roles" now doesn't contain the roles, and "revokeRoles" is empty. If there is an error in the set of roles to be removed, for example, a non-existent role, then 409 is returned, with an error JSON stating why.
**Change password**
PUT /v2/security/users/charlie/password
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
{"user": "charlie", "password": "newCharliePassword"}
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
200 Headers:
ETag: "users/charlie:<tzNow>"
200 Body:
JSON user struct, updated
#### Roles
A full role structure may look like this. A Permission List structure is used for the "permissions", "grant", and "revoke" keys.
```
{
"role" : "fleet",
"permissions" : {
"kv" {
"read" : [ "/fleet/" ],
"write": [ "/fleet/" ],
}
}
"grant" : {"kv": {...}},
"revoke": {"kv": {...}},
"members" : ["alice", "bob"],
"lastModified": "2015-02-05Z18:00:00"
}
```
**Get a list of Roles**
GET/HEAD /v2/security/roles
Sent Headers:
Authorization: Basic <BasicAuthString>
Possible Status Codes:
200 OK
403 Forbidden
200 Headers:
ETag: "<hash of list of roles>"
Content-type: application/json
200 Body:
{
"roles": ["fleet", "etcd", "quay"]
}
**Get Role Details**
GET/HEAD /v2/security/roles/fleet
Sent Headers:
Authorization: Basic <BasicAuthString>
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
200 Headers:
ETag: "roles/fleet:<lastModified>"
Content-type: application/json
200 Body:
{
"role" : "fleet",
"read": {
"prefixesAllowed": ["/fleet/"],
},
"write": {
"prefixesAllowed": ["/fleet/"],
},
"members" : ["alice", "bob"] // Reverse map optional?
"lastModified": "2015-02-05Z18:00:00"
}
**Create A Role**
PUT /v2/security/roles/rocket
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
Initial desired JSON state, complete with prefixes and
Possible Status Codes:
201 Created
403 Forbidden
404 Not Found
409 Conflict (if exists)
200 Headers:
ETag: "roles/rocket:<tzNow>"
200 Body:
JSON state of the role
**Remove A Role**
DELETE /v2/security/roles/rocket
Sent Headers:
Authorization: Basic <BasicAuthString>
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
200 Headers:
200 Body: (empty)
**Update a Roles Permission List for {read,write}ing**
PUT /v2/security/roles/rocket/update
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
{
"role" : "rocket",
"grant": {
"kv": {
"read" : [ "/rocket/"]
}
},
"revoke": {
"kv": {
"read" : [ "/fleet/"]
}
}
}
Possible Status Codes:
200 OK
403 Forbidden
404 Not Found
200 Headers:
ETag: "roles/rocket:<tzNow>"
200 Body:
JSON state of the role, with change containing empty lists and the deltas applied appropriately.
#### TBD Management modification
## Example Workflow
Let's walk through an example to show two tenants (applications, in our case) using etcd permissions.
### Enable security
//TODO(barakmich): Maybe this is dynamic? I don't like the idea of rebooting when we don't have to.
#### Default ROOT
etcd always has a ROOT when started with security enabled. The default username is `root`, and the password is `root`.
// TODO(barakmich): if the enabling is dynamic, perhaps that'd be a good time to set a password? Thus obviating the next section.
### Change root's password
```
PUT /v2/security/users/root/password
Headers:
Authorization: Basic <root:root>
Put Body:
{"user" : "root", "password": "betterRootPW!"}
```
//TODO(barakmich): How do you recover the root password? *This* may require a flag and a restart. `--disable-permissions`
### Create Roles for the Applications
Create the rocket role fully specified:
```
PUT /v2/security/roles/rocket
Headers:
Authorization: Basic <root:betterRootPW!>
Body:
{
"role" : "rocket",
"permissions" : {
"kv": {
"read": [
"/rocket/"
],
"write": [
"/rocket/"
]
}
}
}
```
But let's make fleet just a basic role for now:
```
PUT /v2/security/roles/fleet
Headers:
Authorization: Basic <root:betterRootPW!>
Body:
{
"role" : "fleet",
}
```
### Optional: Add some permissions to the roles
Well, we finally figured out where we want fleet to live. Let's fix it.
(Note that we avoided this in the rocket case. So this step is optional.)
```
PUT /v2/security/roles/fleet/update
Headers:
Authorization: Basic <root:betterRootPW!>
Put Body:
{
"role" : "fleet",
"grant" : {
"kv" : {
"read": [
"/fleet/"
]
}
}
}
```
### Create Users
Same as before, let's use rocket all at once and fleet separately
```
PUT /v2/security/users/rocketuser
Headers:
Authorization: Basic <root:betterRootPW!>
Body:
{"user" : "rocketuser", "password" : "rocketpw", "roles" : ["rocket"]}
```
```
PUT /v2/security/users/fleetuser
Headers:
Authorization: Basic <root:betterRootPW!>
Body:
{"user" : "fleetuser", "password" : "fleetpw"}
```
### Optional: Grant Roles to Users
Likewise, let's explicitly grant fleetuser access.
```
PUT /v2/security/users/fleetuser/grant
Headers:
Authorization: Basic <root:betterRootPW!>
Body:
{"user": "fleetuser", "grant": ["fleet"]}
```
#### Start to use fleetuser and rocketuser
For example:
```
PUT /v2/keys/rocket/RocketData
Headers:
Authorization: Basic <rocketuser:rocketpw>
```
Reads and writes outside the prefixes granted will fail with a 403 Forbidden.

6
build
View File

@ -12,7 +12,5 @@ ln -s ${PWD} $GOPATH/src/${REPO_PATH}
eval $(go env)
# Static compilation is useful when etcd is run in a container
CGO_ENABLED=0 go build -a -ldflags '-s' -o bin/etcd ${REPO_PATH}
CGO_ENABLED=0 go build -a -ldflags '-s' -o bin/etcdctl ${REPO_PATH}/etcdctl
go build -o bin/etcd-migrate ${REPO_PATH}/tools/etcd-migrate
go build -o bin/etcd-dump-logs ${REPO_PATH}/tools/etcd-dump-logs
CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags '-s' -o bin/etcd ${REPO_PATH}
CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags '-s' -o bin/etcdctl ${REPO_PATH}/etcdctl

View File

@ -89,7 +89,7 @@ func TestV2KeysURLHelper(t *testing.T) {
func TestGetAction(t *testing.T) {
ep := url.URL{Scheme: "http", Host: "example.com/v2/keys"}
wantURL := &url.URL{
baseWantURL := &url.URL{
Scheme: "http",
Host: "example.com",
Path: "/v2/keys/foo/bar",
@ -117,7 +117,7 @@ func TestGetAction(t *testing.T) {
}
got := *f.HTTPRequest(ep)
wantURL := wantURL
wantURL := baseWantURL
wantURL.RawQuery = tt.wantQuery
err := assertResponse(got, wantURL, wantHeader, nil)
@ -129,7 +129,7 @@ func TestGetAction(t *testing.T) {
func TestWaitAction(t *testing.T) {
ep := url.URL{Scheme: "http", Host: "example.com/v2/keys"}
wantURL := &url.URL{
baseWantURL := &url.URL{
Scheme: "http",
Host: "example.com",
Path: "/v2/keys/foo/bar",
@ -166,7 +166,7 @@ func TestWaitAction(t *testing.T) {
}
got := *f.HTTPRequest(ep)
wantURL := wantURL
wantURL := baseWantURL
wantURL.RawQuery = tt.wantQuery
err := assertResponse(got, wantURL, wantHeader, nil)

View File

@ -193,14 +193,14 @@ func TestCheckCluster(t *testing.T) {
})
}
c := &clientWithResp{rs: rs}
d := discovery{cluster: cluster, id: 1, c: c}
dBase := discovery{cluster: cluster, id: 1, c: c}
cRetry := &clientWithRetry{failTimes: 3}
cRetry.rs = rs
fc := clockwork.NewFakeClock()
dRetry := discovery{cluster: cluster, id: 1, c: cRetry, clock: fc}
for _, d := range []discovery{d, dRetry} {
for _, d := range []discovery{dBase, dRetry} {
go func() {
for i := uint(1); i <= maxRetryInTest; i++ {
fc.BlockUntil(1)
@ -263,7 +263,7 @@ func TestWaitNodes(t *testing.T) {
for i, tt := range tests {
// Basic case
c := &clientWithResp{nil, &watcherWithResp{tt.rs}}
d := &discovery{cluster: "1000", c: c}
dBase := &discovery{cluster: "1000", c: c}
// Retry case
retryScanResp := make([]*client.Response, 0)
@ -291,7 +291,7 @@ func TestWaitNodes(t *testing.T) {
clock: fc,
}
for _, d := range []*discovery{d, dRetry} {
for _, d := range []*discovery{dBase, dRetry} {
go func() {
for i := uint(1); i <= maxRetryInTest; i++ {
fc.BlockUntil(1)

View File

@ -25,7 +25,8 @@ import (
var (
// indirection for testing
lookupSRV = net.LookupSRV
lookupSRV = net.LookupSRV
resolveTCPAddr = net.ResolveTCPAddr
)
// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap)
@ -38,7 +39,7 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
// First, resolve the apurls
for _, url := range apurls {
tcpAddr, err := net.ResolveTCPAddr("tcp", url.Host)
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
log.Printf("discovery: Couldn't resolve host %s during SRV discovery", url.Host)
return "", "", err
@ -52,8 +53,9 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
return err
}
for _, srv := range addrs {
host := net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port))
tcpAddr, err := net.ResolveTCPAddr("tcp", host)
target := strings.TrimSuffix(srv.Target, ".")
host := net.JoinHostPort(target, fmt.Sprintf("%d", srv.Port))
tcpAddr, err := resolveTCPAddr("tcp", host)
if err != nil {
log.Printf("discovery: Couldn't resolve host %s during SRV discovery", host)
continue
@ -68,8 +70,8 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
n = fmt.Sprintf("%d", tempName)
tempName += 1
}
stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, tcpAddr.String()))
log.Printf("discovery: Got bootstrap from DNS for %s at host %s to %s%s", service, host, prefix, tcpAddr.String())
stringParts = append(stringParts, fmt.Sprintf("%s=%s%s", n, prefix, host))
log.Printf("discovery: Got bootstrap from DNS for %s at %s%s", service, prefix, host)
}
return nil
}

View File

@ -23,19 +23,26 @@ import (
)
func TestSRVGetCluster(t *testing.T) {
defer func() { lookupSRV = net.LookupSRV }()
defer func() {
lookupSRV = net.LookupSRV
resolveTCPAddr = net.ResolveTCPAddr
}()
name := "dnsClusterTest"
tests := []struct {
withSSL []*net.SRV
withoutSSL []*net.SRV
urls []string
expected string
dns map[string]string
expected string
}{
{
[]*net.SRV{},
[]*net.SRV{},
nil,
nil,
"",
},
{
@ -46,6 +53,8 @@ func TestSRVGetCluster(t *testing.T) {
},
[]*net.SRV{},
nil,
nil,
"0=https://10.0.0.1:2480,1=https://10.0.0.2:2480,2=https://10.0.0.3:2480",
},
{
@ -58,6 +67,7 @@ func TestSRVGetCluster(t *testing.T) {
&net.SRV{Target: "10.0.0.1", Port: 7001},
},
nil,
nil,
"0=https://10.0.0.1:2480,1=https://10.0.0.2:2480,2=https://10.0.0.3:2480,3=http://10.0.0.1:7001",
},
{
@ -70,8 +80,22 @@ func TestSRVGetCluster(t *testing.T) {
&net.SRV{Target: "10.0.0.1", Port: 7001},
},
[]string{"https://10.0.0.1:2480"},
nil,
"dnsClusterTest=https://10.0.0.1:2480,0=https://10.0.0.2:2480,1=https://10.0.0.3:2480,2=http://10.0.0.1:7001",
},
// matching local member with resolved addr and return unresolved hostnames
{
[]*net.SRV{
&net.SRV{Target: "1.example.com.", Port: 2480},
&net.SRV{Target: "2.example.com.", Port: 2480},
&net.SRV{Target: "3.example.com.", Port: 2480},
},
nil,
[]string{"https://10.0.0.1:2480"},
map[string]string{"1.example.com:2480": "10.0.0.1:2480", "2.example.com:2480": "10.0.0.2:2480", "3.example.com:2480": "10.0.0.3:2480"},
"dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480",
},
}
for i, tt := range tests {
@ -84,6 +108,12 @@ func TestSRVGetCluster(t *testing.T) {
}
return "", nil, errors.New("Unkown service in mock")
}
resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) {
if tt.dns == nil || tt.dns[addr] == "" {
return net.ResolveTCPAddr(network, addr)
}
return net.ResolveTCPAddr(network, tt.dns[addr])
}
urls := testutil.MustNewURLs(t, tt.urls)
str, token, err := SRVGetCluster(name, "example.com", "token", urls)
if err != nil {

View File

@ -44,10 +44,10 @@ func NewBackupCommand() cli.Command {
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
srcSnap := path.Join(c.String("data-dir"), "snap")
destSnap := path.Join(c.String("backup-dir"), "snap")
srcWAL := path.Join(c.String("data-dir"), "wal")
destWAL := path.Join(c.String("backup-dir"), "wal")
srcSnap := path.Join(c.String("data-dir"), "member", "snap")
destSnap := path.Join(c.String("backup-dir"), "member", "snap")
srcWAL := path.Join(c.String("data-dir"), "member", "wal")
destWAL := path.Join(c.String("backup-dir"), "member", "wal")
if err := os.MkdirAll(destSnap, 0700); err != nil {
log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)

View File

@ -54,6 +54,7 @@ func handleClusterHealth(c *cli.Context) {
// is raft stable and making progress?
client = etcd.NewClient([]string{ep})
client.SetTransport(tr)
resp, err := client.Get("/", false, false)
if err != nil {
fmt.Println("cluster is unhealthy")

View File

@ -0,0 +1,128 @@
package command
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd"
"github.com/coreos/etcd/store"
)
// set describes a single key-value pair, together with its TTL in
// seconds, queued for import into the destination cluster.
type set struct {
	key   string
	value string
	ttl   int64
}
// NewImportSnapCommand returns the CLI definition of the "import"
// command, which loads an etcd 0.4.x snapshot file and copies its keys
// into a running cluster using a pool of concurrent clients.
func NewImportSnapCommand() cli.Command {
	return cli.Command{
		Name:  "import",
		Usage: "import a snapshot to a cluster",
		Flags: []cli.Flag{
			// Fixed typo in the user-visible usage string: "vaild" -> "valid".
			cli.StringFlag{Name: "snap", Value: "", Usage: "Path to the valid etcd 0.4.x snapshot."},
			cli.StringSliceFlag{Name: "hidden", Value: new(cli.StringSlice), Usage: "Hidden key spaces to import from snapshot"},
			cli.IntFlag{Name: "c", Value: 10, Usage: "Number of concurrent clients to import the data"},
		},
		Action: handleImportSnap,
	}
}
// handleImportSnap implements the "import" command: it recovers an
// in-memory v2 store from an etcd 0.4.x snapshot file, then fans the
// recovered keys out to a pool of concurrent etcd clients that re-set
// each key against the destination cluster. Fatal problems exit the
// process with status 1.
func handleImportSnap(c *cli.Context) {
	// Read the snapshot file; --snap is effectively required.
	d, err := ioutil.ReadFile(c.String("snap"))
	if err != nil {
		if c.String("snap") == "" {
			fmt.Printf("no snapshot file provided (use --snap)\n")
		} else {
			fmt.Printf("cannot read snapshot file %s\n", c.String("snap"))
		}
		os.Exit(1)
	}
	// Rebuild the key-value store from the raw snapshot bytes.
	st := store.New()
	err = st.Recovery(d)
	if err != nil {
		fmt.Printf("cannot recover the snapshot file: %v\n", err)
		os.Exit(1)
	}
	endpoints, err := getEndpoints(c)
	if err != nil {
		handleError(ErrorFromEtcd, err)
	}
	tr, err := getTransport(c)
	if err != nil {
		handleError(ErrorFromEtcd, err)
	}
	// Start a fixed pool of --c workers, each with its own client,
	// all consuming from one unbuffered channel of pending sets.
	wg := &sync.WaitGroup{}
	setc := make(chan set)
	concurrent := c.Int("c")
	fmt.Printf("starting to import snapshot %s with %d clients\n", c.String("snap"), concurrent)
	for i := 0; i < concurrent; i++ {
		client := etcd.NewClient(endpoints)
		client.SetTransport(tr)
		if c.GlobalBool("debug") {
			go dumpCURL(client)
		}
		if ok := client.SyncCluster(); !ok {
			handleError(FailedToConnectToHost, errors.New("cannot sync with the cluster using endpoints "+strings.Join(endpoints, ", ")))
		}
		wg.Add(1)
		go runSet(client, setc, wg)
	}
	// Walk the whole visible keyspace recursively and enqueue every key.
	all, err := st.Get("/", true, true)
	if err != nil {
		handleError(ErrorFromEtcd, err)
	}
	n := copyKeys(all.Node, setc)
	// Hidden key spaces are walked separately — presumably the root
	// Get does not include them (TODO confirm against store semantics).
	hiddens := c.StringSlice("hidden")
	for _, h := range hiddens {
		allh, err := st.Get(h, true, true)
		if err != nil {
			handleError(ErrorFromEtcd, err)
		}
		n += copyKeys(allh.Node, setc)
	}
	// Closing the channel lets the worker goroutines drain and exit;
	// Wait blocks until every worker has called wg.Done().
	close(setc)
	wg.Wait()
	fmt.Printf("finished importing %d keys\n", n)
}
// copyKeys walks the node tree rooted at node, sending every leaf
// key-value pair on out. It returns the number of keys enqueued.
func copyKeys(node *store.NodeExtern, out chan set) int {
	// Leaf node: enqueue the key and report one copied entry.
	if !node.Dir {
		out <- set{node.Key, *node.Value, node.TTL}
		return 1
	}
	// Directory: recurse into each child and accumulate the counts.
	log.Println("entering dir:", node.Key)
	total := 0
	for _, child := range node.Nodes {
		total += copyKeys(child, out)
	}
	return total
}
// runSet consumes key-value pairs from in and writes each one to the
// cluster through client, signalling wg once the channel is drained.
// Non-zero TTLs shorter than 300 seconds are extended to 300 seconds.
func runSet(client *etcd.Client, in chan set, wg *sync.WaitGroup) {
	defer wg.Done()
	for item := range in {
		log.Println("copying key:", item.key)
		// Bump very short TTLs so the key does not expire mid-import.
		if item.ttl != 0 && item.ttl < 300 {
			log.Printf("extending key %s's ttl to 300 seconds", item.key)
			item.ttl = 5 * 60
		}
		if _, err := client.Set(item.key, item.value, uint64(item.ttl)); err != nil {
			log.Fatalf("failed to copy key: %v\n", err)
		}
	}
}

View File

@ -134,10 +134,10 @@ func actionMemberAdd(c *cli.Context) {
}
conf := []string{}
for _, m := range members {
for _, u := range m.PeerURLs {
n := m.Name
if m.ID == newID {
for _, memb := range members {
for _, u := range memb.PeerURLs {
n := memb.Name
if memb.ID == newID {
n = newName
}
conf = append(conf, fmt.Sprintf("%s=%s", n, u))
@ -160,8 +160,9 @@ func actionMemberRemove(c *cli.Context) {
mAPI := mustNewMembersAPI(c)
// Get the list of members.
listctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
listctx, listCancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
members, err := mAPI.List(listctx)
listCancel()
if err != nil {
fmt.Fprintln(os.Stderr, "Error while verifying ID against known members:", err.Error())
os.Exit(1)
@ -184,9 +185,9 @@ func actionMemberRemove(c *cli.Context) {
}
// Actually attempt to remove the member.
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
ctx, removeCancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
err = mAPI.Remove(ctx, removalID)
cancel()
removeCancel()
if err != nil {
fmt.Fprintf(os.Stderr, "Recieved an error trying to remove member %s: %s", removalID, err.Error())
os.Exit(1)

View File

@ -1,78 +0,0 @@
/*
Copyright 2015 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"fmt"
"log"
"net/http"
"os"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/pkg/transport"
)
// UpgradeCommand returns the CLI definition of the "upgrade" command,
// which asks a peer to upgrade an old-version etcd cluster to a new
// internal version.
func UpgradeCommand() cli.Command {
	flags := []cli.Flag{
		cli.StringFlag{Name: "old-version", Value: "1", Usage: "Old internal version"},
		cli.StringFlag{Name: "new-version", Value: "2", Usage: "New internal version"},
		cli.StringFlag{Name: "peer-url", Value: "http://localhost:7001", Usage: "An etcd peer url string"},
		cli.StringFlag{Name: "peer-cert-file", Value: "", Usage: "identify HTTPS peer using this SSL certificate file"},
		cli.StringFlag{Name: "peer-key-file", Value: "", Usage: "identify HTTPS peer using this SSL key file"},
		cli.StringFlag{Name: "peer-ca-file", Value: "", Usage: "verify certificates of HTTPS-enabled peers using this CA bundle"},
	}
	return cli.Command{
		Name:   "upgrade",
		Usage:  "upgrade an old version etcd cluster to a new version",
		Flags:  flags,
		Action: handleUpgrade,
	}
}
// handleUpgrade sends an upgrade request to a peer's admin endpoint,
// asking the cluster to migrate from internal version 1 (etcd 0.4.x)
// to internal version 2. Any other version pair is rejected up front.
func handleUpgrade(c *cli.Context) {
	// Only the 1 -> 2 upgrade path is supported.
	if c.String("old-version") != "1" {
		fmt.Printf("Do not support upgrade from version %s\n", c.String("old-version"))
		os.Exit(1)
	}
	if c.String("new-version") != "2" {
		fmt.Printf("Do not support upgrade to version %s\n", c.String("new-version"))
		os.Exit(1)
	}
	tls := transport.TLSInfo{
		CAFile:   c.String("peer-ca-file"),
		CertFile: c.String("peer-cert-file"),
		KeyFile:  c.String("peer-key-file"),
	}
	t, err := transport.NewTransport(tls)
	if err != nil {
		log.Fatal(err)
	}
	client := http.Client{Transport: t}
	resp, err := client.Get(c.String("peer-url") + "/v2/admin/next-internal-version")
	if err != nil {
		fmt.Printf("Failed to send upgrade request to %s: %v\n", c.String("peer-url"), err)
		return
	}
	// Close the response body so the connection can be reused
	// (the original leaked it).
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		fmt.Println("Cluster will start upgrading from internal version 1 to 2 in 10 seconds.")
		return
	}
	if resp.StatusCode == http.StatusNotFound {
		fmt.Println("Cluster cannot upgrade to 2: version is not 0.4.7")
		return
	}
	// Report the URL actually used; the original read the non-existent
	// "cluster-url" flag here and misspelled "Failed".
	fmt.Printf("Failed to send upgrade request to %s: bad status code %d\n", c.String("peer-url"), resp.StatusCode)
}

View File

@ -65,7 +65,7 @@ func getPeersFlagValue(c *cli.Context) []string {
// If we still don't have peers, use a default
if peerstr == "" {
peerstr = "127.0.0.1:4001"
peerstr = "127.0.0.1:4001,127.0.0.1:2379"
}
return strings.Split(peerstr, ",")

View File

@ -31,7 +31,7 @@ func main() {
app.Flags = []cli.Flag{
cli.BoolFlag{Name: "debug", Usage: "output cURL commands which can be used to reproduce the request"},
cli.BoolFlag{Name: "no-sync", Usage: "don't synchronize cluster information before sending request"},
cli.StringFlag{Name: "output, o", Value: "simple", Usage: "output response in the given format (`simple` or `json`)"},
cli.StringFlag{Name: "output, o", Value: "simple", Usage: "output response in the given format (`simple`, `extended` or `json`)"},
cli.StringFlag{Name: "peers, C", Value: "", Usage: "a comma-delimited list of machine addresses in the cluster (default: \"127.0.0.1:4001\")"},
cli.StringFlag{Name: "cert-file", Value: "", Usage: "identify HTTPS client using this SSL certificate file"},
cli.StringFlag{Name: "key-file", Value: "", Usage: "identify HTTPS client using this SSL key file"},
@ -53,7 +53,7 @@ func main() {
command.NewWatchCommand(),
command.NewExecWatchCommand(),
command.NewMemberCommand(),
command.UpgradeCommand(),
command.NewImportSnapCommand(),
}
app.Run(os.Args)

View File

@ -15,7 +15,6 @@
package etcdmain
import (
"errors"
"flag"
"fmt"
"log"
@ -26,7 +25,6 @@ import (
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/pkg/cors"
"github.com/coreos/etcd/pkg/flags"
"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/version"
)
@ -41,6 +39,8 @@ const (
clusterStateFlagNew = "new"
clusterStateFlagExisting = "existing"
defaultName = "default"
)
var (
@ -137,7 +137,7 @@ func NewConfig() *config {
fs.Var(flags.NewURLsValue("http://localhost:2379,http://localhost:4001"), "listen-client-urls", "List of URLs to listen on for client traffic")
fs.UintVar(&cfg.maxSnapFiles, "max-snapshots", defaultMaxSnapshots, "Maximum number of snapshot files to retain (0 is unlimited)")
fs.UintVar(&cfg.maxWalFiles, "max-wals", defaultMaxWALs, "Maximum number of wal files to retain (0 is unlimited)")
fs.StringVar(&cfg.name, "name", "default", "Unique human-readable name for this node")
fs.StringVar(&cfg.name, "name", defaultName, "Unique human-readable name for this node")
fs.Uint64Var(&cfg.snapCount, "snapshot-count", etcdserver.DefaultSnapCount, "Number of committed transactions to trigger a snapshot")
fs.UintVar(&cfg.TickMs, "heartbeat-interval", 100, "Time (in milliseconds) of a heartbeat interval.")
fs.UintVar(&cfg.ElectionMs, "election-timeout", 1000, "Time (in milliseconds) for an election to timeout.")
@ -153,7 +153,7 @@ func NewConfig() *config {
}
fs.StringVar(&cfg.dproxy, "discovery-proxy", "", "HTTP proxy to use for traffic to discovery service")
fs.StringVar(&cfg.dnsCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
fs.StringVar(&cfg.initialCluster, "initial-cluster", "default=http://localhost:2380,default=http://localhost:7001", "Initial cluster configuration for bootstrapping")
fs.StringVar(&cfg.initialCluster, "initial-cluster", initialClusterFromName(defaultName), "Initial cluster configuration for bootstrapping")
fs.StringVar(&cfg.initialClusterToken, "initial-cluster-token", "etcd-cluster", "Initial cluster token for the etcd cluster during bootstrap")
fs.Var(cfg.clusterState, "initial-cluster-state", "Initial cluster configuration for bootstrapping")
if err := cfg.clusterState.Set(clusterStateFlagNew); err != nil {
@ -206,6 +206,9 @@ func (cfg *config) Parse(arguments []string) error {
default:
os.Exit(2)
}
if len(cfg.FlagSet.Args()) != 0 {
return fmt.Errorf("'%s' is not a valid flag", cfg.FlagSet.Arg(0))
}
if cfg.printVersion {
fmt.Println("etcd version", version.Version)
@ -251,15 +254,19 @@ func (cfg *config) Parse(arguments []string) error {
return err
}
if err := cfg.resolveUrls(); err != nil {
return errors.New("cannot resolve DNS hostnames.")
if 5*cfg.TickMs > cfg.ElectionMs {
return fmt.Errorf("-election-timeout[%vms] should be at least as 5 times as -heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
}
return nil
}
func (cfg *config) resolveUrls() error {
return netutil.ResolveTCPAddrs(cfg.lpurls, cfg.apurls, cfg.lcurls, cfg.acurls)
// initialClusterFromName builds the default -initial-cluster flag value for
// the member with the given name, advertising the two default peer ports.
// An empty name falls back to defaultName.
func initialClusterFromName(name string) string {
	if name == "" {
		name = defaultName
	}
	return fmt.Sprintf("%s=http://localhost:2380,%s=http://localhost:7001", name, name)
}
func (cfg config) isNewCluster() bool { return cfg.clusterState.String() == clusterStateFlagNew }

View File

@ -157,9 +157,9 @@ func TestConfigParsingV1Flags(t *testing.T) {
"-addr=127.0.0.1:4001",
}
wcfg := NewConfig()
wcfg.lpurls = []url.URL{{Scheme: "http", Host: "0.0.0.0:7001"}}
wcfg.lpurls = []url.URL{{Scheme: "http", Host: "[::]:7001"}}
wcfg.apurls = []url.URL{{Scheme: "http", Host: "127.0.0.1:7001"}}
wcfg.lcurls = []url.URL{{Scheme: "http", Host: "0.0.0.0:4001"}}
wcfg.lcurls = []url.URL{{Scheme: "http", Host: "[::]:4001"}}
wcfg.acurls = []url.URL{{Scheme: "http", Host: "127.0.0.1:4001"}}
cfg := NewConfig()

View File

@ -32,28 +32,51 @@ import (
"github.com/coreos/etcd/etcdserver/etcdhttp"
"github.com/coreos/etcd/pkg/cors"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/osutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/proxy"
"github.com/coreos/etcd/rafthttp"
)
type dirType string
const (
// the owner can make/remove files inside the directory
privateDirMode = 0700
)
var (
dirMember = dirType("member")
dirProxy = dirType("proxy")
dirEmpty = dirType("empty")
)
func Main() {
cfg := NewConfig()
err := cfg.Parse(os.Args[1:])
if err != nil {
log.Printf("etcd: error verifying flags, %v", err)
log.Printf("etcd: error verifying flags, %v. See 'etcd -help'.", err)
os.Exit(2)
}
var stopped <-chan struct{}
shouldProxy := cfg.isProxy()
if cfg.name != defaultName && cfg.initialCluster == initialClusterFromName(defaultName) {
cfg.initialCluster = initialClusterFromName(cfg.name)
}
if cfg.dir == "" {
cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
log.Printf("etcd: no data-dir provided, using default data-dir ./%s", cfg.dir)
}
which := identifyDataDirOrDie(cfg.dir)
if which != dirEmpty {
log.Printf("etcd: already initialized as %v before, starting as etcd %v...", which, which)
}
shouldProxy := cfg.isProxy() || which == dirProxy
if !shouldProxy {
stopped, err = startEtcd(cfg)
if err == discovery.ErrFullCluster && cfg.shouldFallbackToProxy() {
@ -74,7 +97,10 @@ func Main() {
}
}
osutil.HandleInterrupts()
<-stopped
osutil.Exit(0)
}
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
@ -84,18 +110,6 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
return nil, fmt.Errorf("error setting up initial cluster: %v", err)
}
if cfg.dir == "" {
cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
log.Printf("no data-dir provided, using default data-dir ./%s", cfg.dir)
}
if err := makeMemberDir(cfg.dir); err != nil {
return nil, fmt.Errorf("cannot use /member sub-directory: %v", err)
}
membdir := path.Join(cfg.dir, "member")
if err := fileutil.IsDirWriteable(membdir); err != nil {
return nil, fmt.Errorf("cannot write to data directory: %v", err)
}
pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
if err != nil {
return nil, err
@ -149,7 +163,7 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
Name: cfg.name,
ClientURLs: cfg.acurls,
PeerURLs: cfg.apurls,
DataDir: membdir,
DataDir: cfg.dir,
SnapCount: cfg.snapCount,
MaxSnapFiles: cfg.maxSnapFiles,
MaxWALFiles: cfg.maxWalFiles,
@ -168,6 +182,7 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
return nil, err
}
s.Start()
osutil.RegisterInterruptHandler(s.Stop)
if cfg.corsInfo.String() != "" {
log.Printf("etcd: cors = %s", cfg.corsInfo)
@ -176,7 +191,7 @@ func startEtcd(cfg *config) (<-chan struct{}, error) {
Handler: etcdhttp.NewClientHandler(s),
Info: cfg.corsInfo,
}
ph := etcdhttp.NewPeerHandler(s.Cluster, s.RaftHandler())
ph := etcdhttp.NewPeerHandler(s.Cluster, etcdserver.RaftTimer(s), s.RaftHandler())
// Start the peer server in a goroutine
for _, l := range plns {
go func(l net.Listener) {
@ -221,10 +236,6 @@ func startProxy(cfg *config) error {
return err
}
if cfg.dir == "" {
cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
log.Printf("no proxy data-dir provided, using default proxy data-dir ./%s", cfg.dir)
}
cfg.dir = path.Join(cfg.dir, "proxy")
err = os.MkdirAll(cfg.dir, 0700)
if err != nil {
@ -252,7 +263,7 @@ func startProxy(cfg *config) error {
}
uf := func() []string {
gcls, err := etcdserver.GetClusterFromPeers(peerURLs, tr)
gcls, err := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
// TODO: remove the 2nd check when we fix GetClusterFromPeers
// GetClusterFromPeers should not return nil error with an invaild empty cluster
if err != nil {
@ -336,42 +347,6 @@ func setupCluster(cfg *config) (*etcdserver.Cluster, error) {
return cls, err
}
// makeMemberDir creates the "member" sub-directory under dir and migrates
// any existing data files into it. v1-era files (conf, log, snapshot) are
// symlinked; v2-era files (wal, snap) are moved. If the member dir already
// exists, the migration is assumed done and nothing is touched.
func makeMemberDir(dir string) error {
	membdir := path.Join(dir, "member")
	_, err := os.Stat(membdir)
	switch {
	case err == nil:
		// Member dir already present: previously migrated or freshly created.
		return nil
	case !os.IsNotExist(err):
		return err
	}
	if err := os.MkdirAll(membdir, 0700); err != nil {
		return err
	}
	// File names that identify v1 (v0.4) and v2 data layouts respectively.
	v1Files := types.NewUnsafeSet("conf", "log", "snapshot")
	v2Files := types.NewUnsafeSet("wal", "snap")
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, name := range names {
		switch {
		case v1Files.Contains(name):
			// Link it to the subdir and keep the v1 file at the original
			// location, so v0.4 etcd can still bootstrap if the upgrade
			// failed.
			if err := os.Symlink(path.Join(dir, name), path.Join(membdir, name)); err != nil {
				return err
			}
		case v2Files.Contains(name):
			if err := os.Rename(path.Join(dir, name), path.Join(membdir, name)); err != nil {
				return err
			}
		}
	}
	return nil
}
func genClusterString(name string, urls types.URLs) string {
addrs := make([]string, 0)
for _, u := range urls {
@ -379,3 +354,38 @@ func genClusterString(name string, urls types.URLs) string {
}
return strings.Join(addrs, ",")
}
// identifyDataDirOrDie returns the type of the data dir.
// Dies if the datadir is invalid.
func identifyDataDirOrDie(dir string) dirType {
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		// A missing data dir just means a fresh start.
		if os.IsNotExist(err) {
			return dirEmpty
		}
		log.Fatalf("etcd: error listing data dir: %s", dir)
	}

	// Look for the well-known "member" and "proxy" sub-directories.
	var m, p bool
	for _, name := range names {
		switch dirType(name) {
		case dirMember:
			m = true
		case dirProxy:
			p = true
		default:
			log.Printf("etcd: found invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
		}
	}

	// A data dir can belong to a member or a proxy, never both.
	if m && p {
		log.Fatal("etcd: invalid datadir. Both member and proxy directories exist.")
	}
	if m {
		return dirMember
	}
	if p {
		return dirProxy
	}
	return dirEmpty
}

View File

@ -91,8 +91,8 @@ security flags:
unsafe flags:
Please be CAUTIOUS to use unsafe flags because it will break the guarantee given
by consensus protocol.
Please be CAUTIOUS when using unsafe flags because it will break the guarantees
given by the consensus protocol.
--force-new-cluster 'false'
force to create a new one-member cluster.

View File

@ -56,14 +56,21 @@ type ClusterInfo interface {
// Cluster is a list of Members that belong to the same raft cluster
type Cluster struct {
id types.ID
token string
members map[types.ID]*Member
id types.ID
token string
store store.Store
// index is the raft index that cluster is updated at bootstrap
// from remote cluster info.
// It may have a higher value than local raft index, because it
// displays a further view of the cluster.
// TODO: upgrade it as last modified index
index uint64
sync.Mutex // guards members and removed map
members map[types.ID]*Member
// removed contains the ids of removed members in the cluster.
// removed id cannot be reused.
removed map[types.ID]bool
store store.Store
sync.Mutex
}
// NewClusterFromString returns a Cluster instantiated from the given cluster token
@ -229,6 +236,8 @@ func (c *Cluster) SetID(id types.ID) { c.id = id }
func (c *Cluster) SetStore(st store.Store) { c.store = st }
func (c *Cluster) UpdateIndex(index uint64) { c.index = index }
func (c *Cluster) Recover() {
c.members, c.removed = membersFromStore(c.store)
}
@ -346,6 +355,20 @@ func (c *Cluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
c.members[id].RaftAttributes = raftAttr
}
// Validate ensures that there is no identical urls in the cluster peer list
func (c *Cluster) Validate() error {
	seen := make(map[string]bool)
	for _, m := range c.Members() {
		for _, u := range m.PeerURLs {
			if seen[u] {
				return fmt.Errorf("duplicate url %v in cluster config", u)
			}
			seen[u] = true
		}
	}
	return nil
}
func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) {
members := make(map[types.ID]*Member)
removed := make(map[types.ID]bool)

123
etcdserver/cluster_util.go Normal file
View File

@ -0,0 +1,123 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"sort"
"strconv"
"time"
"github.com/coreos/etcd/pkg/types"
)
// isMemberBootstrapped tries to check if the given member has been bootstrapped
// in the given cluster. It asks the remote peers for their view of the cluster
// and treats the member as bootstrapped once it has published client URLs.
func isMemberBootstrapped(cl *Cluster, member string, tr *http.Transport) bool {
	rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), false, tr)
	if err != nil {
		return false
	}
	m := rcl.Member(cl.MemberByName(member).ID)
	// A member that is not known remotely, or has not yet announced its
	// client URLs, has not finished bootstrapping.
	return m != nil && len(m.ClientURLs) > 0
}
// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
// attempts to construct a Cluster by accessing the members endpoint on one of
// these URLs. The first URL to provide a response is used. If no URLs provide
// a response, or a Cluster cannot be successfully created from a received
// response, an error is returned. Failures along the way are logged.
func GetClusterFromRemotePeers(urls []string, tr *http.Transport) (*Cluster, error) {
	// logerr=true: this public entry point reports every failed attempt.
	return getClusterFromRemotePeers(urls, true, tr)
}
// getClusterFromRemotePeers queries each URL's /members endpoint in turn and
// builds a Cluster from the first usable response.
// If logerr is true, it prints out more error messages.
func getClusterFromRemotePeers(urls []string, logerr bool, tr *http.Transport) (*Cluster, error) {
	cc := &http.Client{
		Transport: tr,
		// Bound each request so a single unresponsive peer cannot stall
		// the whole discovery loop.
		Timeout: time.Second,
	}
	for _, u := range urls {
		resp, err := cc.Get(u + "/members")
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not get cluster response from %s: %v", u, err)
			}
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		// Close the body on every path so the transport can reuse the
		// underlying connection instead of leaking it per attempt.
		resp.Body.Close()
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not read the body of cluster response: %v", err)
			}
			continue
		}
		var membs []*Member
		if err := json.Unmarshal(b, &membs); err != nil {
			if logerr {
				log.Printf("etcdserver: could not unmarshal cluster response: %v", err)
			}
			continue
		}
		id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not parse the cluster ID from cluster res: %v", err)
			}
			continue
		}
		var index uint64
		// The header at or before v2.0.3 doesn't have this field. For backward
		// compatibility, it checks whether the field exists.
		if indexStr := resp.Header.Get("X-Raft-Index"); indexStr != "" {
			index, err = strconv.ParseUint(indexStr, 10, 64)
			if err != nil {
				if logerr {
					log.Printf("etcdserver: could not parse raft index: %v", err)
				}
				continue
			}
		}
		cl := NewClusterFromMembers("", id, membs)
		cl.UpdateIndex(index)
		return cl, nil
	}
	return nil, fmt.Errorf("etcdserver: could not retrieve cluster information from the given urls")
}
// getRemotePeerURLs returns peer urls of remote members in the cluster. The
// returned list is sorted in ascending lexicographical order.
func getRemotePeerURLs(cl ClusterInfo, local string) []string {
	// Start from a non-nil slice so an all-local cluster yields [] not nil.
	urls := make([]string, 0)
	for _, m := range cl.Members() {
		if m.Name != local {
			urls = append(urls, m.PeerURLs...)
		}
	}
	sort.Strings(urls)
	return urls
}

View File

@ -46,9 +46,43 @@ type ServerConfig struct {
ElectionTicks int
}
// VerifyBootstrapConfig sanity-checks the initial config and returns an error
// for things that should never happen.
func (c *ServerConfig) VerifyBootstrapConfig() error {
// VerifyBootstrap sanity-checks the initial config for bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
	// Strict check: the local member must be in the cluster and its peer
	// urls must match the advertised peer urls.
	if err := c.verifyLocalMember(true); err != nil {
		return err
	}
	// No duplicate peer urls across members.
	if err := c.Cluster.Validate(); err != nil {
		return err
	}
	if c.Cluster.String() == "" && c.DiscoveryURL == "" {
		return fmt.Errorf("initial cluster unset and no discovery URL found")
	}
	return nil
}
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
	// no need for strict checking since the member have announced its
	// peer urls to the cluster before starting and do not have to set
	// it in the configuration again.
	if err := c.verifyLocalMember(false); err != nil {
		return err
	}
	// No duplicate peer urls across members.
	if err := c.Cluster.Validate(); err != nil {
		return err
	}
	// Discovery is only for bootstrapping a brand-new cluster.
	if c.DiscoveryURL != "" {
		return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
	}
	return nil
}
// verifyLocalMember verifies the configured member is in configured
// cluster. If strict is set, it also verifies the configured member
// has the same peer urls as configured advertised peer urls.
func (c *ServerConfig) verifyLocalMember(strict bool) error {
m := c.Cluster.MemberByName(c.Name)
// Make sure the cluster at least contains the local server.
if m == nil {
@ -58,34 +92,23 @@ func (c *ServerConfig) VerifyBootstrapConfig() error {
return fmt.Errorf("cannot use %x as member id", raft.None)
}
if c.DiscoveryURL == "" && !c.NewCluster {
return fmt.Errorf("initial cluster state unset and no wal or discovery URL found")
}
// No identical IPs in the cluster peer list
urlMap := make(map[string]bool)
for _, m := range c.Cluster.Members() {
for _, url := range m.PeerURLs {
if urlMap[url] {
return fmt.Errorf("duplicate url %v in cluster config", url)
}
urlMap[url] = true
}
}
// Advertised peer URLs must match those in the cluster peer list
// TODO: Remove URLStringsEqual after improvement of using hostnames #2150 #2123
apurls := c.PeerURLs.StringSlice()
sort.Strings(apurls)
if !netutil.URLStringsEqual(apurls, m.PeerURLs) {
return fmt.Errorf("%s has different advertised URLs in the cluster and advertised peer URLs list", c.Name)
if strict {
if !netutil.URLStringsEqual(apurls, m.PeerURLs) {
return fmt.Errorf("%s has different advertised URLs in the cluster and advertised peer URLs list", c.Name)
}
}
return nil
}
func (c *ServerConfig) WALDir() string { return path.Join(c.DataDir, "wal") }
func (c *ServerConfig) MemberDir() string { return path.Join(c.DataDir, "member") }
func (c *ServerConfig) SnapDir() string { return path.Join(c.DataDir, "snap") }
func (c *ServerConfig) WALDir() string { return path.Join(c.MemberDir(), "wal") }
func (c *ServerConfig) SnapDir() string { return path.Join(c.MemberDir(), "snap") }
func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
@ -99,6 +122,7 @@ func (c *ServerConfig) print(initial bool) {
log.Println("etcdserver: force new cluster")
}
log.Printf("etcdserver: data dir = %s", c.DataDir)
log.Printf("etcdserver: member dir = %s", c.MemberDir())
log.Printf("etcdserver: heartbeat = %dms", c.TickMs)
log.Printf("etcdserver: election = %dms", c.ElectionTicks*int(c.TickMs))
log.Printf("etcdserver: snapshot count = %d", c.SnapCount)

View File

@ -22,6 +22,9 @@ import (
)
func mustNewURLs(t *testing.T, urls []string) []url.URL {
if len(urls) == 0 {
return nil
}
u, err := types.NewURLs(urls)
if err != nil {
t.Fatalf("error creating new URLs from %q: %v", urls, err)
@ -29,77 +32,101 @@ func mustNewURLs(t *testing.T, urls []string) []url.URL {
return u
}
func TestBootstrapConfigVerify(t *testing.T) {
// TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail ensures that
// bootstrap verification fails when neither an initial cluster nor a
// discovery URL is configured.
func TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail(t *testing.T) {
	cluster, err := NewClusterFromString("", "")
	if err != nil {
		t.Fatalf("NewClusterFromString error: %v", err)
	}
	c := &ServerConfig{
		Name:         "node1",
		DiscoveryURL: "",
		Cluster:      cluster,
	}
	if err := c.VerifyBootstrap(); err == nil {
		t.Errorf("err = nil, want not nil")
	}
}
// TestConfigVerifyExistingWithDiscoveryURLFail ensures that joining an
// existing cluster is rejected when a discovery URL is also set.
func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) {
	cluster, err := NewClusterFromString("", "node1=http://127.0.0.1:2380")
	if err != nil {
		t.Fatalf("NewClusterFromString error: %v", err)
	}
	c := &ServerConfig{
		Name:         "node1",
		DiscoveryURL: "http://127.0.0.1:4001/abcdefg",
		PeerURLs:     mustNewURLs(t, []string{"http://127.0.0.1:2380"}),
		Cluster:      cluster,
		NewCluster:   false,
	}
	if err := c.VerifyJoinExisting(); err == nil {
		t.Errorf("err = nil, want not nil")
	}
}
func TestConfigVerifyLocalMember(t *testing.T) {
tests := []struct {
clusterSetting string
newclst bool
apurls []string
disc string
strict bool
shouldError bool
}{
{
// Node must exist in cluster
"",
true,
nil,
"",
true,
true,
},
{
// Cannot have duplicate URLs in cluster config
"node1=http://localhost:7001,node2=http://localhost:7001,node2=http://localhost:7002",
true,
nil,
"",
true,
},
{
// Node defined, ClusterState OK
// Initial cluster set
"node1=http://localhost:7001,node2=http://localhost:7002",
true,
[]string{"http://localhost:7001"},
"",
true,
false,
},
{
// Node defined, discovery OK
"node1=http://localhost:7001",
false,
[]string{"http://localhost:7001"},
"http://discovery",
false,
},
{
// Cannot have ClusterState!=new && !discovery
"node1=http://localhost:7001",
false,
nil,
"",
// Default initial cluster
"node1=http://localhost:2380,node1=http://localhost:7001",
[]string{"http://localhost:2380", "http://localhost:7001"},
true,
false,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:7001",
true,
[]string{"http://localhost:12345"},
"",
true,
true,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:7001,node1=http://localhost:12345",
true,
[]string{"http://localhost:12345"},
"",
true,
true,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:7001",
[]string{},
true,
true,
},
{
// do not care about the urls if strict is not set
"node1=http://localhost:7001",
[]string{},
false,
false,
},
}
for i, tt := range tests {
@ -108,15 +135,13 @@ func TestBootstrapConfigVerify(t *testing.T) {
t.Fatalf("#%d: Got unexpected error: %v", i, err)
}
cfg := ServerConfig{
Name: "node1",
DiscoveryURL: tt.disc,
Cluster: cluster,
NewCluster: tt.newclst,
Name: "node1",
Cluster: cluster,
}
if tt.apurls != nil {
cfg.PeerURLs = mustNewURLs(t, tt.apurls)
}
err = cfg.VerifyBootstrapConfig()
err = cfg.verifyLocalMember(tt.strict)
if (err == nil) && tt.shouldError {
t.Errorf("%#v", *cluster)
t.Errorf("#%d: Got no error where one was expected", i)
@ -129,8 +154,8 @@ func TestBootstrapConfigVerify(t *testing.T) {
func TestSnapDir(t *testing.T) {
tests := map[string]string{
"/": "/snap",
"/var/lib/etc": "/var/lib/etc/snap",
"/": "/member/snap",
"/var/lib/etc": "/var/lib/etc/member/snap",
}
for dd, w := range tests {
cfg := ServerConfig{
@ -144,8 +169,8 @@ func TestSnapDir(t *testing.T) {
func TestWALDir(t *testing.T) {
tests := map[string]string{
"/": "/wal",
"/var/lib/etc": "/var/lib/etc/wal",
"/": "/member/wal",
"/var/lib/etc": "/var/lib/etc/member/wal",
}
for dd, w := range tests {
cfg := ServerConfig{

View File

@ -119,7 +119,6 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
writeError(w, err)
return
}
switch {
case resp.Event != nil:
if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
@ -334,7 +333,7 @@ func serveVersion(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
fmt.Fprintf(w, `{"releaseVersion":"%s","internalVersion":"%s"}`, version.Version, version.InternalVersion)
w.Write([]byte("etcd " + version.Version))
}
// parseKeyRequest converts a received http.Request on keysPrefix to

View File

@ -1064,13 +1064,13 @@ func TestServeMembersFail(t *testing.T) {
func TestWriteEvent(t *testing.T) {
// nil event should not panic
rw := httptest.NewRecorder()
writeKeyEvent(rw, nil, dummyRaftTimer{})
h := rw.Header()
rec := httptest.NewRecorder()
writeKeyEvent(rec, nil, dummyRaftTimer{})
h := rec.Header()
if len(h) > 0 {
t.Fatalf("unexpected non-empty headers: %#v", h)
}
b := rw.Body.String()
b := rec.Body.String()
if len(b) > 0 {
t.Fatalf("unexpected non-empty body: %q", b)
}
@ -1327,7 +1327,7 @@ func TestServeVersion(t *testing.T) {
if rw.Code != http.StatusOK {
t.Errorf("code=%d, want %d", rw.Code, http.StatusOK)
}
w := fmt.Sprintf(`{"releaseVersion":"%s","internalVersion":"%s"}`, version.Version, version.InternalVersion)
w := fmt.Sprintf("etcd %s", version.Version)
if g := rw.Body.String(); g != w {
t.Fatalf("body = %q, want %q", g, w)
}

View File

@ -76,13 +76,13 @@ func (fs *errServer) UpdateMember(ctx context.Context, m etcdserver.Member) erro
func TestWriteError(t *testing.T) {
// nil error should not panic
rw := httptest.NewRecorder()
writeError(rw, nil)
h := rw.Header()
rec := httptest.NewRecorder()
writeError(rec, nil)
h := rec.Header()
if len(h) > 0 {
t.Fatalf("unexpected non-empty headers: %#v", h)
}
b := rw.Body.String()
b := rec.Body.String()
if len(b) > 0 {
t.Fatalf("unexpected non-empty body: %q", b)
}

View File

@ -18,6 +18,7 @@ import (
"encoding/json"
"log"
"net/http"
"strconv"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/rafthttp"
@ -28,9 +29,10 @@ const (
)
// NewPeerHandler generates an http.Handler to handle etcd peer (raft) requests.
func NewPeerHandler(clusterInfo etcdserver.ClusterInfo, raftHandler http.Handler) http.Handler {
func NewPeerHandler(clusterInfo etcdserver.ClusterInfo, timer etcdserver.RaftTimer, raftHandler http.Handler) http.Handler {
mh := &peerMembersHandler{
clusterInfo: clusterInfo,
timer: timer,
}
mux := http.NewServeMux()
@ -43,6 +45,7 @@ func NewPeerHandler(clusterInfo etcdserver.ClusterInfo, raftHandler http.Handler
type peerMembersHandler struct {
clusterInfo etcdserver.ClusterInfo
timer etcdserver.RaftTimer
}
func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@ -50,6 +53,7 @@ func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("X-Etcd-Cluster-ID", h.clusterInfo.ID().String())
w.Header().Set("X-Raft-Index", strconv.FormatUint(h.timer.Index(), 10))
if r.URL.Path != peerMembersPrefix {
http.Error(w, "bad path", http.StatusBadRequest)

View File

@ -33,7 +33,7 @@ func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("test data"))
})
ph := NewPeerHandler(&fakeCluster{}, h)
ph := NewPeerHandler(&fakeCluster{}, &dummyRaftTimer{}, h)
srv := httptest.NewServer(ph)
defer srv.Close()
@ -91,7 +91,7 @@ func TestServeMembersGet(t *testing.T) {
id: 1,
members: map[uint64]*etcdserver.Member{1: &memb1, 2: &memb2},
}
h := &peerMembersHandler{clusterInfo: cluster}
h := &peerMembersHandler{clusterInfo: cluster, timer: &dummyRaftTimer{}}
msb, err := json.Marshal([]etcdserver.Member{memb1, memb2})
if err != nil {
t.Fatal(err)

View File

@ -18,13 +18,11 @@ import (
"encoding/json"
"expvar"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"path"
"regexp"
"sort"
"sync/atomic"
"time"
@ -44,6 +42,7 @@ import (
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
@ -146,55 +145,66 @@ func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
var n raft.Node
var s *raft.MemoryStorage
var id types.ID
walVersion, err := wal.DetectVersion(cfg.DataDir)
// Run the migrations.
dataVer, err := version.DetectDataDir(cfg.DataDir)
if err != nil {
return nil, err
}
if walVersion == wal.WALUnknown {
return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
if err := upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
return nil, err
}
haveWAL := walVersion != wal.WALNotExist
haveWAL := wal.Exist(cfg.WALDir())
ss := snap.New(cfg.SnapDir())
switch {
case !haveWAL && !cfg.NewCluster:
us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
existingCluster, err := GetClusterFromPeers(us, cfg.Transport)
if err := cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cfg.Cluster, cfg.Name), cfg.Transport)
if err != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
}
if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
cfg.Cluster.UpdateIndex(existingCluster.index)
cfg.Cluster.SetID(existingCluster.id)
cfg.Cluster.SetStore(st)
cfg.Print()
id, n, s, w = startNode(cfg, nil)
case !haveWAL && cfg.NewCluster:
if err := cfg.VerifyBootstrapConfig(); err != nil {
if err := cfg.VerifyBootstrap(); err != nil {
return nil, err
}
m := cfg.Cluster.MemberByName(cfg.Name)
if isBootstrapped(cfg) {
if isMemberBootstrapped(cfg.Cluster, cfg.Name, cfg.Transport) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
str, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
if err != nil {
return nil, err
}
if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, str); err != nil {
return nil, err
}
if err := cfg.Cluster.Validate(); err != nil {
return nil, fmt.Errorf("bad discovery cluster: %v", err)
}
}
cfg.Cluster.SetStore(st)
cfg.PrintWithInitial()
id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
case haveWAL:
if walVersion != wal.WALv0_5 {
if err := upgradeWAL(cfg, walVersion); err != nil {
return nil, err
}
if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil {
return nil, fmt.Errorf("cannot write to data directory: %v", err)
}
if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}
if cfg.ShouldDiscover() {
@ -385,7 +395,21 @@ func (s *EtcdServer) run() {
if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
log.Panicf("recovery store error: %v", err)
}
s.Cluster.Recover()
// It avoids snapshot recovery overwriting newer cluster and
// transport setting, which may block the communication.
if s.Cluster.index < rd.Snapshot.Metadata.Index {
s.Cluster.Recover()
// recover raft transport
s.r.transport.RemoveAllPeers()
for _, m := range s.Cluster.Members() {
if m.ID == s.ID() {
continue
}
s.r.transport.AddPeer(m.ID, m.PeerURLs)
}
}
appliedi = rd.Snapshot.Metadata.Index
confState = rd.Snapshot.Metadata.ConfState
log.Printf("etcdserver: recovered from incoming snapshot at index %d", snapi)
@ -647,9 +671,9 @@ func (s *EtcdServer) publish(retryInterval time.Duration) {
}
func (s *EtcdServer) send(ms []raftpb.Message) {
for _, m := range ms {
if !s.Cluster.IsIDRemoved(types.ID(m.To)) {
m.To = 0
for i, _ := range ms {
if s.Cluster.IsIDRemoved(types.ID(ms[i].To)) {
ms[i].To = 0
}
}
s.r.transport.Send(ms)
@ -699,7 +723,11 @@ func (s *EtcdServer) applyRequest(r pb.Request) Response {
switch {
case existsSet:
if exists {
return f(s.store.Update(r.Path, r.Val, expr))
if r.PrevIndex == 0 && r.PrevValue == "" {
return f(s.store.Update(r.Path, r.Val, expr))
} else {
return f(s.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, expr))
}
}
return f(s.store.Create(r.Path, r.Dir, r.Val, false, expr))
case r.PrevIndex > 0 || r.PrevValue != "":
@ -821,88 +849,3 @@ func (s *EtcdServer) snapshot(snapi uint64, confState *raftpb.ConfState) {
func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
// isBootstrapped tries to check if the given member has been bootstrapped
// in the given cluster. It fetches the remote peers' view of the cluster
// and reports whether the local member has published client URLs there.
func isBootstrapped(cfg *ServerConfig) bool {
	cl := cfg.Cluster
	rcl, err := getClusterFromPeers(getOtherPeerURLs(cl, cfg.Name), false, cfg.Transport)
	if err != nil {
		return false
	}
	m := rcl.Member(cl.MemberByName(cfg.Name).ID)
	// Unknown remotely or no client URLs announced yet: not bootstrapped.
	return m != nil && len(m.ClientURLs) > 0
}
// GetClusterFromPeers takes a set of URLs representing etcd peers, and
// attempts to construct a Cluster by accessing the members endpoint on one of
// these URLs. The first URL to provide a response is used. If no URLs provide
// a response, or a Cluster cannot be successfully created from a received
// response, an error is returned. Failures along the way are logged.
func GetClusterFromPeers(urls []string, tr *http.Transport) (*Cluster, error) {
	// logerr=true: this public entry point reports every failed attempt.
	return getClusterFromPeers(urls, true, tr)
}
// getClusterFromPeers queries each URL's /members endpoint in turn and
// builds a Cluster from the first usable response.
// If logerr is true, it prints out more error messages.
func getClusterFromPeers(urls []string, logerr bool, tr *http.Transport) (*Cluster, error) {
	cc := &http.Client{
		Transport: tr,
		// Bound each request so a single unresponsive peer cannot stall
		// the whole discovery loop.
		Timeout: time.Second,
	}
	for _, u := range urls {
		resp, err := cc.Get(u + "/members")
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not get cluster response from %s: %v", u, err)
			}
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		// Close the body on every path so the transport can reuse the
		// underlying connection instead of leaking it per attempt.
		resp.Body.Close()
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not read the body of cluster response: %v", err)
			}
			continue
		}
		var membs []*Member
		if err := json.Unmarshal(b, &membs); err != nil {
			if logerr {
				log.Printf("etcdserver: could not unmarshal cluster response: %v", err)
			}
			continue
		}
		id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
		if err != nil {
			if logerr {
				log.Printf("etcdserver: could not parse the cluster ID from cluster res: %v", err)
			}
			continue
		}
		return NewClusterFromMembers("", id, membs), nil
	}
	return nil, fmt.Errorf("etcdserver: could not retrieve cluster information from the given urls")
}
// getOtherPeerURLs returns peer urls of other members in the cluster. The
// returned list is sorted in ascending lexicographical order.
func getOtherPeerURLs(cl ClusterInfo, self string) []string {
	// Start from a non-nil slice so a single-member cluster yields [] not nil.
	urls := make([]string, 0)
	for _, m := range cl.Members() {
		if m.Name != self {
			urls = append(urls, m.PeerURLs...)
		}
	}
	sort.Strings(urls)
	return urls
}

View File

@ -235,20 +235,18 @@ func TestApplyRequest(t *testing.T) {
},
},
},
// PUT with PrevExist=true *and* PrevIndex set ==> Update
// TODO(jonboulle): is this expected?!
// PUT with PrevExist=true *and* PrevIndex set ==> CompareAndSwap
{
pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(true), PrevIndex: 1},
Response{Event: &store.Event{}},
[]testutil.Action{
{
Name: "Update",
Params: []interface{}{"", "", time.Time{}},
Name: "CompareAndSwap",
Params: []interface{}{"", "", uint64(1), "", time.Time{}},
},
},
},
// PUT with PrevExist=false *and* PrevIndex set ==> Create
// TODO(jonboulle): is this expected?!
{
pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(false), PrevIndex: 1},
Response{Event: &store.Event{}},
@ -1027,8 +1025,8 @@ func TestPublish(t *testing.T) {
t.Errorf("method = %s, want PUT", r.Method)
}
wm := Member{ID: 1, Attributes: Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}}
if w := path.Join(memberStoreKey(wm.ID), attributesSuffix); r.Path != w {
t.Errorf("path = %s, want %s", r.Path, w)
if wpath := path.Join(memberStoreKey(wm.ID), attributesSuffix); r.Path != wpath {
t.Errorf("path = %s, want %s", r.Path, wpath)
}
var gattr Attributes
if err := json.Unmarshal([]byte(r.Val), &gattr); err != nil {
@ -1072,8 +1070,8 @@ func TestPublishRetry(t *testing.T) {
action := n.Action()
// multiple Proposes
if n := len(action); n < 2 {
t.Errorf("len(action) = %d, want >= 2", n)
if cnt := len(action); cnt < 2 {
t.Errorf("len(action) = %d, want >= 2", cnt)
}
}
@ -1135,7 +1133,7 @@ func TestGetOtherPeerURLs(t *testing.T) {
}
for i, tt := range tests {
cl := NewClusterFromMembers("", types.ID(0), tt.membs)
urls := getOtherPeerURLs(cl, tt.self)
urls := getRemotePeerURLs(cl, tt.self)
if !reflect.DeepEqual(urls, tt.wurls) {
t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls)
}
@ -1393,6 +1391,7 @@ func (s *nopTransporter) Handler() http.Handler { return nil }
func (s *nopTransporter) Send(m []raftpb.Message) {}
func (s *nopTransporter) AddPeer(id types.ID, us []string) {}
func (s *nopTransporter) RemovePeer(id types.ID) {}
func (s *nopTransporter) RemoveAllPeers() {}
func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {}
func (s *nopTransporter) Stop() {}
func (s *nopTransporter) Pause() {}

View File

@ -16,6 +16,8 @@ package etcdserver
import (
"log"
"os"
"path"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/migrate"
@ -23,6 +25,7 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/version"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
)
@ -91,14 +94,47 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
// upgradeWAL converts an older version of the etcdServer data to the newest version.
// It must ensure that, after upgrading, the most recent version is present.
func upgradeWAL(cfg *ServerConfig, ver wal.WalVersion) error {
if ver == wal.WALv0_4 {
func upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {
switch ver {
case version.DataDir0_4:
log.Print("etcdserver: converting v0.4 log to v2.0")
err := migrate.Migrate4To2(cfg.DataDir, cfg.Name)
err := migrate.Migrate4To2(baseDataDir, name)
if err != nil {
log.Fatalf("etcdserver: failed migrating data-dir: %v", err)
return err
}
fallthrough
case version.DataDir2_0:
err := makeMemberDir(baseDataDir)
if err != nil {
return err
}
fallthrough
case version.DataDir2_0_1:
fallthrough
default:
log.Printf("etcdserver: datadir is valid for the 2.0.1 format")
}
return nil
}
// makeMemberDir migrates the flat data layout under dir to the v2.0.1
// layout by moving the "snap" and "wal" directories under a new "member"
// subdirectory. If the member directory already exists, the layout is
// assumed current and nothing is done.
func makeMemberDir(dir string) error {
	memberDir := path.Join(dir, "member")
	if _, err := os.Stat(memberDir); err == nil {
		// Already migrated.
		return nil
	} else if !os.IsNotExist(err) {
		return err
	}
	if err := os.MkdirAll(memberDir, 0700); err != nil {
		return err
	}
	for _, sub := range []string{"snap", "wal"} {
		if err := os.Rename(path.Join(dir, sub), path.Join(memberDir, sub)); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -186,13 +186,13 @@ func clusterMustProgress(t *testing.T, membs []*member) {
for i, m := range membs {
u := m.URL()
cc := mustNewHTTPClient(t, []string{u})
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Watch(key, resp.Node.ModifiedIndex).Next(ctx); err != nil {
mcc := mustNewHTTPClient(t, []string{u})
mkapi := client.NewKeysAPI(mcc)
mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := mkapi.Watch(key, resp.Node.ModifiedIndex).Next(mctx); err != nil {
t.Fatalf("#%d: watch on %s error: %v", i, u, err)
}
cancel()
mcancel()
}
}
@ -526,7 +526,7 @@ func (m *member) Launch() error {
m.s.SyncTicker = time.Tick(500 * time.Millisecond)
m.s.Start()
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s.Cluster, m.s.RaftHandler())}
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s.Cluster, m.s, m.s.RaftHandler())}
for _, ln := range m.PeerListeners {
hs := &httptest.Server{
@ -547,6 +547,24 @@ func (m *member) Launch() error {
return nil
}
func (m *member) WaitOK(t *testing.T) {
cc := mustNewHTTPClient(t, []string{m.URL()})
kapi := client.NewKeysAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
_, err := kapi.Get(ctx, "/")
if err != nil {
time.Sleep(tickDuration)
continue
}
cancel()
break
}
for m.s.Leader() == 0 {
time.Sleep(tickDuration)
}
}
func (m *member) URL() string { return m.ClientURLs[0].String() }
func (m *member) Pause() {

View File

@ -15,9 +15,14 @@
package integration
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
)
func TestPauseMember(t *testing.T) {
@ -74,3 +79,44 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
t.Errorf("unexpect successful launch")
}
}
func TestSnapshotAndRestartMember(t *testing.T) {
defer afterTest(t)
m := mustNewMember(t, "snapAndRestartTest")
m.SnapCount = 100
m.Launch()
defer m.Terminate(t)
m.WaitOK(t)
resps := make([]*client.Response, 120)
var err error
for i := 0; i < 120; i++ {
cc := mustNewHTTPClient(t, []string{m.URL()})
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
key := fmt.Sprintf("foo%d", i)
resps[i], err = kapi.Create(ctx, "/"+key, "bar", -1)
if err != nil {
t.Fatalf("#%d: create on %s error: %v", i, m.URL(), err)
}
cancel()
}
m.Stop(t)
m.Restart(t)
for i := 0; i < 120; i++ {
cc := mustNewHTTPClient(t, []string{m.URL()})
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
key := fmt.Sprintf("foo%d", i)
resp, err := kapi.Get(ctx, "/"+key)
if err != nil {
t.Fatalf("#%d: get on %s error: %v", i, m.URL(), err)
}
cancel()
if !reflect.DeepEqual(resp.Node, resps[i].Node) {
t.Errorf("#%d: node = %v, want %v", i, resp.Node, resps[i].Node)
}
}
}

View File

@ -327,21 +327,21 @@ func TestV2Delete(t *testing.T) {
v := url.Values{}
v.Set("value", "XXX")
resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
resp, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/emptydir?dir=true"), v)
r.Body.Close()
r, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/emptydir?dir=true"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
resp, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foodir/bar?dir=true"), v)
r.Body.Close()
r, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foodir/bar?dir=true"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
r.Body.Close()
tests := []struct {
relativeURL string
@ -423,17 +423,17 @@ func TestV2CAD(t *testing.T) {
v := url.Values{}
v.Set("value", "XXX")
resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
r.Body.Close()
resp, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foovalue"), v)
r, err = tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foovalue"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
r.Body.Close()
tests := []struct {
relativeURL string
@ -582,11 +582,11 @@ func TestV2Get(t *testing.T) {
v := url.Values{}
v.Set("value", "XXX")
resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar/zar"), v)
r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar/zar"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
r.Body.Close()
tests := []struct {
relativeURL string
@ -676,11 +676,11 @@ func TestV2QuorumGet(t *testing.T) {
v := url.Values{}
v.Set("value", "XXX")
resp, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar/zar?quorum=true"), v)
r, err := tc.PutForm(fmt.Sprintf("%s%s", u, "/v2/keys/foo/bar/zar?quorum=true"), v)
if err != nil {
t.Error(err)
}
resp.Body.Close()
r.Body.Close()
tests := []struct {
relativeURL string

21
main.go
View File

@ -23,27 +23,8 @@
package main
import (
"log"
"os"
"strconv"
"github.com/coreos/etcd/etcdmain"
"github.com/coreos/etcd/migrate/starter"
"github.com/coreos/etcd/pkg/coreos"
)
import "github.com/coreos/etcd/etcdmain"
func main() {
if str := os.Getenv("ETCD_ALLOW_LEGACY_MODE"); str != "" {
v, err := strconv.ParseBool(str)
if err != nil {
log.Fatalf("failed to parse ETCD_ALLOW_LEGACY_MODE=%s as bool", str)
}
if v {
starter.StartDesiredVersion(os.Args[1:])
}
} else if coreos.IsCoreOS() {
starter.StartDesiredVersion(os.Args[1:])
}
etcdmain.Main()
}

View File

@ -175,8 +175,8 @@ func GuessNodeID(nodes map[string]uint64, snap4 *Snapshot4, cfg *Config4, name s
delete(snapNodes, p.Name)
}
if len(snapNodes) == 1 {
for name, id := range nodes {
log.Printf("Autodetected from snapshot: name %s", name)
for nodename, id := range nodes {
log.Printf("Autodetected from snapshot: name %s", nodename)
return id
}
}
@ -186,8 +186,8 @@ func GuessNodeID(nodes map[string]uint64, snap4 *Snapshot4, cfg *Config4, name s
delete(nodes, p.Name)
}
if len(nodes) == 1 {
for name, id := range nodes {
log.Printf("Autodetected name %s", name)
for nodename, id := range nodes {
log.Printf("Autodetected name %s", nodename)
return id
}
}

View File

@ -1,27 +0,0 @@
etcd migration functional tests
=====
This functional test suite deploys an etcd cluster using local processes and asserts that etcd is functioning properly.
Dependencies
------------
The test suite can only be run on a CoreOS system. It is recommended to run it in a virtual machine environment on CoreOS (e.g. using coreos-vagrant). The only dependency for the tests not provided on the CoreOS image is Go.
Usage
-----
Set environment variables pointing to the respective binaries that are used to drive the actual tests:
```
$ export ETCD_V1_BIN=/path/to/v1_etcd
$ export ETCD_V2_BIN=/path/to/v2_etcd
$ export ETCDCTL_BIN=/path/to/etcdctl
```
Then the tests can be run:
```
$ go test github.com/coreos/etcd/migrate/functional
```

View File

@ -1,30 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFNDCCAx6gAwIBAgIBATALBgkqhkiG9w0BAQUwLTEMMAoGA1UEBhMDVVNBMRAw
DgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTAeFw0xNDAzMTMwMjA5MDlaFw0y
NDAzMTMwMjA5MDlaMC0xDDAKBgNVBAYTA1VTQTEQMA4GA1UEChMHZXRjZC1jYTEL
MAkGA1UECxMCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDdlBlw
Jiakc4C1UpMUvQ+2fttyBMfMLivQgj51atpKd8qIBvpZwz1wtpzdRG0hSYMF0IUk
MfBqyg+T5tt2Lfs3Gx3cYKS7G0HTfmABC7GdG8gNvEVNl/efxqvhis7p7hur765e
J+N2GR4oOOP5Wa8O5flv10cp3ZJLhAguc2CONLzfh/iAYAItFgktGHXJ/AnUhhaj
KWdKlK9Cv71YsRPOiB1hCV+LKfNSqrXPMvQ4sarz3yECIBhpV/KfskJoDyeNMaJd
gabX/S7gUCd2FvuOpGWdSIsDwyJf0tnYmQX5XIQwBZJib/IFMmmoVNYc1bFtYvRH
j0g0Ax4tHeXU/0mglqEcaTuMejnx8jlxZAM8Z94wHLfKbtaP0zFwMXkaM4nmfZqh
vLZwowDGMv9M0VRFEhLGYIc3xQ8G2u8cFAGw1UqTxKhwAdRmrcFaQ38sk4kziy0u
AkpGavS7PKcFjjB/fdDFO/kwGQOthX/oTn9nP3BT+IK2h1A6ATMPI4lVnhb5/KBt
9M/fGgbiU+I9QT0Ilz/LlrcCuzyRXREvIZvoUL77Id+JT3qQxqPn/XMKLN4WEFII
112MFGqCD85JZzNoC4RkZd8kFlR4YJWsS4WqJlWprESr5cCDuLviK+31cnIRF4fJ
mz0gPsVgY7GFEan3JJnL8oRUVzdTPKfPt0atsQIDAQABo2MwYTAOBgNVHQ8BAf8E
BAMCAAQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUnVlVvktY+zlLpG43nTpG
AWmUkrYwHwYDVR0jBBgwFoAUnVlVvktY+zlLpG43nTpGAWmUkrYwCwYJKoZIhvcN
AQEFA4ICAQAqIcPFux3V4h1N0aGM4fCS/iT50TzDnRb5hwILKbmyA6LFnH4YF7PZ
aA0utDNo1XSRDMpR38HWk0weh5Sfx6f2danaKZHAsea8oVEtdrz16ZMOvoh0CPIM
/hn0CGQOoXDADDNFASuExhhpoyYkDqTVTCQ/zbhZg1mjBljJ+BBzlSgeoE4rUDpn
nuDcmD9LtjpsVQL+J662rd51xV4Z6a7aZLvN9GfO8tYkfCGCD9+fGh1Cpz0IL7qw
VRie+p/XpjoHemswnRhYJ4wn10a1UkVSR++wld6Gvjb9ikyr9xVyU5yrRM55pP2J
VguhzjhTIDE1eDfIMMxv3Qj8+BdVQwtKFD+zQYQcbcjsvjTErlS7oCbM2DVlPnRT
QaCM0q0yorfzc4hmml5P95ngz2xlohavgNMhsYIlcWyq3NVbm7mIXz2pjqa16Iit
vL7WX6OVupv/EOMRx5cVcLqqEaYJmAlNd/CCD8ihDQCwoJ6DJhczPRexrVp+iZHK
SnIUONdXb/g8ungXUGL1jGNQrWuq49clpI5sLWNjMDMFAQo0qu5bLkOIMlK/evCt
gctOjXDvGXCk5h6Adf14q9zDGFdLoxw0/aciUSn9IekdzYPmkYUTifuzkVRsPKzS
nmI4dQvz0rHIh4FBUKWWrJhRWhrv9ty/YFuJXVUHeAwr5nz6RFZ4wQ==
-----END CERTIFICATE-----

View File

@ -1,54 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,d2e70acc12a86116
uAmKZ41MiYTa7CappCFEcLL/kWRX4rE8DJG3sL59lv3j/6bYFkdczy3kgrEWm4Pn
+pveJEssQkszXHkjA3vHx8nlTvfQOwa7ggcc76LNYj1sPHawVRNA0pb6WvjDzN7D
JMgAnptVuZGP8N6ZIzFvr5Rf58ar5Y2aI7Ti6KxLZvqYojgvz5dzGimC3+SwDlFy
Q2kwBA/HT4X9w2qSxpQ7WGPw2pkYILZ4Nxfqh9PWHd0Pk1d9KoLhbU5LEtGSy/y9
9jqKsUqBzp9905t7d2KmFDF9Nd7XvHrDZDPILlKcQYnBxg6c1ChH1NkIqdAW7lQ6
dAKAFZlMpVb/ArFBjhioljBIO+gLcWxYseHXbteOgbC1cw5xcBTHqH+7RotFH1VO
ya0DFeW2CyPj4mp7vORD+IOVQaG4H5j1vJXqA9OPBziZR+lHvD0gVJqZIquXIQlW
MBpX5CfV/3xITb6o0wA2OG2qlNM+VbKzg/cqh/kkusAqcfXIByh16K85k4jwPrBG
wsYWABgw1vLlrCJ7ug6P2rb6VmzTbMqe4gpqUROgCS36ARjs5eDBDYZsX6NaGSh6
twAUfzpwoGNuHwUpIYf5BjH1me+tnM0S8tAEtCFf9hy88nCg6v22cWQuAD6+6P6B
Skl/UYT4sxeeETFv7Vf70wLnBMA3/uymBM75FhPyD5Vvg9fxz7aAJbfB2ovUVZ/v
l3HCsCo8y7DtEXoiBmPCH28JWVhIZbmP3dYnU8c86SubhNWm0yjJIIwoghyFmCcO
Wjs0XkVUUa9fGrl6Mc6XQIGsS6UdQkFoIcO+dtIFPg5C5GWnPnF53ro0J4pGcyR0
zgt9ubCcFKNz5Cbcfw7fKJwswMt6zXtFxE/tVvOq2EPAPrmYYwPrnvbSNbuVL+as
OT5ukITR9MDsYR/19jFUsdRDjSvUQVwqH7PiKwTnZouuJUhYHfj3Bjhz6cWzadcd
pNdxqSgEeSzvaz390p1dOpN/0d1ItXlp3za6JZUarVkx8yH9UCFfpEEisPYgTASf
F2xIrWHgZY+87OjPluU+Gym12ldcs0dbySgsxhKZMyAUd0DB2Knnmug+cqVvN+xo
rJ2pD7J08zmQSRGyAUsbeUnuGb6fGNxaD5QpEN7nK4x3K1Q5N9QQ3RwL4Ik6jV0N
eO0LzXF/BZbOAvl/OXAse1f5c7FO21oUw6u6iI0xvTJAcnaH/0eE2N6Y9Lwt507K
HxhuN5j58/sOeb6kfkX563SoKSdYSrBqIaogDZFCtKpEBevsRM+QRdzAc//Fm67U
Zs2K/ADM8+IaQN7uhm8IAPtWEnJ5+9rM2PCF0NX+7qa9HtZxTd0cqbeL8Ayx4i/T
dHvN8k3kPuC+6He7+eZR6EQpN5GPt5SX3QGgKOQbbwBgF8mS/R0zaZpHvaqTY4Bi
RfsLbRBGoTvR8YjqaQW91tExe5FghH7k02slSGzEzgs/ZhqPMCLNC7uFcSKcx9jA
Bj+GmrYOMrUOYLQPT1iRtBFjLEUGPlvUGlaJS/JcvBN6DPW375tQHk7kbpVcudPh
6vVXftuDiYEJk1TIQLt3QdC9s6ieVuAds4KDjYaTZz4s5W2Lkwo5AZzwLeMRank1
96okoO1qRaDgagHsG8yPIwq+8/b/8dNl7E+wsbAWwLXLhYZGqDmHm/16pv/Ck59W
LXLoJfrOdKBoxTTZulIsTISZ14Bj87QWPW26kI6So9V5vN60rb2MWrd+HU46Qapi
JCsfCVsi715GUh4IkqAnec26TuXW2THcOp3p19SyubuJ33XqUR9H7BOZuBsIFeZV
8sihbgjJ/zb7fZ7AGT3VmAxEtgFi8u2NOBN/WqYb++khtXgnIbOhBx9PuhOBofrO
4M0R5s6F2SpbX2LEBJFN48wIlRmSMTsKdmZmA7f0IuxjYIcotBdRCGoXRlJJnZeH
7WriXQJsq0517GlrqgYMDx26xHJy/ao+zcDxsCtftzAQvENuGr1lzsCdIcGXs+FU
7C8qdmqSXgZgltFQpyR7+PMikXcdYdzkT3BjFh+VKJNiAeGXNnVXQH7L/V49zaij
BRYWWtHwEDz50vSzZz3fnrFl6Pk8tny4bKoLjB4vBjMlb4yte7LcK+vbfDdreISD
cDqfpzjAmIpv1GoQFKWGLQjagvwiAfOA8GUivEG9SQSAAImkV9qkr5qYzM7Jn2WU
icA8D0YfuILpGxTOQc1SgDMOiGboCB+f7cxPsjXHbVahNyxxAbDbTjbc6v7q1oiy
PESoLaBR0Bi0tdKivvbB63ok2Kq9XneFrQeCIyrhkXIvYDEwdcoCBpL1DEotbU+D
YjZTLr4UW92xi1M4d94zmG6pyJsfC4sHGflY5paml9dLiEy78rCPfrJkrSSUplf+
8CjfUoZsbq3haE0N4TbqV0I0W2Fm/a6U113CTRYxj9DeA3m/HFU3TLzk9Vg/vGxP
/xltsu/wd/GoyoD9OhWhW1Ck9dtQ0G64hQjeXVd/pzsDCMT8hrtKSlX1Q7vK96ml
OJ9Ju/CdhX2lJA8BrGVh4HS1fsuNFjr5KqZAY6MwFpjAPqvqD7WFE3Yflk5/7VtX
bsvBZoN2vp9hprXsgm8/KmSNnWxzQY1Nps4XjRJVYeTmND5EyQClGJyCYKg0QVDo
7L/2GAhnOrSLkAHOcYAlrNhZ85yBiLhjJcvWyT6DDcMpCusgictI2Qv2ZjMmz46v
62PzHm0/Z3yQMcJnpRO79OdodbY22Eg9xZGGhBp1Xbm/OXYLaEpGW9S7DqPvlD5v
O+VxENxJNwDELK9H2auGJAQdORwgF0VfvZxN6tGRyb7eI6aJj04YYMBkg5Nds+AR
sNEdGNzqKm8sWvINSoX+BCOyjElOSRW0glK+ala5Y7/mM3+KOWgMas2LZBcLZfBr
1/Z0DPIA2CkFtT1VsBKa+fSkEN0s+PRLRV/QWrcMbkSvIaKcswMwoyvI6OcddUEz
YgjAOZ3TdnRm1DMqZHIsPOj+3xQv6nETqSwhvLJT1wJwnJQVbxjZwoUmJKSsZDEB
2xL9OWlhFNY2qS7F77vv2ZUJYLYniiTGrC09AAQ4ti8zWnY1gqtaCp+1wynt/Abs
9gGcbEIaQGWhpVjPtlKjNm86jGP0IXPaAgaOViIuBH+0GeVOLuUMLvb0nL0NWMJa
-----END RSA PRIVATE KEY-----

View File

@ -1,31 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFWzCCA0WgAwIBAgIBAjALBgkqhkiG9w0BAQUwLTEMMAoGA1UEBhMDVVNBMRAw
DgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTAeFw0xNDAzMTMwMjA5MjJaFw0y
NDAzMTMwMjA5MjJaMEUxDDAKBgNVBAYTA1VTQTEQMA4GA1UEChMHZXRjZC1jYTEP
MA0GA1UECxMGc2VydmVyMRIwEAYDVQQDEwkxMjcuMC4wLjEwggIiMA0GCSqGSIb3
DQEBAQUAA4ICDwAwggIKAoICAQDI3EvqJrLWsnPbjAT8ENiMRyBINhhafubi5Nb+
glEzkbC2kv2zXkVkpkBubDRwyh3eomSbdwKYk3yz+IopT753teJueRpMPq9Ayr/+
PZl4Y1tG04KcjfOvOls6zPsDfHzluR8TE705If5wwZu3Bdwxzdtx9T0ROzIEgRt0
Axuce5qkg93IWNxOrIr+4LCxYfTpvpTXO20lz0IuQNm1Opo9PVoWn7PXdOmuCzSG
2hW1DcKqSyQP7IkplBJS0EhoovIsXavSkPKJssvQj73ZFIBVgKhXuHmPNdrypaQk
CtxsqbVdOOlojItqYTTDAiadwRQWkYgDOSQCGJiPqYVJx+rH4MlzxQ6n9x2qIcne
lfMr+VFDEc1YvHu1XLMg5b1ImD6ChutYW0RhFJ3CQVdQR2i4kJ8T1DSJYLISMODZ
ux1cZaUoSL/EkrC5/8POWZmP8nJXO6A4wrZDHF30/qWpo+T5PvsA6cABfX1jkcTx
PBXGK1qOZ8rToTxprJ2zc3zuZNxSgM32nzjcPUgn559Mgdl0HR4c4JeTZGsebWmx
MWmkz//BV4eUaGHqCpzRQHf3YIxysvDC2Xf4z2Alk8AlLRXp7/ksatdxAtyc+y8+
MWCc6N0YbI9zjv+ezCBqR+mu1P5Tb0HebPFz3dOdIpiC3kU8QyMEagw8u5xliZs4
AxwdNwIDAQABo3IwcDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYD
VR0OBBYEFD6UrVN8uolWz6et79jVeZetjd4XMB8GA1UdIwQYMBaAFJ1ZVb5LWPs5
S6RuN506RgFplJK2MA8GA1UdEQQIMAaHBH8AAAEwCwYJKoZIhvcNAQEFA4ICAQCo
sKn1Rjx0tIVWAZAZB4lCWvkQDp/txnb5zzQUlKhIW2o98IklASmOYYyZbE2PXlda
/n8TwKIzWgIoNh5AcgLWhtASrnZdGFXY88n5jGk6CVZ1+Dl+IX99h+r+YHQzf1jU
BjGrZHGv3pPjwhFGDS99lM/TEBk/eLI2Kx5laL+nWMTwa8M1OwSIh6ZxYPVlWUqb
rurk5l/YqW+UkYIXIQhe6LwtB7tBjr6nDIWBfHQ7uN8IdB8VIAF6lejr22VmERTW
j+zJ5eTzuQN1f0s930mEm8pW7KgGxlEqrUlSJtxlMFCv6ZHZk1Y4yEiOCBKlPNme
X3B+lhj//PH3gLNm3+ZRr5ena3k+wL9Dd3d3GDCIx0ERQyrGS/rJpqNPI+8ZQlG0
nrFlm7aP6UznESQnJoSFbydiD0EZ4hXSdmDdXQkTklRpeXfMcrYBGN7JrGZOZ2T2
WtXBMx2bgPeEH50KRrwUMFe122bchh0Fr+hGvNK2Q9/gRyQPiYHq6vSF4GzorzLb
aDuWA9JRH8/c0z8tMvJ7KjmmmIxd39WWGZqiBrGQR7utOJjpQl+HCsDIQM6yZ/Bu
RpwKj2yBz0OQg4tWbtqUuFkRMTkCR6vo3PadgO1VWokM7UFUXlScnYswcM5EwnzJ
/IsYJ2s1V706QVUzAGIbi3+wYi3enk7JfYoGIqa2oA==
-----END CERTIFICATE-----

View File

@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEAyNxL6iay1rJz24wE/BDYjEcgSDYYWn7m4uTW/oJRM5GwtpL9
s15FZKZAbmw0cMod3qJkm3cCmJN8s/iKKU++d7XibnkaTD6vQMq//j2ZeGNbRtOC
nI3zrzpbOsz7A3x85bkfExO9OSH+cMGbtwXcMc3bcfU9ETsyBIEbdAMbnHuapIPd
yFjcTqyK/uCwsWH06b6U1zttJc9CLkDZtTqaPT1aFp+z13Tprgs0htoVtQ3Cqksk
D+yJKZQSUtBIaKLyLF2r0pDyibLL0I+92RSAVYCoV7h5jzXa8qWkJArcbKm1XTjp
aIyLamE0wwImncEUFpGIAzkkAhiYj6mFScfqx+DJc8UOp/cdqiHJ3pXzK/lRQxHN
WLx7tVyzIOW9SJg+gobrWFtEYRSdwkFXUEdouJCfE9Q0iWCyEjDg2bsdXGWlKEi/
xJKwuf/DzlmZj/JyVzugOMK2Qxxd9P6lqaPk+T77AOnAAX19Y5HE8TwVxitajmfK
06E8aayds3N87mTcUoDN9p843D1IJ+efTIHZdB0eHOCXk2RrHm1psTFppM//wVeH
lGhh6gqc0UB392CMcrLwwtl3+M9gJZPAJS0V6e/5LGrXcQLcnPsvPjFgnOjdGGyP
c47/nswgakfprtT+U29B3mzxc93TnSKYgt5FPEMjBGoMPLucZYmbOAMcHTcCAwEA
AQKCAgBS1vCESKOXgo/f61ae8v+skyUQQyc2I4Jr739wBiUhRKQCGIuDr4ylHyAR
qpTSM7mv+X/O0n2CmcljnEy3Dwl568zQTSf4bB3xde1LGPKzwR6DDnaexLjM+x9n
F+UqoewM/pV/U7PF3WxH6sGi8UrIS6OG02L1OVm+m9TLuwBnQF8eHLiaiXOLCwRk
bBzTe5f70zslrX+tiVY9J0fiw6GbQjNmg0UzxicePcbTGxy6yEsR2t2rp51GRahs
+TPz28hPXe6gcGFnQxNmF/JvllH7cY18aDvSQZ7kVkZlCwmv0ypWoUM6eESDgkW1
a6yrgVccm7bhxW5BYw2AqqSrMkV0oMcCUjh2rYvex7w6dM374Ok3DD/dXjTHLNV5
+0tHMxXUiCKwe7hVEg+iGD4E1jap5n5c4RzpEtAXsGEK5WUBksHi9qOBv+lubjZn
Kcfbos+BbnmUCU3MmU48EZwyFQIu9djkLXfJV2Cbbg9HmkrIOYgi4tFjoBKeQLE4
6GCucMWnNfMO7Kq/z7c+7sfWOAA55pu0Ojel8VH6US+Y/1mEuSUhQudrJn8GxAmc
4t+C2Ie1Q1bK3iJbd0NUqtlwd9xI9wQgCbaxfQceUmBBjuTUu3YFctZ7Jia7h18I
gZ3wsKfySDhW29XTFvnT3FUpc+AN9Pv4sB7uobm6qOBV8/AdKQKCAQEA1zwIuJki
bSgXxsD4cfKgQsyIk0eMj8bDOlf/A8AFursXliH3rRASoixXNgzWrMhaEIE2BeeT
InE13YCUjNCKoz8oZJqKYpjh3o/diZf1vCo6m/YUSR+4amynWE4FEAa58Og2WCJ3
Nx8/IMpmch2VZ+hSQuNr5uvpH84+eZADQ1GB6ypzqxb5HjIEeryLJecDQGe4ophd
JCo3loezq/K0XJQI8GTBe2GQPjXSmLMZKksyZoWEXAaC1Q+sdJWZvBpm3GfVQbXu
q7wyqTMknVIlEOy0sHxstsbayysSFFQ/fcgKjyQb8f4efOkyQg8mH5vQOZghbHJ+
7I8wVSSBt+bE2wKCAQEA7udRoo2NIoIpJH+2+SPqJJVq1gw/FHMM4oXNZp+AAjR1
hTWcIzIXleMyDATl5ZFzZIY1U2JMifS5u2R7fDZEu9vfZk4e6BJUJn+5/ahjYFU8
m8WV4rFWR6XN0SZxPb43Mn6OO7EoMqr8InRufiN4LwIqnPqDm2D9Fdijb9QFJ2UG
QLKNnIkLTcUfx1RYP4T48CHkeZdxV8Cp49SzSSV8PbhIVBx32bm/yO6nLHoro7Wl
YqXGW0wItf2BUA5a5eYNO0ezVkOkTp2aj/p9i+0rqbsYa480hzlnOzYI5F72Z8V2
iPltUAeQn53Vg1azySa1x8/0Xp5nVsgQSh18CH3p1QKCAQBxZv4pVPXgkXlFjTLZ
xr5Ns7pZ7x7OOiluuiJw9WGPazgYMDlxA8DtlXM11Tneu4lInOu73LGXOhLpa+/Y
6Z/CN2qu5wX2wRpwy1gsQNaGl7FdryAtDvt5h1n8ms7sDL83gQHxGee6MUpvmnSz
t4aawrtk5rJZbv7bdS1Rm2E8vNs47psXD/mdwTi++kxOYhNCgeO0N5cLkPrM4x71
f+ErzguPrWaL/XGkdXNKZULjF8+sWLjOS9fvLlzs6E2h4D9F7addAeCIt5XxtDKc
eUVyT2U8f7I/8zIgTccu0tzJBvcZSCs5K20g3zVNvPGXQd9KGS+zFfht51vN4HhA
TuR1AoIBAGuQBKZeexP1bJa9VeF4dRxBldeHrgMEBeIbgi5ZU+YqPltaltEV5Z6b
q1XUArpIsZ6p+mpvkKxwXgtsI1j6ihnW1g+Wzr2IOxEWYuQ9I3klB2PPIzvswj8B
/NfVKhk1gl6esmVXzxR4/Yp5x6HNUHhBznPdKtITaf+jCXr5B9UD3DvW6IF5Bnje
bv9tD0qSEQ71A4xnTiXHXfZxNsOROA4F4bLVGnUR97J9GRGic/GCgFMY9mT2p9lg
qQ8lV3G5EW4GS01kqR6oQQXgLxSIFSeXUFhlIq5bfwoeuwQvaVuxgTwMqVXmAgyL
oK1ApTPE1QWAsLLFORvOed8UxVqBbn0CggEBALfr/wheXCKLdzFzm03sO1i9qVz2
vnpxzexXW3V/TtM6Dff2ojgkDC+CVximtAiLA/Wj60hXnQxw53g5VVT5rESx0J3c
pq+azbi1eWzFeOrqJvKQhMfYc0nli7YuGnPkKzeepJJtWZHYkAjL4QZAn1jt0RqV
DQmlGPGiOuGP8uh59c23pbjgh4eSJnvhOT2BFKhKZpBdTBYeiQiZBqIyme8rNTFr
NmpBxtUr77tccVTrcWWhhViG36UNpetAP7b5QCHScIXZJXrEqyK5HaePqi5UMH8o
alSz6s2REG/xP7x54574TvRG/3cIamv1AfZAOjin7BwhlSLhPl2eeh4Cgas=
-----END RSA PRIVATE KEY-----

View File

@ -1,276 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package functional
import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"strconv"
"strings"
"time"
)
// Proc wraps a single etcd child process together with the addresses and
// data directory it was configured with.
type Proc struct {
*exec.Cmd
// Name is the etcd member name (set via -name; defaults to "default").
Name string
// DataDir is the -data-dir the process writes to.
DataDir string
// URL is the client-facing base URL of the instance.
URL string
// PeerURL is the peer-facing base URL of the instance.
PeerURL string
// stderr holds the process's stderr stream; not read anywhere in the
// visible code — TODO confirm whether anything consumes it.
stderr io.ReadCloser
}
// NewProcWithDefaultFlags returns a Proc running the binary at path with a
// fresh temporary data directory and the default member name. It exits the
// test binary if the temporary directory cannot be created.
func NewProcWithDefaultFlags(path string) *Proc {
	dir, err := ioutil.TempDir(os.TempDir(), "etcd")
	if err != nil {
		fmt.Printf("unexpected TempDir error: %v", err)
		os.Exit(1)
	}
	args := []string{"--data-dir=" + dir, "--name=default"}
	proc := &Proc{
		Cmd:     exec.Command(path, args...),
		Name:    "default",
		DataDir: dir,
		URL:     "http://127.0.0.1:4001",
		PeerURL: "http://127.0.0.1:7001",
	}
	// always expect to use start_desired_version mode
	proc.Env = append(proc.Env, "ETCD_BINARY_DIR="+binDir)
	return proc
}
// NewProcWithV1Flags returns a default Proc configured with a v1-style
// peer address flag.
func NewProcWithV1Flags(path string) *Proc {
	proc := NewProcWithDefaultFlags(path)
	proc.SetV1PeerAddr("127.0.0.1:7001")
	return proc
}
// NewProcWithV2Flags returns a default Proc configured with v2-style
// peer URL flags.
func NewProcWithV2Flags(path string) *Proc {
	proc := NewProcWithDefaultFlags(path)
	proc.SetV2PeerURL("http://127.0.0.1:7001")
	return proc
}
// SetV2PeerURL appends v2-style peer flags so the process listens on and
// advertises the given peer URL, and records it in p.PeerURL.
func (p *Proc) SetV2PeerURL(url string) {
	flags := []string{
		"-listen-peer-urls=" + url,
		"-initial-advertise-peer-urls=" + url,
		"-initial-cluster",
		p.Name + "=" + url,
	}
	p.Args = append(p.Args, flags...)
	p.PeerURL = url
}
// SetV1PeerAddr appends the v1-style -peer-addr flag and records the
// corresponding http peer URL.
func (p *Proc) SetV1PeerAddr(addr string) {
	p.Args = append(p.Args, "-peer-addr="+addr)
	p.PeerURL = "http://" + addr
}
// SetV1Addr appends the v1-style -addr flag and records the corresponding
// http client URL.
func (p *Proc) SetV1Addr(addr string) {
	p.Args = append(p.Args, "-addr="+addr)
	p.URL = "http://" + addr
}
// SetV1Peers appends the v1-style -peers flag with the given comma-joined
// peer list.
func (p *Proc) SetV1Peers(peers []string) {
	p.Args = append(p.Args, "-peers="+strings.Join(peers, ","))
}
// SetName appends the -name flag and records the member name on the Proc.
func (p *Proc) SetName(name string) {
	p.Args = append(p.Args, "-name="+name)
	p.Name = name
}
// SetDataDir appends the -data-dir flag and records the directory on the
// Proc so Terminate can clean it up.
func (p *Proc) SetDataDir(dataDir string) {
	p.Args = append(p.Args, "-data-dir="+dataDir)
	p.DataDir = dataDir
}
// SetSnapCount appends the -snapshot-count flag.
func (p *Proc) SetSnapCount(cnt int) {
	p.Args = append(p.Args, "-snapshot-count="+strconv.Itoa(cnt))
}
// SetDiscovery appends the -discovery flag pointing at the given URL.
func (p *Proc) SetDiscovery(url string) {
	p.Args = append(p.Args, "-discovery="+url)
}
// SetPeerTLS appends peer TLS flags and rewrites p.PeerURL to use the
// https scheme. It panics if the current PeerURL cannot be parsed.
func (p *Proc) SetPeerTLS(certFile, keyFile, caFile string) {
	p.Args = append(p.Args,
		"-peer-cert-file="+certFile,
		"-peer-key-file="+keyFile,
		"-peer-ca-file="+caFile,
	)
	parsed, err := url.Parse(p.PeerURL)
	if err != nil {
		log.Panicf("unexpected parse error: %v", err)
	}
	parsed.Scheme = "https"
	p.PeerURL = parsed.String()
}
// CleanUnsuppportedV1Flags strips arguments that a v1-style invocation does
// not support (currently only "-peers=") from the argument list.
func (p *Proc) CleanUnsuppportedV1Flags() {
	var kept []string
	for _, a := range p.Args {
		if strings.HasPrefix(a, "-peers=") {
			continue
		}
		kept = append(kept, a)
	}
	p.Args = kept
}
// Start launches the etcd process and polls its client URL until the
// instance responds, giving up after 50 attempts at 100ms intervals (~5s).
func (p *Proc) Start() error {
	if err := p.Cmd.Start(); err != nil {
		return err
	}
	for k := 0; k < 50; k++ {
		resp, err := http.Get(p.URL)
		if err == nil {
			// Drain and close the body so the underlying connection is
			// reused rather than leaked; the original code discarded the
			// response without closing it on every poll.
			io.Copy(ioutil.Discard, resp.Body)
			resp.Body.Close()
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("instance %s failed to be available after a long time", p.Name)
}
// Stop kills the process and waits for it to exit. A kill failure is
// reported to stdout and the wait is skipped.
func (p *Proc) Stop() {
	err := p.Cmd.Process.Kill()
	if err != nil {
		fmt.Printf("Process Kill error: %v", err)
		return
	}
	p.Cmd.Wait()
}
// Restart stops the process and starts it again with the same arguments,
// returning any Start error.
func (p *Proc) Restart() error {
	p.Stop()
	return p.Start()
}
// Terminate stops the process and deletes its data directory.
func (p *Proc) Terminate() {
	p.Stop()
	os.RemoveAll(p.DataDir)
}
// ProcGroup is an ordered collection of etcd processes managed together.
type ProcGroup []*Proc
// NewProcInProcGroupWithV1Flags builds a v1-flag group of num members and
// returns only the member at index idx.
func NewProcInProcGroupWithV1Flags(path string, num int, idx int) *Proc {
	group := NewProcGroupWithV1Flags(path, num)
	return group[idx]
}
// NewProcGroupWithV1Flags returns num procs configured with v1 flags on
// sequential ports; every member after the first peers with the first.
func NewProcGroupWithV1Flags(path string, num int) ProcGroup {
	group := make([]*Proc, num)
	for i := range group {
		p := NewProcWithDefaultFlags(path)
		p.SetName(fmt.Sprintf("etcd%d", i))
		p.SetV1PeerAddr(fmt.Sprintf("127.0.0.1:%d", 7001+i))
		p.SetV1Addr(fmt.Sprintf("127.0.0.1:%d", 4001+i))
		if i > 0 {
			p.SetV1Peers([]string{"127.0.0.1:7001"})
		}
		group[i] = p
	}
	return group
}
// NewProcGroupViaDiscoveryWithV1Flags returns num v1-flag procs on
// sequential ports that bootstrap through the given discovery URL.
func NewProcGroupViaDiscoveryWithV1Flags(path string, num int, url string) ProcGroup {
	group := make([]*Proc, num)
	for i := range group {
		p := NewProcWithDefaultFlags(path)
		p.SetName(fmt.Sprintf("etcd%d", i))
		p.SetDiscovery(url)
		p.SetV1PeerAddr(fmt.Sprintf("127.0.0.1:%d", 7001+i))
		p.SetV1Addr(fmt.Sprintf("127.0.0.1:%d", 4001+i))
		group[i] = p
	}
	return group
}
// SetPeerTLS applies the same peer TLS configuration to every member.
func (pg ProcGroup) SetPeerTLS(certFile, keyFile, caFile string) {
	for _, p := range pg {
		p.SetPeerTLS(certFile, keyFile, caFile)
	}
}
// InheritDataDir points each member at the data directory of the member at
// the same position in opg.
func (pg ProcGroup) InheritDataDir(opg ProcGroup) {
	for i, p := range pg {
		p.SetDataDir(opg[i].DataDir)
	}
}
// SetSnapCount applies the same snapshot count to every member.
func (pg ProcGroup) SetSnapCount(count int) {
	for _, p := range pg {
		p.SetSnapCount(count)
	}
}
// CleanUnsuppportedV1Flags strips unsupported v1 flags from every member.
func (pg ProcGroup) CleanUnsuppportedV1Flags() {
	for i := range pg {
		pg[i].CleanUnsuppportedV1Flags()
	}
}
// Start launches every member in order, failing fast on the first error.
// It then sleeps one second to leave time for instances to sync and write
// some entries into disk.
// TODO: use more reliable method
func (pg ProcGroup) Start() error {
	for i := range pg {
		if err := pg[i].Start(); err != nil {
			return err
		}
	}
	time.Sleep(time.Second)
	return nil
}
// Wait blocks until every member's process exits, returning the first
// Wait error encountered.
func (pg ProcGroup) Wait() error {
	for i := range pg {
		if err := pg[i].Wait(); err != nil {
			return err
		}
	}
	return nil
}
// Stop kills every member's process.
func (pg ProcGroup) Stop() {
	for i := range pg {
		pg[i].Stop()
	}
}
// Terminate stops every member and removes its data directory.
func (pg ProcGroup) Terminate() {
	for i := range pg {
		pg[i].Terminate()
	}
}

View File

@ -1,415 +0,0 @@
package functional
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"testing"
)
var (
binDir = ".versions"
v1BinPath = path.Join(binDir, "1")
v2BinPath = path.Join(binDir, "2")
etcdctlBinPath string
)
// init prepares the .versions directory with symlinks to the etcd binaries
// under test (from ETCD_V1_BIN / ETCD_V2_BIN), resolves etcdctl from
// ETCDCTL_BIN, and aborts the test binary if anything is missing.
func init() {
	os.RemoveAll(binDir)
	if err := os.Mkdir(binDir, 0700); err != nil {
		fmt.Printf("unexpected Mkdir error: %v\n", err)
		os.Exit(1)
	}
	// Symlink v1 first, then v2, mirroring the env-var order.
	if err := os.Symlink(absPathFromEnv("ETCD_V1_BIN"), v1BinPath); err != nil {
		fmt.Printf("unexpected Symlink error: %v\n", err)
		os.Exit(1)
	}
	if err := os.Symlink(absPathFromEnv("ETCD_V2_BIN"), v2BinPath); err != nil {
		fmt.Printf("unexpected Symlink error: %v\n", err)
		os.Exit(1)
	}
	etcdctlBinPath = os.Getenv("ETCDCTL_BIN")
	mustExist(v1BinPath)
	mustExist(v2BinPath)
	mustExist(etcdctlBinPath)
}
// TestStartNewMember verifies that a fresh member started from the v2
// binary reports internal version 2 regardless of flag style.
func TestStartNewMember(t *testing.T) {
	procs := []*Proc{
		NewProcWithDefaultFlags(v2BinPath),
		NewProcWithV1Flags(v2BinPath),
		NewProcWithV2Flags(v2BinPath),
	}
	for i, p := range procs {
		if err := p.Start(); err != nil {
			t.Fatalf("#%d: Start error: %v", i, err)
		}
		defer p.Terminate()

		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("#%d: checkVersion error: %v", i, err)
		}
		if ver != "2" {
			t.Errorf("#%d: internal version = %s, want %s", i, ver, "2")
		}
	}
}
// TestStartV2Member verifies that the v2 binary, restarted over an existing
// v2 data dir, reports internal version 2 regardless of flag style.
func TestStartV2Member(t *testing.T) {
	procs := []*Proc{
		NewProcWithDefaultFlags(v2BinPath),
		NewProcWithV1Flags(v2BinPath),
		NewProcWithV2Flags(v2BinPath),
	}
	for i, p := range procs {
		// Seed a v2 data dir by running the v2 binary once and stopping it.
		seed := NewProcWithDefaultFlags(v2BinPath)
		if err := seed.Start(); err != nil {
			t.Fatalf("#%d: Start error: %v", i, err)
		}
		seed.Stop()
		p.SetDataDir(seed.DataDir)

		if err := p.Start(); err != nil {
			t.Fatalf("#%d: Start error: %v", i, err)
		}
		defer p.Terminate()

		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("#%d: checkVersion error: %v", i, err)
		}
		if ver != "2" {
			t.Errorf("#%d: internal version = %s, want %s", i, ver, "2")
		}
	}
}
// TestStartV1Member verifies that the v2 binary, given an existing v1 data
// dir, keeps running at internal version 1 regardless of flag style.
func TestStartV1Member(t *testing.T) {
	procs := []*Proc{
		NewProcWithDefaultFlags(v2BinPath),
		NewProcWithV1Flags(v2BinPath),
		NewProcWithV2Flags(v2BinPath),
	}
	for i, p := range procs {
		// Seed a v1 data dir by running the v1 binary once and stopping it.
		seed := NewProcWithDefaultFlags(v1BinPath)
		if err := seed.Start(); err != nil {
			t.Fatalf("#%d: Start error: %v", i, err)
		}
		seed.Stop()
		p.SetDataDir(seed.DataDir)

		if err := p.Start(); err != nil {
			t.Fatalf("#%d: Start error: %v", i, err)
		}
		defer p.Terminate()

		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("#%d: checkVersion error: %v", i, err)
		}
		if ver != "1" {
			t.Errorf("#%d: internal version = %s, want %s", i, ver, "1")
		}
	}
}
// TestUpgradeV1Cluster upgrades a running v1 cluster via `etcdctl upgrade`
// and checks every restarted member reports internal version 2.
func TestUpgradeV1Cluster(t *testing.T) {
	// get v2-desired v1 data dir
	old := NewProcGroupWithV1Flags(v1BinPath, 3)
	if err := old.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}

	upgrade := exec.Command(etcdctlBinPath, "upgrade", "--peer-url", old[1].PeerURL)
	if err := upgrade.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	if err := upgrade.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}

	t.Logf("wait until etcd exits...")
	if err := old.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}

	upgraded := NewProcGroupWithV1Flags(v2BinPath, 3)
	upgraded.InheritDataDir(old)
	upgraded.CleanUnsuppportedV1Flags()
	if err := upgraded.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer upgraded.Terminate()

	for _, p := range upgraded {
		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("checkVersion error: %v", err)
		}
		if ver != "2" {
			t.Errorf("internal version = %s, want %s", ver, "2")
		}
	}
}
// TestUpgradeV1SnapshotedCluster upgrades a v1 cluster that has taken at
// least one snapshot and checks every restarted member reports internal
// version 2.
func TestUpgradeV1SnapshotedCluster(t *testing.T) {
	// get v2-desired v1 data dir
	old := NewProcGroupWithV1Flags(v1BinPath, 3)
	old.SetSnapCount(10)
	if err := old.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}

	upgrade := exec.Command(etcdctlBinPath, "upgrade", "--peer-url", old[1].PeerURL)
	if err := upgrade.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	if err := upgrade.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}

	t.Logf("wait until etcd exits...")
	if err := old.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}

	for _, p := range old {
		// check it has taken snapshot
		fis, err := ioutil.ReadDir(path.Join(p.DataDir, "snapshot"))
		if err != nil {
			t.Fatalf("unexpected ReadDir error: %v", err)
		}
		if len(fis) == 0 {
			t.Fatalf("unexpected no-snapshot data dir")
		}
	}

	upgraded := NewProcGroupWithV1Flags(v2BinPath, 3)
	upgraded.InheritDataDir(old)
	upgraded.CleanUnsuppportedV1Flags()
	if err := upgraded.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer upgraded.Terminate()

	for _, p := range upgraded {
		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("checkVersion error: %v", err)
		}
		if ver != "2" {
			t.Errorf("internal version = %s, want %s", ver, "2")
		}
	}
}
// TestJoinV1Cluster starts a v2 binary over a v1 data dir and lets two more
// v2-binary members join; all must stay at internal version 1.
func TestJoinV1Cluster(t *testing.T) {
	seed := NewProcGroupWithV1Flags(v1BinPath, 1)
	if err := seed.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	seed.Stop()

	joined := NewProcGroupWithV1Flags(v2BinPath, 3)
	joined[0].SetDataDir(seed[0].DataDir)
	if err := joined.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer joined.Terminate()

	for _, p := range joined {
		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("checkVersion error: %v", err)
		}
		if ver != "1" {
			t.Errorf("internal version = %s, want %s", ver, "1")
		}
	}
}
// TestJoinV1ClusterViaDiscovery is TestJoinV1Cluster but bootstrapped via a
// discovery service backed by a separate v1 etcd instance.
func TestJoinV1ClusterViaDiscovery(t *testing.T) {
	disco := NewProcWithDefaultFlags(v1BinPath)
	disco.SetV1Addr("127.0.0.1:5001")
	disco.SetV1PeerAddr("127.0.0.1:8001")
	if err := disco.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer disco.Terminate()

	durl := "http://127.0.0.1:5001/v2/keys/cluster/"
	seed := NewProcGroupViaDiscoveryWithV1Flags(v1BinPath, 1, durl)
	if err := seed.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	seed.Stop()

	joined := NewProcGroupViaDiscoveryWithV1Flags(v2BinPath, 3, durl)
	joined[0].SetDataDir(seed[0].DataDir)
	if err := joined.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer joined.Terminate()

	for _, p := range joined {
		ver, err := checkInternalVersion(p.URL)
		if err != nil {
			t.Fatalf("checkVersion error: %v", err)
		}
		if ver != "1" {
			t.Errorf("internal version = %s, want %s", ver, "1")
		}
	}
}
// TestUpgradeV1Standby upgrades a 3-member v0.4 cluster that has a 4th
// standby member, and verifies that after the upgrade the peers run
// internal version 2 and the former standby restarts as a v2 proxy.
func TestUpgradeV1Standby(t *testing.T) {
	// get v1 standby data dir
	pg := NewProcGroupWithV1Flags(v1BinPath, 3)
	if err := pg.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	// pin activeSize to 3 so the next member joins in standby mode
	req, err := http.NewRequest("PUT", pg[0].PeerURL+"/v2/admin/config", bytes.NewBufferString(`{"activeSize":3,"removeDelay":1800,"syncInterval":5}`))
	if err != nil {
		t.Fatalf("NewRequest error: %v", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("http Do error: %v", err)
	}
	resp.Body.Close() // fix: the response body was previously leaked
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("status = %d, want %d", resp.StatusCode, http.StatusOK)
	}

	// the 4th member should come up as a v0.4 standby
	p := NewProcInProcGroupWithV1Flags(v2BinPath, 4, 3)
	if err := p.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	fmt.Println("checking new member is in standby mode...")
	mustExist(path.Join(p.DataDir, "standby_info"))
	ver, err := checkInternalVersion(p.URL)
	if err != nil {
		t.Fatalf("checkVersion error: %v", err)
	}
	if ver != "1" {
		t.Errorf("internal version = %s, want %s", ver, "1")
	}

	fmt.Println("upgrading the whole cluster...")
	cmd := exec.Command(etcdctlBinPath, "upgrade", "--peer-url", pg[0].PeerURL)
	if err := cmd.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	if err := cmd.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}

	fmt.Println("waiting until peer-mode etcd exits...")
	if err := pg.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}
	fmt.Println("restarting the peer-mode etcd...")
	npg := NewProcGroupWithV1Flags(v2BinPath, 3)
	npg.InheritDataDir(pg)
	npg.CleanUnsuppportedV1Flags()
	if err := npg.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer npg.Terminate()

	fmt.Println("waiting until standby-mode etcd exits...")
	if err := p.Wait(); err != nil {
		t.Fatalf("Wait error: %v", err)
	}
	fmt.Println("restarting the standby-mode etcd...")
	np := NewProcInProcGroupWithV1Flags(v2BinPath, 4, 3)
	np.SetDataDir(p.DataDir)
	np.CleanUnsuppportedV1Flags()
	if err := np.Start(); err != nil {
		t.Fatalf("Start error: %v", err)
	}
	defer np.Terminate()

	fmt.Println("checking the new member is in v2 proxy mode...")
	ver, err = checkInternalVersion(np.URL)
	if err != nil {
		t.Fatalf("checkVersion error: %v", err)
	}
	if ver != "2" {
		// fix: the message previously said want "1" although the check is for "2"
		t.Errorf("internal version = %s, want %s", ver, "2")
	}
	if _, err := os.Stat(path.Join(np.DataDir, "proxy")); err != nil {
		t.Errorf("stat proxy dir error = %v, want nil", err)
	}
}
// TestUpgradeV1TLSCluster upgrades a 3-member v1 cluster whose peer
// transport uses TLS, and verifies every member restarts on internal
// version 2 with the same TLS fixtures.
func TestUpgradeV1TLSCluster(t *testing.T) {
// get v2-desired v1 data dir
pg := NewProcGroupWithV1Flags(v1BinPath, 3)
pg.SetPeerTLS("./fixtures/server.crt", "./fixtures/server.key.insecure", "./fixtures/ca.crt")
if err := pg.Start(); err != nil {
t.Fatalf("Start error: %v", err)
}
// run `etcdctl upgrade` against a peer over TLS
cmd := exec.Command(etcdctlBinPath,
"upgrade", "--peer-url", pg[1].PeerURL,
"--peer-cert-file", "./fixtures/server.crt",
"--peer-key-file", "./fixtures/server.key.insecure",
"--peer-ca-file", "./fixtures/ca.crt",
)
if err := cmd.Start(); err != nil {
t.Fatalf("Start error: %v", err)
}
if err := cmd.Wait(); err != nil {
t.Fatalf("Wait error: %v", err)
}
// the upgrade command makes the v1 processes exit on their own
t.Logf("wait until etcd exits...")
if err := pg.Wait(); err != nil {
t.Fatalf("Wait error: %v", err)
}
// restart on the same data dirs with the v2 binary
npg := NewProcGroupWithV1Flags(v2BinPath, 3)
npg.SetPeerTLS("./fixtures/server.crt", "./fixtures/server.key.insecure", "./fixtures/ca.crt")
npg.InheritDataDir(pg)
npg.CleanUnsuppportedV1Flags()
if err := npg.Start(); err != nil {
t.Fatalf("Start error: %v", err)
}
defer npg.Terminate()
// all members should now report internal version 2
for _, p := range npg {
ver, err := checkInternalVersion(p.URL)
if err != nil {
t.Fatalf("checkVersion error: %v", err)
}
if ver != "2" {
t.Errorf("internal version = %s, want %s", ver, "2")
}
}
}
// absPathFromEnv reads the environment variable name and returns its value
// as an absolute path. An Abs failure is reported to stdout and the
// (possibly empty) result is returned anyway.
func absPathFromEnv(name string) string {
	val := os.Getenv(name)
	abs, err := filepath.Abs(val)
	if err != nil {
		fmt.Printf("unexpected Abs error: %v\n", err)
	}
	return abs
}
// mustExist terminates the process with status 1 (after printing the Stat
// error) when path does not exist; otherwise it is a no-op.
func mustExist(path string) {
	_, err := os.Stat(path)
	if err == nil {
		return
	}
	fmt.Printf("%v\n", err)
	os.Exit(1)
}
// checkInternalVersion fetches <url>/version and returns the
// "internalVersion" field of the JSON response.
func checkInternalVersion(url string) (string, error) {
	resp, err := http.Get(url + "/version")
	if err != nil {
		return "", err
	}
	// fix: close the response body so the underlying connection is released
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	var m map[string]string
	err = json.Unmarshal(b, &m)
	return m["internalVersion"], err
}

View File

@ -63,6 +63,24 @@ type node struct {
Children map[string]*node // for directory
}
// deepCopyNode returns a recursive copy of n. The copy's Parent is set to
// parent, and every copied child points back to its copied parent, so the
// returned subtree shares no *node values with the original.
func deepCopyNode(n *node, parent *node) *node {
out := &node{
Path: n.Path,
CreatedIndex: n.CreatedIndex,
ModifiedIndex: n.ModifiedIndex,
Parent: parent,
ExpireTime: n.ExpireTime,
ACL: n.ACL,
Value: n.Value,
Children: make(map[string]*node),
}
for k, v := range n.Children {
out.Children[k] = deepCopyNode(v, out)
}
return out
}
func replacePathNames(n *node, s1, s2 string) {
n.Path = path.Clean(strings.Replace(n.Path, s1, s2, 1))
for _, c := range n.Children {
@ -87,9 +105,23 @@ func pullNodesFromEtcd(n *node) map[string]uint64 {
return out
}
func fixEtcd(n *node) {
n.Path = "/0"
machines := n.Children["machines"]
func fixEtcd(etcdref *node) *node {
n := &node{
Path: "/0",
CreatedIndex: etcdref.CreatedIndex,
ModifiedIndex: etcdref.ModifiedIndex,
ExpireTime: etcdref.ExpireTime,
ACL: etcdref.ACL,
Children: make(map[string]*node),
}
var machines *node
if machineOrig, ok := etcdref.Children["machines"]; ok {
machines = deepCopyNode(machineOrig, n)
}
if machines == nil {
return n
}
n.Children["members"] = &node{
Path: "/0/members",
CreatedIndex: machines.CreatedIndex,
@ -97,6 +129,7 @@ func fixEtcd(n *node) {
ExpireTime: machines.ExpireTime,
ACL: machines.ACL,
Children: make(map[string]*node),
Parent: n,
}
for name, c := range machines.Children {
q, err := url.ParseQuery(c.Value)
@ -121,29 +154,32 @@ func fixEtcd(n *node) {
ModifiedIndex: c.ModifiedIndex,
ExpireTime: c.ExpireTime,
ACL: c.ACL,
Children: map[string]*node{
"attributes": &node{
Path: path.Join("/0/members", m.ID.String(), "attributes"),
CreatedIndex: c.CreatedIndex,
ModifiedIndex: c.ModifiedIndex,
ExpireTime: c.ExpireTime,
ACL: c.ACL,
Value: string(attrBytes),
},
"raftAttributes": &node{
Path: path.Join("/0/members", m.ID.String(), "raftAttributes"),
CreatedIndex: c.CreatedIndex,
ModifiedIndex: c.ModifiedIndex,
ExpireTime: c.ExpireTime,
ACL: c.ACL,
Value: string(raftBytes),
},
},
Children: make(map[string]*node),
Parent: n.Children["members"],
}
attrs := &node{
Path: path.Join("/0/members", m.ID.String(), "attributes"),
CreatedIndex: c.CreatedIndex,
ModifiedIndex: c.ModifiedIndex,
ExpireTime: c.ExpireTime,
ACL: c.ACL,
Value: string(attrBytes),
Parent: newNode,
}
newNode.Children["attributes"] = attrs
raftAttrs := &node{
Path: path.Join("/0/members", m.ID.String(), "raftAttributes"),
CreatedIndex: c.CreatedIndex,
ModifiedIndex: c.ModifiedIndex,
ExpireTime: c.ExpireTime,
ACL: c.ACL,
Value: string(raftBytes),
Parent: newNode,
}
newNode.Children["raftAttributes"] = raftAttrs
n.Children["members"].Children[m.ID.String()] = newNode
}
delete(n.Children, "machines")
return n
}
func mangleRoot(n *node) *node {
@ -157,10 +193,10 @@ func mangleRoot(n *node) *node {
}
newRoot.Children["1"] = n
etcd := n.Children["_etcd"]
delete(n.Children, "_etcd")
replacePathNames(n, "/", "/1/")
fixEtcd(etcd)
newRoot.Children["0"] = etcd
newZero := fixEtcd(etcd)
newZero.Parent = newRoot
newRoot.Children["0"] = newZero
return newRoot
}

View File

@ -1,418 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package starter
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"strings"
"syscall"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/etcdmain"
"github.com/coreos/etcd/migrate"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/flags"
"github.com/coreos/etcd/pkg/osutil"
"github.com/coreos/etcd/pkg/types"
etcdversion "github.com/coreos/etcd/version"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
)
type version string
const (
internalV1 version = "1"
internalV2 version = "2"
internalV2Proxy version = "2.proxy"
internalUnknown version = "unknown"
v0_4 version = "v0.4"
v2_0 version = "v2.0"
v2_0Proxy version = "v2.0 proxy"
empty version = "empty"
unknown version = "unknown"
defaultInternalV1etcdBinaryDir = "/usr/libexec/etcd/internal_versions/"
)
var (
v2SpecialFlags = []string{
"initial-cluster",
"listen-peer-urls",
"listen-client-urls",
"proxy",
}
)
// StartDesiredVersion decides which internal etcd version should run for the
// given command-line args and prepares the process accordingly: internal v1
// is exec'd in place (never returns), internal v2 simply returns so the
// caller starts the bundled v2 server, and v2-proxy appends -proxy=on to
// os.Args before returning.
func StartDesiredVersion(args []string) {
fs, err := parseConfig(args)
if err != nil {
// flag parsing failed; let the caller's own flag handling surface it
return
}
// -version short-circuits everything: print and exit
if fs.Lookup("version").Value.String() == "true" {
fmt.Println("etcd version", etcdversion.Version)
os.Exit(0)
}
ver := checkInternalVersion(fs)
log.Printf("starter: start etcd version %s", ver)
switch ver {
case internalV1:
startInternalV1()
case internalV2:
// nothing to do; the caller starts the v2 server itself
case internalV2Proxy:
// NOTE(review): this logs when Stat FAILS (standby_info missing) yet the
// message says the file exists — the condition looks inverted; confirm.
if _, err := os.Stat(standbyInfo4(fs.Lookup("data-dir").Value.String())); err != nil {
log.Printf("starter: Detect standby_info file exists, and add --proxy=on flag to ensure it runs in v2.0 proxy mode.")
log.Printf("starter: Before removing v0.4 data, --proxy=on flag MUST be added.")
}
// append proxy flag to args to trigger proxy mode
os.Args = append(os.Args, "-proxy=on")
default:
log.Panicf("starter: unhandled start version")
}
}
// checkInternalVersion inspects flags, the data directory, and (if needed)
// the peers reachable over the network to decide which internal etcd
// version this process should run. It calls log.Fatalf on unrecoverable
// configuration/IO errors.
func checkInternalVersion(fs *flag.FlagSet) version {
// If it uses 2.0 env var explicitly, start 2.0
for _, name := range v2SpecialFlags {
if fs.Lookup(name).Value.String() != "" {
return internalV2
}
}
dataDir := fs.Lookup("data-dir").Value.String()
if dataDir == "" {
log.Fatalf("starter: please set --data-dir or ETCD_DATA_DIR for etcd")
}
// check the data directory
ver, err := checkVersion(dataDir)
if err != nil {
log.Fatalf("starter: failed to detect etcd version in %v: %v", dataDir, err)
}
log.Printf("starter: detect etcd version %s in %s", ver, dataDir)
switch ver {
case v2_0:
return internalV2
case v2_0Proxy:
return internalV2Proxy
case v0_4:
// a v0.4 data dir may belong to a standby member; ask its peers
standbyInfo, err := migrate.DecodeStandbyInfo4FromFile(standbyInfo4(dataDir))
if err != nil && !os.IsNotExist(err) {
log.Fatalf("starter: failed to decode standbyInfo in %v: %v", dataDir, err)
}
inStandbyMode := standbyInfo != nil && standbyInfo.Running
if inStandbyMode {
ver, err := checkInternalVersionByClientURLs(standbyInfo.ClientURLs(), clientTLSInfo(fs))
if err != nil {
log.Printf("starter: failed to check start version through peers: %v", err)
return internalV1
}
if ver == internalV2 {
// the cluster already upgraded: run as a v2 proxy instead
osutil.Unsetenv("ETCD_DISCOVERY")
os.Args = append(os.Args, "-initial-cluster", standbyInfo.InitialCluster())
return internalV2Proxy
}
return ver
}
// non-standby v0.4 member: look for the upgrade marker on disk
ver, err := checkInternalVersionByDataDir4(dataDir)
if err != nil {
log.Fatalf("starter: failed to check start version in %v: %v", dataDir, err)
}
return ver
case empty:
// fresh member: ask discovery and/or -peers for the cluster's version
discovery := fs.Lookup("discovery").Value.String()
dpeers, err := getPeersFromDiscoveryURL(discovery)
if err != nil {
log.Printf("starter: failed to get peers from discovery %s: %v", discovery, err)
}
peerStr := fs.Lookup("peers").Value.String()
ppeers := getPeersFromPeersFlag(peerStr, peerTLSInfo(fs))
urls := getClientURLsByPeerURLs(append(dpeers, ppeers...), peerTLSInfo(fs))
ver, err := checkInternalVersionByClientURLs(urls, clientTLSInfo(fs))
if err != nil {
log.Printf("starter: failed to check start version through peers: %v", err)
return internalV2
}
return ver
}
// never reach here
log.Panicf("starter: unhandled etcd version in %v", dataDir)
return internalUnknown
}
// checkVersion classifies dataDir by the files it contains: "member" marks
// a v2.0 member, "proxy" a v2.0 proxy, and the v0.4 layouts are recognized
// by snapshot/conf/log or by a standby_info file. A missing or empty
// directory yields empty with no error.
func checkVersion(dataDir string) (version, error) {
	names, err := fileutil.ReadDir(dataDir)
	if err != nil {
		// a nonexistent dir is treated the same as an empty one
		if os.IsNotExist(err) {
			return empty, nil
		}
		return empty, err
	}
	if len(names) == 0 {
		return empty, nil
	}
	set := types.NewUnsafeSet(names...)
	switch {
	case set.ContainsAll([]string{"member"}):
		return v2_0, nil
	case set.ContainsAll([]string{"proxy"}):
		return v2_0Proxy, nil
	case set.ContainsAll([]string{"snapshot", "conf", "log"}):
		return v0_4, nil
	case set.ContainsAll([]string{"standby_info"}):
		return v0_4, nil
	}
	return unknown, fmt.Errorf("failed to check version")
}
// checkInternalVersionByDataDir4 scans a v0.4 data dir for the upgrade
// marker "next-internal-version" == "2", first in the latest snapshot and
// then in the write-ahead log. If the marker is absent it returns internalV1.
func checkInternalVersionByDataDir4(dataDir string) (version, error) {
// check v0.4 snapshot
snap4, err := migrate.DecodeLatestSnapshot4FromDir(snapDir4(dataDir))
if err != nil {
return internalUnknown, err
}
if snap4 != nil {
st := &migrate.Store4{}
if err := json.Unmarshal(snap4.State, st); err != nil {
return internalUnknown, err
}
// NOTE(review): if "_etcd" is absent, dir is nil and the next line
// dereferences it — confirm snapshots always contain /_etcd.
dir := st.Root.Children["_etcd"]
n, ok := dir.Children["next-internal-version"]
if ok && n.Value == "2" {
return internalV2, nil
}
}
// check v0.4 log
ents4, err := migrate.DecodeLog4FromFile(logFile4(dataDir))
if err != nil {
return internalUnknown, err
}
for _, e := range ents4 {
cmd, err := migrate.NewCommand4(e.GetCommandName(), e.GetCommand(), nil)
if err != nil {
return internalUnknown, err
}
// only SET commands can carry the upgrade marker
setcmd, ok := cmd.(*migrate.SetCommand)
if !ok {
continue
}
if setcmd.Key == "/_etcd/next-internal-version" && setcmd.Value == "2" {
return internalV2, nil
}
}
return internalV1, nil
}
// getClientURLsByPeerURLs fetches each peer's advertised client URL by
// querying the v0.4 /etcdURL endpoint on its peer address. Unreachable or
// unreadable peers are skipped with a log message.
func getClientURLsByPeerURLs(peers []string, tls *TLSInfo) []string {
	c, err := newDefaultClient(tls)
	if err != nil {
		log.Printf("starter: new client error: %v", err)
		return nil
	}
	var urls []string
	for _, u := range peers {
		resp, err := c.Get(u + "/etcdURL")
		if err != nil {
			log.Printf("starter: failed to get /etcdURL from %s", u)
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		// fix: the response body was never closed, leaking connections
		resp.Body.Close()
		if err != nil {
			log.Printf("starter: failed to read body from %s", u)
			continue
		}
		urls = append(urls, string(b))
	}
	return urls
}
// checkInternalVersionByClientURLs asks each client URL for /version and
// returns the first recognized internal version ("1" or "2"). It returns
// an error only when no URL yields a recognizable version.
func checkInternalVersionByClientURLs(urls []string, tls *TLSInfo) (version, error) {
	c, err := newDefaultClient(tls)
	if err != nil {
		return internalUnknown, err
	}
	for _, u := range urls {
		resp, err := c.Get(u + "/version")
		if err != nil {
			log.Printf("starter: failed to get /version from %s", u)
			continue
		}
		b, err := ioutil.ReadAll(resp.Body)
		// fix: the response body was never closed, leaking connections
		resp.Body.Close()
		if err != nil {
			log.Printf("starter: failed to read body from %s", u)
			continue
		}
		var m map[string]string
		err = json.Unmarshal(b, &m)
		if err != nil {
			log.Printf("starter: failed to unmarshal body %s from %s", b, u)
			continue
		}
		switch m["internalVersion"] {
		case "1":
			return internalV1, nil
		case "2":
			return internalV2, nil
		default:
			log.Printf("starter: unrecognized internal version %s from %s", m["internalVersion"], u)
		}
	}
	return internalUnknown, fmt.Errorf("failed to get version from urls %v", urls)
}
// getPeersFromDiscoveryURL reads the discovery token's key space and
// returns the registered peer values, skipping the _config and _state
// bookkeeping keys. An empty discoverURL returns (nil, nil).
func getPeersFromDiscoveryURL(discoverURL string) ([]string, error) {
if discoverURL == "" {
return nil, nil
}
u, err := url.Parse(discoverURL)
if err != nil {
return nil, err
}
// the URL path is the discovery token; the rest addresses the service
token := u.Path
u.Path = ""
c, err := client.NewHTTPClient(&http.Transport{}, []string{u.String()})
if err != nil {
return nil, err
}
dc := client.NewDiscoveryKeysAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
resp, err := dc.Get(ctx, token)
cancel()
if err != nil {
return nil, err
}
peers := make([]string, 0)
// append non-config keys to peers
for _, n := range resp.Node.Nodes {
if g := path.Base(n.Key); g == "_config" || g == "_state" {
continue
}
peers = append(peers, n.Value)
}
return peers, nil
}
// getPeersFromPeersFlag splits the comma-separated -peers value, trims
// whitespace, and prefixes each entry with the scheme implied by tls.
func getPeersFromPeersFlag(str string, tls *TLSInfo) []string {
	scheme := tls.Scheme()
	peers := trimSplit(str, ",")
	for i := range peers {
		peers[i] = scheme + "://" + peers[i]
	}
	return peers
}
// startInternalV1 replaces the current process with the installed internal
// v1 etcd binary via syscall.Exec, preserving os.Args and the environment.
// Exec only returns on failure, in which case the process is terminated.
func startInternalV1() {
p := os.Getenv("ETCD_BINARY_DIR")
if p == "" {
p = defaultInternalV1etcdBinaryDir
}
// the v1 binary is installed as "<dir>/1"
p = path.Join(p, "1")
err := syscall.Exec(p, os.Args, syscall.Environ())
if err != nil {
log.Fatalf("starter: failed to execute internal v1 etcd: %v", err)
}
}
// newDefaultClient builds an *http.Client whose transport carries the TLS
// client configuration from tls when its scheme is https; otherwise the
// transport is left plain.
func newDefaultClient(tls *TLSInfo) (*http.Client, error) {
	tr := &http.Transport{}
	if tls.Scheme() != "https" {
		return &http.Client{Transport: tr}, nil
	}
	tlsConfig, err := tls.ClientConfig()
	if err != nil {
		return nil, err
	}
	tr.TLSClientConfig = tlsConfig
	return &http.Client{Transport: tr}, nil
}
// value is a minimal flag.Value used to mirror every etcd flag into the
// starter's own FlagSet; it records the raw string without validation.
type value struct {
s string
}
func (v *value) String() string { return v.s }
func (v *value) Set(s string) error {
v.s = s
return nil
}
// IsBoolFlag allows boolean-style flags (e.g. -version) to be given
// without an explicit =value.
func (v *value) IsBoolFlag() bool { return true }
// parseConfig parses out the input config from cmdline arguments and
// environment variables. Every flag known to etcdmain is registered with a
// permissive string value so any etcd invocation parses cleanly.
func parseConfig(args []string) (*flag.FlagSet, error) {
fs := flag.NewFlagSet("full flagset", flag.ContinueOnError)
// mirror every etcd flag so fs accepts the full etcd command line
etcdmain.NewConfig().VisitAll(func(f *flag.Flag) {
fs.Var(&value{}, f.Name, "")
})
if err := fs.Parse(args); err != nil {
return nil, err
}
// flags not given on the command line may come from ETCD_* env vars
if err := flags.SetFlagsFromEnv(fs); err != nil {
return nil, err
}
return fs, nil
}
// clientTLSInfo collects the client-facing TLS flag values into a TLSInfo.
func clientTLSInfo(fs *flag.FlagSet) *TLSInfo {
return &TLSInfo{
CAFile: fs.Lookup("ca-file").Value.String(),
CertFile: fs.Lookup("cert-file").Value.String(),
KeyFile: fs.Lookup("key-file").Value.String(),
}
}
// peerTLSInfo collects the peer-facing TLS flag values into a TLSInfo.
func peerTLSInfo(fs *flag.FlagSet) *TLSInfo {
return &TLSInfo{
CAFile: fs.Lookup("peer-ca-file").Value.String(),
CertFile: fs.Lookup("peer-cert-file").Value.String(),
KeyFile: fs.Lookup("peer-key-file").Value.String(),
}
}
// snapDir4 returns the v0.4 snapshot directory under dataDir.
func snapDir4(dataDir string) string {
return path.Join(dataDir, "snapshot")
}
// logFile4 returns the v0.4 write-ahead log file under dataDir.
func logFile4(dataDir string) string {
return path.Join(dataDir, "log")
}
// standbyInfo4 returns the v0.4 standby_info file under dataDir.
func standbyInfo4(dataDir string) string {
return path.Join(dataDir, "standby_info")
}
// trimSplit splits s on sep and strips surrounding whitespace from every
// piece. Like strings.Split, an empty s yields a single empty element.
func trimSplit(s, sep string) []string {
	parts := strings.Split(s, sep)
	for i, p := range parts {
		parts[i] = strings.TrimSpace(p)
	}
	return parts
}

View File

@ -1,120 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package starter
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
)
// TLSInfo holds the SSL certificate/key/CA file paths used to build
// client- and server-side tls.Config values.
type TLSInfo struct {
CertFile string `json:"CertFile"`
KeyFile string `json:"KeyFile"`
CAFile string `json:"CAFile"`
}
// Scheme returns "https" when both KeyFile and CertFile are set, and
// "http" otherwise.
func (info TLSInfo) Scheme() string {
	if info.KeyFile != "" && info.CertFile != "" {
		return "https"
	}
	// idiom: no else after a terminating return
	return "http"
}
// Generates a tls.Config object for a server from the given files.
// When CAFile is set, client certificates are required and verified
// against that CA; otherwise no client certificate is requested.
func (info TLSInfo) ServerConfig() (*tls.Config, error) {
// Both the key and cert must be present.
if info.KeyFile == "" || info.CertFile == "" {
return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
}
var cfg tls.Config
tlsCert, err := tls.LoadX509KeyPair(info.CertFile, info.KeyFile)
if err != nil {
return nil, err
}
cfg.Certificates = []tls.Certificate{tlsCert}
if info.CAFile != "" {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
cp, err := newCertPool(info.CAFile)
if err != nil {
return nil, err
}
// NOTE(review): RootCAs is normally a client-side field; ClientCAs alone
// governs client-cert verification — confirm setting RootCAs is intended.
cfg.RootCAs = cp
cfg.ClientCAs = cp
} else {
cfg.ClientAuth = tls.NoClientCert
}
return &cfg, nil
}
// Generates a tls.Config object for a client from the given files.
// With no key/cert configured it returns an empty (default) config;
// CAFile, when set, pins the roots used to verify servers.
func (info TLSInfo) ClientConfig() (*tls.Config, error) {
var cfg tls.Config
if info.KeyFile == "" || info.CertFile == "" {
return &cfg, nil
}
tlsCert, err := tls.LoadX509KeyPair(info.CertFile, info.KeyFile)
if err != nil {
return nil, err
}
cfg.Certificates = []tls.Certificate{tlsCert}
if info.CAFile != "" {
cp, err := newCertPool(info.CAFile)
if err != nil {
return nil, err
}
cfg.RootCAs = cp
}
return &cfg, nil
}
// newCertPool creates an x509 CertPool from every PEM block found in the
// provided CA file. A file with no PEM blocks yields an empty pool; a
// block that fails to parse as a certificate is an error.
func newCertPool(CAFile string) (*x509.CertPool, error) {
	data, err := ioutil.ReadFile(CAFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, err
		}
		pool.AddCert(cert)
	}
	return pool, nil
}

View File

@ -88,13 +88,13 @@ func SetFlagsFromEnv(fs *flag.FlagSet) error {
// SetBindAddrFromAddr sets the value of bindAddr flag from the value
// of addr flag. Both flags' Value must be of type IPAddressPort. If the
// bindAddr flag is set and the addr flag is unset, it will set bindAddr to
// 0.0.0.0:port of addr. Otherwise, it keeps the original values.
// [::]:port of addr. Otherwise, it keeps the original values.
func SetBindAddrFromAddr(fs *flag.FlagSet, bindAddrFlagName, addrFlagName string) {
if IsSet(fs, bindAddrFlagName) || !IsSet(fs, addrFlagName) {
return
}
addr := *fs.Lookup(addrFlagName).Value.(*IPAddressPort)
addr.IP = "0.0.0.0"
addr.IP = "::"
if err := fs.Set(bindAddrFlagName, addr.String()); err != nil {
log.Panicf("etcdmain: unexpected flags set error: %v", err)
}

View File

@ -94,7 +94,7 @@ func TestSetBindAddrFromAddr(t *testing.T) {
// addr flag set
{
args: []string{"-addr=192.0.3.17:4001"},
waddr: &IPAddressPort{IP: "0.0.0.0", Port: 4001},
waddr: &IPAddressPort{IP: "::", Port: 4001},
},
// bindAddr flag set
{
@ -106,6 +106,11 @@ func TestSetBindAddrFromAddr(t *testing.T) {
args: []string{"-bind-addr=127.0.0.1:4001", "-addr=192.0.3.17:4001"},
waddr: &IPAddressPort{IP: "127.0.0.1", Port: 4001},
},
// both addr flags set, IPv6
{
args: []string{"-bind-addr=[2001:db8::4:9]:4001", "-addr=[2001:db8::4:f0]:4001"},
waddr: &IPAddressPort{IP: "2001:db8::4:9", Port: 4001},
},
}
for i, tt := range tests {
fs := flag.NewFlagSet("test", flag.PanicOnError)

View File

@ -16,7 +16,6 @@ package flags
import (
"errors"
"fmt"
"net"
"strconv"
"strings"
@ -32,26 +31,26 @@ type IPAddressPort struct {
func (a *IPAddressPort) Set(arg string) error {
arg = strings.TrimSpace(arg)
parts := strings.SplitN(arg, ":", 2)
if len(parts) != 2 {
return errors.New("bad format in address specification")
host, portStr, err := net.SplitHostPort(arg)
if err != nil {
return err
}
if net.ParseIP(parts[0]) == nil {
if net.ParseIP(host) == nil {
return errors.New("bad IP in address specification")
}
port, err := strconv.Atoi(parts[1])
port, err := strconv.Atoi(portStr)
if err != nil {
return errors.New("bad port in address specification")
}
a.IP = parts[0]
a.IP = host
a.Port = port
return nil
}
func (a *IPAddressPort) String() string {
return fmt.Sprintf("%s:%d", a.IP, a.Port)
return net.JoinHostPort(a.IP, strconv.Itoa(a.Port))
}

View File

@ -22,6 +22,7 @@ func TestIPAddressPortSet(t *testing.T) {
pass := []string{
"1.2.3.4:8080",
"10.1.1.1:80",
"[2001:db8::1]:8080",
}
fail := []string{
@ -40,6 +41,8 @@ func TestIPAddressPortSet(t *testing.T) {
"234#$",
"file://foo/bar",
"http://hello",
"2001:db8::1",
"2001:db8::1:1",
}
for i, tt := range pass {
@ -58,14 +61,20 @@ func TestIPAddressPortSet(t *testing.T) {
}
func TestIPAddressPortString(t *testing.T) {
f := &IPAddressPort{}
if err := f.Set("127.0.0.1:4001"); err != nil {
t.Fatalf("unexpected error: %v", err)
addresses := []string{
"[2001:db8::1:1234]:4001",
"127.0.0.1:4001",
}
for i, tt := range addresses {
f := &IPAddressPort{}
if err := f.Set(tt); err != nil {
t.Errorf("#%d: unexpected error: %v", i, err)
}
want := "127.0.0.1:4001"
got := f.String()
if want != got {
t.Fatalf("IPAddressPort.String() value should be %q, got %q", want, got)
want := tt
got := f.String()
if want != got {
t.Errorf("#%d: IPAddressPort.String() value should be %q, got %q", i, want, got)
}
}
}

View File

@ -0,0 +1,81 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!plan9
// InterruptHandler is a function that is called on receiving a
// SIGTERM or SIGINT signal.
package osutil
import (
"log"
"os"
"os/signal"
"sync"
"syscall"
)
// InterruptHandler is a function run on receiving SIGINT or SIGTERM.
type InterruptHandler func()
var (
// interruptRegisterMu guards interruptHandlers; interruptExitMu is taken
// (and never released) once interrupt handling begins, so Exit blocks.
interruptRegisterMu, interruptExitMu sync.Mutex
// interruptHandlers holds all registered InterruptHandlers in order
// they will be executed.
interruptHandlers = []InterruptHandler{}
)
// RegisterInterruptHandler registers a new InterruptHandler. Handlers registered
// after interrupt handling was initiated will not be executed.
func RegisterInterruptHandler(h InterruptHandler) {
interruptRegisterMu.Lock()
defer interruptRegisterMu.Unlock()
interruptHandlers = append(interruptHandlers, h)
}
// HandleInterrupts calls the handler functions on receiving a SIGINT or SIGTERM.
// After running the handlers it re-raises the signal with default handling so
// the process dies with the conventional signal exit status.
func HandleInterrupts() {
notifier := make(chan os.Signal, 1)
signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
go func() {
sig := <-notifier
// snapshot the handler list so late registrations are ignored
interruptRegisterMu.Lock()
ihs := make([]InterruptHandler, len(interruptHandlers))
copy(ihs, interruptHandlers)
interruptRegisterMu.Unlock()
// hold (and never release) the exit mutex so Exit blocks from here on
interruptExitMu.Lock()
log.Printf("received %v signal, shutting down...", sig)
for _, h := range ihs {
h()
}
// stop catching the signal, then re-deliver it to terminate the process
signal.Stop(notifier)
pid := syscall.Getpid()
// exit directly if it is the "init" process, since the kernel will not help to kill pid 1.
if pid == 1 {
os.Exit(0)
}
syscall.Kill(pid, sig.(syscall.Signal))
}()
}
// Exit relays to os.Exit if no interrupt handlers are running, blocks otherwise.
// It intentionally locks interruptExitMu without unlocking: once interrupt
// handling holds the mutex, Exit blocks forever and the signal path decides
// how the process terminates.
func Exit(code int) {
interruptExitMu.Lock()
os.Exit(code)
}

View File

@ -0,0 +1,32 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package osutil
import "os"
// InterruptHandler is a function run on interrupt; unused on windows.
type InterruptHandler func()
// RegisterInterruptHandler is a no-op on windows.
func RegisterInterruptHandler(h InterruptHandler) {}
// HandleInterrupts is a no-op on windows.
func HandleInterrupts() {}
// Exit calls os.Exit directly; there is no interrupt coordination on windows.
func Exit(code int) {
os.Exit(code)
}

View File

@ -16,8 +16,11 @@ package osutil
import (
"os"
"os/signal"
"reflect"
"syscall"
"testing"
"time"
)
func TestUnsetenv(t *testing.T) {
@ -43,3 +46,43 @@ func TestUnsetenv(t *testing.T) {
}
}
}
// waitSig fails the test unless exactly sig arrives on c within one second.
func waitSig(t *testing.T, c <-chan os.Signal, sig os.Signal) {
select {
case s := <-c:
if s != sig {
t.Fatalf("signal was %v, want %v", s, sig)
}
case <-time.After(1 * time.Second):
t.Fatalf("timeout waiting for %v", sig)
}
}
// TestHandleInterrupts registers two handlers, raises SIGINT/SIGTERM, and
// checks via the accumulator n that both ran exactly once and in
// registration order ((1+1)*2 = 4; the wrong order would give 3).
func TestHandleInterrupts(t *testing.T) {
for _, sig := range []syscall.Signal{syscall.SIGINT, syscall.SIGTERM} {
n := 1
RegisterInterruptHandler(func() { n++ })
RegisterInterruptHandler(func() { n *= 2 })
c := make(chan os.Signal, 2)
signal.Notify(c, sig)
HandleInterrupts()
syscall.Kill(syscall.Getpid(), sig)
// we should receive the signal once from our own kill and
// a second time from HandleInterrupts
waitSig(t, c, sig)
waitSig(t, c, sig)
if n == 3 {
t.Fatalf("interrupt handlers were called in wrong order")
}
if n != 4 {
t.Fatalf("interrupt handlers were not called properly")
}
// reset interrupt handlers
interruptHandlers = interruptHandlers[:0]
interruptExitMu.Unlock()
}
}

View File

@ -31,7 +31,10 @@ func NewListener(addr string, scheme string, info TLSInfo) (net.Listener, error)
return nil, err
}
if !info.Empty() && scheme == "https" {
if scheme == "https" {
if info.Empty() {
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr)
}
cfg, err := info.ServerConfig()
if err != nil {
return nil, err

View File

@ -70,6 +70,13 @@ func TestNewListenerTLSInfo(t *testing.T) {
}
}
// TestNewListenerTLSEmptyInfo ensures an https listener cannot be created
// from an empty TLSInfo (no key/cert configured).
func TestNewListenerTLSEmptyInfo(t *testing.T) {
_, err := NewListener("127.0.0.1:0", "https", TLSInfo{})
if err == nil {
t.Errorf("err = nil, want not presented error")
}
}
func TestNewListenerTLSInfoNonexist(t *testing.T) {
tlsInfo := TLSInfo{CertFile: "@badname", KeyFile: "@badname"}
_, err := NewListener("127.0.0.1:0", "https", tlsInfo)

View File

@ -28,6 +28,9 @@ func NewTimeoutTransport(info TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (*ht
if err != nil {
return nil, err
}
// the timed-out connection will time out soon after it is idle.
// it should not be put back to the http transport as an idle connection for future use.
tr.MaxIdleConnsPerHost = -1
tr.Dial = (&rwTimeoutDialer{
Dialer: net.Dialer{
Timeout: 30 * time.Second,

View File

@ -15,6 +15,8 @@
package transport
import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
@ -28,7 +30,12 @@ func TestNewTimeoutTransport(t *testing.T) {
if err != nil {
t.Fatalf("unexpected NewTimeoutTransport error: %v", err)
}
srv := httptest.NewServer(http.NotFoundHandler())
remoteAddr := func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(r.RemoteAddr))
}
srv := httptest.NewServer(http.HandlerFunc(remoteAddr))
defer srv.Close()
conn, err := tr.Dial("tcp", srv.Listener.Addr().String())
if err != nil {
@ -46,4 +53,33 @@ func TestNewTimeoutTransport(t *testing.T) {
if tconn.wtimeoutd != time.Hour {
t.Errorf("write timeout = %s, want %s", tconn.wtimeoutd, time.Hour)
}
// ensure not reuse timeout connection
req, err := http.NewRequest("GET", srv.URL, nil)
if err != nil {
t.Fatalf("unexpected err %v", err)
}
resp, err := tr.RoundTrip(req)
if err != nil {
t.Fatalf("unexpected err %v", err)
}
addr0, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
t.Fatalf("unexpected err %v", err)
}
resp, err = tr.RoundTrip(req)
if err != nil {
t.Fatalf("unexpected err %v", err)
}
addr1, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
t.Fatalf("unexpected err %v", err)
}
if bytes.Equal(addr0, addr1) {
t.Errorf("addr0 = %s addr1= %s, want not equal", string(addr0), string(addr1))
}
}

62
pkg/wait/wait_time.go Normal file
View File

@ -0,0 +1,62 @@
/*
Copyright 2015 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"sync"
"time"
)
// WaitTime registers interest in deadlines and releases waiters once a
// later deadline is triggered.
type WaitTime interface {
// Wait returns a chan that waits on the given deadline.
// The chan will be triggered when Trigger is called with a
// deadline that is later than the one it is waiting for.
// The given deadline MUST be unique. The deadline should be
// retrieved by calling time.Now() in most cases.
Wait(deadline time.Time) <-chan struct{}
// Trigger triggers all the waiting chans with an earlier deadline.
Trigger(deadline time.Time)
}
// timeList implements WaitTime with a mutex-guarded map keyed by the
// deadline's UnixNano value.
type timeList struct {
l sync.Mutex
// m maps deadline (UnixNano) to the chan closed when it is triggered
m map[int64]chan struct{}
}
// NewTimeList returns an empty, ready-to-use timeList.
func NewTimeList() *timeList {
return &timeList{m: make(map[int64]chan struct{})}
}
// Wait registers deadline and returns the chan that Trigger will close.
// A duplicate deadline overwrites the earlier entry, orphaning its chan —
// hence the uniqueness requirement on the interface.
func (tl *timeList) Wait(deadline time.Time) <-chan struct{} {
tl.l.Lock()
defer tl.l.Unlock()
ch := make(chan struct{}, 1)
// The given deadline SHOULD be unique.
tl.m[deadline.UnixNano()] = ch
return ch
}
// Trigger closes and removes every waiting chan whose deadline is strictly
// earlier than the given one. Deleting map entries while ranging over the
// same map is well-defined in Go.
func (tl *timeList) Trigger(deadline time.Time) {
tl.l.Lock()
defer tl.l.Unlock()
for t, ch := range tl.m {
if t < deadline.UnixNano() {
delete(tl.m, t)
close(ch)
}
}
}

View File

@ -0,0 +1,85 @@
/*
Copyright 2015 CoreOS, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"testing"
"time"
)
// TestWaitTime verifies that a registered channel fires only when
// Trigger is called with a deadline later than the one it waits on.
func TestWaitTime(t *testing.T) {
	wt := NewTimeList()

	before := wt.Wait(time.Now())
	trigger1 := time.Now()
	wt.Trigger(trigger1)
	select {
	case <-before:
	case <-time.After(10 * time.Millisecond):
		t.Fatalf("cannot receive from ch as expected")
	}

	pending := wt.Wait(time.Now())
	trigger2 := time.Now()
	// Re-triggering with the earlier deadline must not fire the new wait.
	wt.Trigger(trigger1)
	select {
	case <-pending:
		t.Fatalf("unexpected to receive from ch")
	case <-time.After(10 * time.Millisecond):
	}

	wt.Trigger(trigger2)
	select {
	case <-pending:
	case <-time.After(10 * time.Millisecond):
		t.Fatalf("cannot receive from ch as expected")
	}
}
// TestWaitTestStress registers many waiters and checks that a single
// later Trigger releases all of them.
func TestWaitTestStress(t *testing.T) {
	wt := NewTimeList()
	var waiters []<-chan struct{}
	for i := 0; i < 10000; i++ {
		waiters = append(waiters, wt.Wait(time.Now()))
	}
	wt.Trigger(time.Now())
	for _, w := range waiters {
		select {
		case <-w:
		case <-time.After(10 * time.Millisecond):
			t.Fatalf("cannot receive from ch as expected")
		}
	}
}
// BenchmarkWaitTime measures the cost of registering waiters that all
// share one deadline.
func BenchmarkWaitTime(b *testing.B) {
	deadline := time.Now()
	wt := NewTimeList()
	for n := 0; n < b.N; n++ {
		wt.Wait(deadline)
	}
}
// BenchmarkTriggerAnd10KWaitTime measures one Trigger over 10k
// registered waiters per iteration.
func BenchmarkTriggerAnd10KWaitTime(b *testing.B) {
	for n := 0; n < b.N; n++ {
		deadline := time.Now()
		wt := NewTimeList()
		for k := 0; k < 10000; k++ {
			wt.Wait(deadline)
		}
		wt.Trigger(time.Now())
	}
}

View File

@ -16,6 +16,7 @@ package proxy
import (
"log"
"math/rand"
"net/url"
"sync"
"time"
@ -65,6 +66,13 @@ func (d *director) refresh() {
}
endpoints = append(endpoints, newEndpoint(*uu))
}
// shuffle array to avoid connections being "stuck" to a single endpoint
for i := range endpoints {
j := rand.Intn(i + 1)
endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
}
d.ep = endpoints
}

View File

@ -18,7 +18,7 @@ Package raft provides an implementation of the raft consensus algorithm.
The primary object in raft is a Node. You either start a Node from scratch
using raft.StartNode or start a Node from some initial state using raft.RestartNode.
storage := raft.NewMemoryStorage()
n := raft.StartNode(0x01, []int64{0x02, 0x03}, 3, 1, storage)
n := raft.StartNode(0x01, []raft.Peer{{ID: 0x02}, {ID: 0x03}}, 3, 1, storage)
Now that you are holding onto a Node you have a few responsibilities:

View File

@ -296,15 +296,15 @@ func TestCompactionSideEffects(t *testing.T) {
t.Errorf("lastIndex = %d, want %d", raftLog.lastIndex(), lastIndex)
}
for i := offset; i <= raftLog.lastIndex(); i++ {
if raftLog.term(i) != i {
t.Errorf("term(%d) = %d, want %d", i, raftLog.term(i), i)
for j := offset; j <= raftLog.lastIndex(); j++ {
if raftLog.term(j) != j {
t.Errorf("term(%d) = %d, want %d", j, raftLog.term(j), j)
}
}
for i := offset; i <= raftLog.lastIndex(); i++ {
if !raftLog.matchTerm(i, i) {
t.Errorf("matchTerm(%d) = false, want true", i)
for j := offset; j <= raftLog.lastIndex(); j++ {
if !raftLog.matchTerm(j, j) {
t.Errorf("matchTerm(%d) = false, want true", j)
}
}
@ -354,9 +354,9 @@ func TestNextEnts(t *testing.T) {
raftLog.maybeCommit(5, 1)
raftLog.appliedTo(tt.applied)
ents := raftLog.nextEnts()
if !reflect.DeepEqual(ents, tt.wents) {
t.Errorf("#%d: ents = %+v, want %+v", i, ents, tt.wents)
nents := raftLog.nextEnts()
if !reflect.DeepEqual(nents, tt.wents) {
t.Errorf("#%d: nents = %+v, want %+v", i, nents, tt.wents)
}
}
}
@ -649,10 +649,10 @@ func TestTerm(t *testing.T) {
{offset + num, 0},
}
for i, tt := range tests {
for j, tt := range tests {
term := l.term(tt.index)
if !reflect.DeepEqual(term, tt.w) {
t.Errorf("#%d: at = %d, want %d", i, term, tt.w)
t.Errorf("#%d: at = %d, want %d", j, term, tt.w)
}
}
}
@ -712,18 +712,18 @@ func TestSlice(t *testing.T) {
{offset + num, offset + num + 1, nil, true},
}
for i, tt := range tests {
for j, tt := range tests {
func() {
defer func() {
if r := recover(); r != nil {
if !tt.wpanic {
t.Errorf("%d: panic = %v, want %v: %v", i, true, false, r)
t.Errorf("%d: panic = %v, want %v: %v", j, true, false, r)
}
}
}()
g := l.slice(tt.from, tt.to)
if !reflect.DeepEqual(g, tt.w) {
t.Errorf("#%d: from %d to %d = %v, want %v", i, tt.from, tt.to, g, tt.w)
t.Errorf("#%d: from %d to %d = %v, want %v", j, tt.from, tt.to, g, tt.w)
}
}()
}

View File

@ -304,7 +304,7 @@ func TestNodeStart(t *testing.T) {
wants := []Ready{
{
SoftState: &SoftState{Lead: 1, RaftState: StateLeader},
HardState: raftpb.HardState{Term: 2, Commit: 2},
HardState: raftpb.HardState{Term: 2, Commit: 2, Vote: 1},
Entries: []raftpb.Entry{
{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
{Term: 2, Index: 2},
@ -315,7 +315,7 @@ func TestNodeStart(t *testing.T) {
},
},
{
HardState: raftpb.HardState{Term: 2, Commit: 3},
HardState: raftpb.HardState{Term: 2, Commit: 3, Vote: 1},
Entries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
},
@ -332,10 +332,10 @@ func TestNodeStart(t *testing.T) {
}
n.Propose(ctx, []byte("foo"))
if g := <-n.Ready(); !reflect.DeepEqual(g, wants[1]) {
t.Errorf("#%d: g = %+v,\n w %+v", 2, g, wants[1])
if g2 := <-n.Ready(); !reflect.DeepEqual(g2, wants[1]) {
t.Errorf("#%d: g = %+v,\n w %+v", 2, g2, wants[1])
} else {
storage.Append(g.Entries)
storage.Append(g2.Entries)
n.Advance()
}

View File

@ -306,9 +306,11 @@ func (r *raft) maybeCommit() bool {
}
func (r *raft) reset(term uint64) {
r.Term = term
if r.Term != term {
r.Term = term
r.Vote = None
}
r.lead = None
r.Vote = None
r.elapsed = 0
r.votes = make(map[uint64]bool)
for i := range r.prs {

View File

@ -774,7 +774,7 @@ func TestVoteRequest(t *testing.T) {
{[]pb.Entry{{Term: 1, Index: 1}}, 2},
{[]pb.Entry{{Term: 1, Index: 1}, {Term: 2, Index: 2}}, 3},
}
for i, tt := range tests {
for j, tt := range tests {
r := newRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage(), 0)
r.Step(pb.Message{
From: 2, To: 1, Type: pb.MsgApp, Term: tt.wterm - 1, LogTerm: 0, Index: 0, Entries: tt.ents,
@ -788,7 +788,7 @@ func TestVoteRequest(t *testing.T) {
msgs := r.readMessages()
sort.Sort(messageSlice(msgs))
if len(msgs) != 2 {
t.Fatalf("#%d: len(msg) = %d, want %d", i, len(msgs), 2)
t.Fatalf("#%d: len(msg) = %d, want %d", j, len(msgs), 2)
}
for i, m := range msgs {
if m.Type != pb.MsgVote {

View File

@ -510,7 +510,7 @@ func TestOldMessages(t *testing.T) {
// commit a new entry
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgProp, Entries: []pb.Entry{{Data: []byte("somedata")}}})
l := &raftLog{
ilog := &raftLog{
storage: &MemoryStorage{
ents: []pb.Entry{
{}, {Data: nil, Term: 1, Index: 1},
@ -521,7 +521,7 @@ func TestOldMessages(t *testing.T) {
unstable: unstable{offset: 5},
committed: 4,
}
base := ltoa(l)
base := ltoa(ilog)
for i, p := range tt.peers {
if sm, ok := p.(*raft); ok {
l := ltoa(sm.raftLog)
@ -548,7 +548,7 @@ func TestProposal(t *testing.T) {
{newNetwork(nil, nopStepper, nopStepper, nil, nil), true},
}
for i, tt := range tests {
for j, tt := range tests {
send := func(m pb.Message) {
defer func() {
// only recover is we expect it to panic so
@ -556,7 +556,7 @@ func TestProposal(t *testing.T) {
if !tt.success {
e := recover()
if e != nil {
t.Logf("#%d: err: %s", i, e)
t.Logf("#%d: err: %s", j, e)
}
}
}()
@ -591,7 +591,7 @@ func TestProposal(t *testing.T) {
}
sm := tt.network.peers[1].(*raft)
if g := sm.Term; g != 1 {
t.Errorf("#%d: term = %d, want %d", i, g, 1)
t.Errorf("#%d: term = %d, want %d", j, g, 1)
}
}
}
@ -603,7 +603,7 @@ func TestProposalByProxy(t *testing.T) {
newNetwork(nil, nil, nopStepper),
}
for i, tt := range tests {
for j, tt := range tests {
// promote 0 the leader
tt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup})
@ -629,7 +629,7 @@ func TestProposalByProxy(t *testing.T) {
}
sm := tt.peers[1].(*raft)
if g := sm.Term; g != 1 {
t.Errorf("#%d: term = %d, want %d", i, g, 1)
t.Errorf("#%d: term = %d, want %d", j, g, 1)
}
}
}
@ -1601,8 +1601,8 @@ func newNetwork(peers ...Interface) *network {
npeers := make(map[uint64]Interface, size)
nstorage := make(map[uint64]*MemoryStorage, size)
for i, p := range peers {
id := peerAddrs[i]
for j, p := range peers {
id := peerAddrs[j]
switch v := p.(type) {
case nil:
nstorage[id] = NewMemoryStorage()

View File

@ -84,10 +84,10 @@ func DescribeMessage(m pb.Message, f EntryFormatter) string {
// Entry for debugging.
func DescribeEntry(e pb.Entry, f EntryFormatter) string {
var formatted string
if f == nil {
formatted = fmt.Sprintf("%q", e.Data)
} else {
if e.Type == pb.EntryNormal && f != nil {
formatted = f(e.Data)
} else {
formatted = fmt.Sprintf("%q", e.Data)
}
return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
}

View File

@ -54,7 +54,9 @@ func (er *entryReader) readEntries() ([]raftpb.Entry, error) {
}
er.ents.Add()
}
er.lastIndex.Set(int64(ents[l-1].Index))
if l > 0 {
er.lastIndex.Set(int64(ents[l-1].Index))
}
return ents, nil
}

View File

@ -295,6 +295,9 @@ func (s *streamReader) handle(r io.Reader) {
}
return
}
if len(ents) == 0 {
continue
}
// The commit index field in appendEntry message is not recovered.
// The follower updates its commit index through heartbeat.
msg := raftpb.Message{

View File

@ -37,6 +37,7 @@ type Transporter interface {
Send(m []raftpb.Message)
AddPeer(id types.ID, urls []string)
RemovePeer(id types.ID)
RemoveAllPeers()
UpdatePeer(id types.ID, urls []string)
Stop()
}
@ -132,7 +133,24 @@ func (t *transport) AddPeer(id types.ID, urls []string) {
func (t *transport) RemovePeer(id types.ID) {
t.mu.Lock()
defer t.mu.Unlock()
t.peers[id].Stop()
t.removePeer(id)
}
// RemoveAllPeers stops and deletes every peer known to the transport,
// along with its follower stats.
func (t *transport) RemoveAllPeers() {
	t.mu.Lock()
	defer t.mu.Unlock()
	// Deleting entries during a range over the map is safe in Go;
	// removePeer deletes only the key currently being visited.
	for id := range t.peers {
		t.removePeer(id)
	}
}
// removePeer stops the peer and drops it from the peer and follower
// maps. The caller of this function must have the peers mutex.
func (t *transport) removePeer(id types.ID) {
	peer, ok := t.peers[id]
	if !ok {
		log.Panicf("rafthttp: unexpected removal of unknown peer '%d'", id)
	}
	peer.Stop()
	delete(t.peers, id)
	delete(t.leaderStats.Followers, id.String())
}

View File

@ -82,29 +82,32 @@ func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
break
}
}
return snap, err
if err != nil {
return nil, ErrNoSnapshot
}
return snap, nil
}
func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
var err error
var b []byte
fpath := path.Join(dir, name)
defer func() {
if err != nil {
renameBroken(fpath)
}
}()
b, err = ioutil.ReadFile(fpath)
snap, err := Read(fpath)
if err != nil {
log.Printf("snap: snapshotter cannot read file %v: %v", name, err)
renameBroken(fpath)
}
return snap, err
}
// Read reads the snapshot named by snapname and returns the snapshot.
func Read(snapname string) (*raftpb.Snapshot, error) {
b, err := ioutil.ReadFile(snapname)
if err != nil {
log.Printf("snap: snapshotter cannot read file %v: %v", snapname, err)
return nil, err
}
var serializedSnap snappb.Snapshot
if err = serializedSnap.Unmarshal(b); err != nil {
log.Printf("snap: corrupted snapshot file %v: %v", name, err)
log.Printf("snap: corrupted snapshot file %v: %v", snapname, err)
return nil, err
}
@ -115,13 +118,13 @@ func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
crc := crc32.Update(0, crcTable, serializedSnap.Data)
if crc != serializedSnap.Crc {
log.Printf("snap: corrupted snapshot file %v: crc mismatch", name)
log.Printf("snap: corrupted snapshot file %v: crc mismatch", snapname)
return nil, ErrCRCMismatch
}
var snap raftpb.Snapshot
if err = snap.Unmarshal(serializedSnap.Data); err != nil {
log.Printf("snap: corrupted snapshot file %v: %v", name, err)
log.Printf("snap: corrupted snapshot file %v: %v", snapname, err)
return nil, err
}
return &snap, nil

View File

@ -76,7 +76,7 @@ func TestBadCRC(t *testing.T) {
// fake a crc mismatch
crcTable = crc32.MakeTable(crc32.Koopman)
_, err = ss.Load()
_, err = Read(path.Join(dir, fmt.Sprintf("%016x-%016x.snap", 1, 1)))
if err == nil || err != ErrCRCMismatch {
t.Errorf("err = %v, want %v", err, ErrCRCMismatch)
}
@ -182,7 +182,7 @@ func TestNoSnapshot(t *testing.T) {
defer os.RemoveAll(dir)
ss := New(dir)
_, err = ss.Load()
if err == nil || err != ErrNoSnapshot {
if err != ErrNoSnapshot {
t.Errorf("err = %v, want %v", err, ErrNoSnapshot)
}
}
@ -195,14 +195,35 @@ func TestEmptySnapshot(t *testing.T) {
}
defer os.RemoveAll(dir)
err = ioutil.WriteFile(path.Join(dir, "1.snap"), []byte("shit"), 0x700)
err = ioutil.WriteFile(path.Join(dir, "1.snap"), []byte(""), 0x700)
if err != nil {
t.Fatal(err)
}
_, err = Read(path.Join(dir, "1.snap"))
if err != ErrEmptySnapshot {
t.Errorf("err = %v, want %v", err, ErrEmptySnapshot)
}
}
// TestAllSnapshotBroken ensures snapshotter returns
// ErrNoSnapshot if all the snapshots are broken.
func TestAllSnapshotBroken(t *testing.T) {
dir := path.Join(os.TempDir(), "snapshot")
err := os.Mkdir(dir, 0700)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
err = ioutil.WriteFile(path.Join(dir, "1.snap"), []byte("bad"), 0x700)
if err != nil {
t.Fatal(err)
}
ss := New(dir)
_, err = ss.Load()
if err == nil || err != ErrEmptySnapshot {
t.Errorf("err = %v, want %v", err, ErrEmptySnapshot)
if err != ErrNoSnapshot {
t.Errorf("err = %v, want %v", err, ErrNoSnapshot)
}
}

View File

@ -88,8 +88,8 @@ func TestFullEventQueue(t *testing.T) {
// Add
for i := 0; i < 1000; i++ {
e := newEvent(Create, "/foo", uint64(i), uint64(i))
eh.addEvent(e)
ce := newEvent(Create, "/foo", uint64(i), uint64(i))
eh.addEvent(ce)
e, err := eh.scan("/foo", true, uint64(i-1))
if i > 0 {
if e == nil || err != nil {

View File

@ -51,10 +51,10 @@ func TestHeapUpdate(t *testing.T) {
// add from older expire time to earlier expire time
// the path is equal to ttl from now
for i, n := range kvs {
for i := range kvs {
path := fmt.Sprintf("%v", 10-i)
m := time.Duration(10 - i)
n = newKV(nil, path, path, 0, nil, "", time.Now().Add(time.Second*m))
n := newKV(nil, path, path, 0, nil, "", time.Now().Add(time.Second*m))
kvs[i] = n
h.push(n)
}

View File

@ -78,10 +78,24 @@ func newStats() *Stats {
}
func (s *Stats) clone() *Stats {
return &Stats{s.GetSuccess, s.GetFail, s.SetSuccess, s.SetFail,
s.DeleteSuccess, s.DeleteFail, s.UpdateSuccess, s.UpdateFail, s.CreateSuccess,
s.CreateFail, s.CompareAndSwapSuccess, s.CompareAndSwapFail,
s.CompareAndDeleteSuccess, s.CompareAndDeleteFail, s.Watchers, s.ExpireCount}
return &Stats{
GetSuccess: s.GetSuccess,
GetFail: s.GetFail,
SetSuccess: s.SetSuccess,
SetFail: s.SetFail,
DeleteSuccess: s.DeleteSuccess,
DeleteFail: s.DeleteFail,
UpdateSuccess: s.UpdateSuccess,
UpdateFail: s.UpdateFail,
CreateSuccess: s.CreateSuccess,
CreateFail: s.CreateFail,
CompareAndSwapSuccess: s.CompareAndSwapSuccess,
CompareAndSwapFail: s.CompareAndSwapFail,
CompareAndDeleteSuccess: s.CompareAndDeleteSuccess,
CompareAndDeleteFail: s.CompareAndDeleteFail,
ExpireCount: s.ExpireCount,
Watchers: s.Watchers,
}
}
func (s *Stats) toJson() []byte {

View File

@ -84,7 +84,6 @@ func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeInde
if ok { // add the new watcher to the back of the list
elem = l.PushBack(w)
} else { // create a new list and add the new watcher
l = list.New()
elem = l.PushBack(w)
@ -146,6 +145,7 @@ func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
// if we successfully notify a watcher
// we need to remove the watcher from the list
// and decrease the counter
w.removed = true
l.Remove(curr)
atomic.AddInt64(&wh.count, -1)
}

11
test
View File

@ -15,7 +15,7 @@ COVER=${COVER:-"-cover"}
source ./build
# Hack: gofmt ./ will recursively check the .git directory. So use *.go for gofmt.
TESTABLE_AND_FORMATTABLE="client discovery error etcdctl/command etcdmain etcdserver etcdserver/etcdhttp etcdserver/etcdhttp/httptypes migrate pkg/fileutil pkg/flags pkg/idutil pkg/ioutil pkg/netutil pkg/osutil pkg/pbutil pkg/types pkg/transport pkg/wait proxy raft rafthttp snap store wal"
TESTABLE_AND_FORMATTABLE="client discovery error etcdctl/command etcdmain etcdserver etcdserver/etcdhttp etcdserver/etcdhttp/httptypes migrate pkg/fileutil pkg/flags pkg/idutil pkg/ioutil pkg/netutil pkg/osutil pkg/pbutil pkg/types pkg/transport pkg/wait proxy raft rafthttp snap store version wal"
FORMATTABLE="$TESTABLE_AND_FORMATTABLE *.go etcdctl/ integration"
# user has not provided PKG override
@ -60,4 +60,13 @@ if [ -n "${vetRes}" ]; then
exit 255
fi
if command -v go-nyet >/dev/null 2>&1; then
echo "Checking go-nyet..."
nyetRes=$(go-nyet -exitWith 0 $FMT)
if [ -n "${nyetRes}" ]; then
echo -e "go-nyet checking failed:\n${nyetRes}"
exit 255
fi
fi
echo "Success"

View File

@ -32,24 +32,47 @@ import (
func main() {
from := flag.String("data-dir", "", "")
snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping")
index := flag.Uint64("start-index", 0, "The index to start dumping")
flag.Parse()
if *from == "" {
log.Fatal("Must provide -data-dir flag")
log.Fatal("Must provide -data-dir flag.")
}
if *snapfile != "" && *index != 0 {
log.Fatal("start-snap and start-index flags cannot be used together.")
}
ss := snap.New(snapDir(*from))
snapshot, err := ss.Load()
var walsnap walpb.Snapshot
switch err {
case nil:
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
walsnap.Term, walsnap.Index, nodes)
case snap.ErrNoSnapshot:
fmt.Printf("Snapshot:\nempty\n")
default:
log.Fatalf("Failed loading snapshot: %v", err)
var (
walsnap walpb.Snapshot
snapshot *raftpb.Snapshot
err error
)
isIndex := *index != 0
if isIndex {
fmt.Printf("Start dumping log entries from index %d.\n", *index)
walsnap.Index = *index
} else {
if *snapfile == "" {
ss := snap.New(snapDir(*from))
snapshot, err = ss.Load()
} else {
snapshot, err = snap.Read(path.Join(snapDir(*from), *snapfile))
}
switch err {
case nil:
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
walsnap.Term, walsnap.Index, nodes)
case snap.ErrNoSnapshot:
fmt.Printf("Snapshot:\nempty\n")
default:
log.Fatalf("Failed loading snapshot: %v", err)
}
fmt.Println("Start dupmping log entries from snapshot.")
}
w, err := wal.Open(walDir(*from), walsnap)
@ -58,7 +81,7 @@ func main() {
}
wmetadata, state, ents, err := w.ReadAll()
w.Close()
if err != nil {
if err != nil && (!isIndex || err != wal.ErrSnapshotNotFound) {
log.Fatalf("Failed reading WAL: %v", err)
}
id, cid := parseWALMetadata(wmetadata)
@ -102,9 +125,9 @@ func main() {
}
}
func walDir(dataDir string) string { return path.Join(dataDir, "wal") }
func walDir(dataDir string) string { return path.Join(dataDir, "member", "wal") }
func snapDir(dataDir string) string { return path.Join(dataDir, "snap") }
func snapDir(dataDir string) string { return path.Join(dataDir, "member", "snap") }
func parseWALMetadata(b []byte) (id, cid types.ID) {
var metadata etcdserverpb.Metadata

View File

@ -15,7 +15,8 @@ etcd will detect 0.4.x data dir and update the data automatically (while leaving
The tool can be run via:
```sh
./bin/etcd-migrate --data-dir=<PATH TO YOUR DATA>
./go build
./etcd-migrate --data-dir=<PATH TO YOUR DATA>
```
It should autodetect everything and convert the data-dir to be 2.0 compatible. It does not remove the 0.4.x data, and is safe to convert multiple times; the 2.0 data will be overwritten. Recovering the disk space once everything is settled is covered later in the document.
@ -44,4 +45,4 @@ If the conversion has completed, the entire cluster is running on something 2.0-
rm -ri snapshot conf log
```
It will ask before every deletion, but these are the 0.4.x files and will not affect the working 2.0 data.
It will ask before every deletion, but these are the 0.4.x files and will not affect the working 2.0 data.

View File

@ -14,7 +14,65 @@
package version
var (
Version = "2.0.1"
InternalVersion = "2"
import (
"os"
"path"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/types"
)
var (
	// Version is the semantic version of this etcd release.
	Version = "2.0.10"
)
// DataDirVersion is an enum for the on-disk layout versions of an
// etcd data directory.
type DataDirVersion string

const (
	DataDirUnknown  DataDirVersion = "Unknown WAL"
	DataDir0_4      DataDirVersion = "0.4.x"
	DataDir2_0      DataDirVersion = "2.0.0"
	DataDir2_0Proxy DataDirVersion = "2.0 proxy"
	DataDir2_0_1    DataDirVersion = "2.0.1"
)
// DetectDataDir inspects the contents of dirpath and reports which
// etcd data-dir layout it appears to use. A non-existent directory is
// not an error: it is reported as DataDirUnknown with a nil error.
// The layout checks below are ordered from most to least specific.
func DetectDataDir(dirpath string) (DataDirVersion, error) {
	names, err := fileutil.ReadDir(dirpath)
	if err != nil {
		if os.IsNotExist(err) {
			err = nil
		}
		// Error reading the directory
		return DataDirUnknown, err
	}
	nameSet := types.NewUnsafeSet(names...)
	if nameSet.Contains("member") {
		// The 2.0.1 layout nests the 2.0 layout under a "member"
		// subdirectory; recurse one level to classify its contents.
		ver, err := DetectDataDir(path.Join(dirpath, "member"))
		if ver == DataDir2_0 {
			return DataDir2_0_1, nil
		} else if ver == DataDir0_4 {
			// How in the blazes did it get there?
			return DataDirUnknown, nil
		}
		return ver, err
	}
	if nameSet.ContainsAll([]string{"snap", "wal"}) {
		// .../wal cannot be empty to exist.
		walnames, err := fileutil.ReadDir(path.Join(dirpath, "wal"))
		if err == nil && len(walnames) > 0 {
			return DataDir2_0, nil
		}
	}
	if nameSet.ContainsAll([]string{"proxy"}) {
		return DataDir2_0Proxy, nil
	}
	if nameSet.ContainsAll([]string{"snapshot", "conf", "log"}) {
		return DataDir0_4, nil
	}
	if nameSet.ContainsAll([]string{"standby_info"}) {
		return DataDir0_4, nil
	}
	return DataDirUnknown, nil
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
package version
import (
"io/ioutil"
@ -22,20 +22,20 @@ import (
"testing"
)
func TestDetectVersion(t *testing.T) {
func TestDetectDataDir(t *testing.T) {
tests := []struct {
names []string
wver WalVersion
wver DataDirVersion
}{
{[]string{}, WALNotExist},
{[]string{"snap/", "wal/", "wal/1"}, WALv0_5},
{[]string{"snapshot/", "conf", "log"}, WALv0_4},
{[]string{"weird"}, WALUnknown},
{[]string{"snap/", "wal/"}, WALUnknown},
{[]string{"member/", "member/wal/", "member/wal/1", "member/snap/"}, DataDir2_0_1},
{[]string{"snap/", "wal/", "wal/1"}, DataDir2_0},
{[]string{"snapshot/", "conf", "log"}, DataDir0_4},
{[]string{"weird"}, DataDirUnknown},
{[]string{"snap/", "wal/"}, DataDirUnknown},
}
for i, tt := range tests {
p := mustMakeDir(t, tt.names...)
ver, err := DetectVersion(p)
ver, err := DetectDataDir(p)
if ver != tt.wver {
t.Errorf("#%d: version = %s, want %s", i, ver, tt.wver)
}
@ -44,15 +44,6 @@ func TestDetectVersion(t *testing.T) {
}
os.RemoveAll(p)
}
// detect on non-exist directory
v, err := DetectVersion(path.Join(os.TempDir(), "waltest", "not-exist"))
if v != WALNotExist {
t.Errorf("#non-exist: version = %s, want %s", v, WALNotExist)
}
if err != nil {
t.Errorf("#non-exist: err = %s, want %s", v, WALNotExist)
}
}
// mustMakeDir builds the directory that contains files with the given

View File

@ -17,50 +17,10 @@ package wal
import (
"fmt"
"log"
"os"
"path"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/types"
)
// WalVersion is an enum for versions of etcd logs.
type WalVersion string
const (
WALUnknown WalVersion = "Unknown WAL"
WALNotExist WalVersion = "No WAL"
WALv0_4 WalVersion = "0.4.x"
WALv0_5 WalVersion = "0.5.x"
)
func DetectVersion(dirpath string) (WalVersion, error) {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
if os.IsNotExist(err) {
err = nil
}
// Error reading the directory
return WALNotExist, err
}
if len(names) == 0 {
// Empty WAL directory
return WALNotExist, nil
}
nameSet := types.NewUnsafeSet(names...)
if nameSet.ContainsAll([]string{"snap", "wal"}) {
// .../wal cannot be empty to exist.
if Exist(path.Join(dirpath, "wal")) {
return WALv0_5, nil
}
}
if nameSet.ContainsAll([]string{"snapshot", "conf", "log"}) {
return WALv0_4, nil
}
return WALUnknown, nil
}
func Exist(dirpath string) bool {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
@ -107,7 +67,7 @@ func checkWalNames(names []string) []string {
wnames := make([]string, 0)
for _, name := range names {
if _, _, err := parseWalName(name); err != nil {
log.Printf("wal: parse %s error: %v", name, err)
log.Printf("wal: ignored file %v in wal", name)
continue
}
wnames = append(wnames, name)

View File

@ -273,30 +273,28 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.
}
// Cut closes current file written and creates a new one ready to append.
// cut first creates a temp wal file and writes necessary headers into it.
// Then cut atomically renames the temp wal file to a wal file.
func (w *WAL) Cut() error {
// create a new wal file with name sequence + 1
// close old wal file
if err := w.sync(); err != nil {
return err
}
if err := w.f.Close(); err != nil {
return err
}
fpath := path.Join(w.dir, walName(w.seq+1, w.enti+1))
f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
ftpath := fpath + ".tmp"
// create a temp wal file with name sequence + 1, or truncate the existing one
ft, err := os.OpenFile(ftpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
l, err := fileutil.NewLock(f.Name())
if err != nil {
return err
}
err = l.Lock()
if err != nil {
return err
}
w.locks = append(w.locks, l)
if err = w.sync(); err != nil {
return err
}
w.f.Close()
// update writer and save the previous crc
w.f = f
w.seq++
w.f = ft
prevCrc := w.encoder.crc.Sum32()
w.encoder = newEncoder(w.f, prevCrc)
if err := w.saveCrc(prevCrc); err != nil {
@ -308,7 +306,45 @@ func (w *WAL) Cut() error {
if err := w.saveState(&w.state); err != nil {
return err
}
return w.sync()
// close temp wal file
if err := w.sync(); err != nil {
return err
}
if err := w.f.Close(); err != nil {
return err
}
// atomically move temp wal file to wal file
if err := os.Rename(ftpath, fpath); err != nil {
return err
}
// open the wal file and update writer again
f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
return err
}
w.f = f
prevCrc = w.encoder.crc.Sum32()
w.encoder = newEncoder(w.f, prevCrc)
// lock the new wal file
l, err := fileutil.NewLock(f.Name())
if err != nil {
return err
}
err = l.Lock()
if err != nil {
return err
}
w.locks = append(w.locks, l)
// increase the wal seq
w.seq++
log.Printf("wal: segmented wal file %v is created", fpath)
return nil
}
func (w *WAL) sync() error {
@ -320,27 +356,42 @@ func (w *WAL) sync() error {
return w.f.Sync()
}
// ReleaseLockTo releases the locks w is holding, which
// have index smaller or equal to the given index.
// ReleaseLockTo releases the locks, which has smaller index than the given index
// except the largest one among them.
// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release
// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
func (w *WAL) ReleaseLockTo(index uint64) error {
for _, l := range w.locks {
_, i, err := parseWalName(path.Base(l.Name()))
var smaller int
found := false
for i, l := range w.locks {
_, lockIndex, err := parseWalName(path.Base(l.Name()))
if err != nil {
return err
}
if i > index {
return nil
if lockIndex >= index {
smaller = i - 1
found = true
break
}
err = l.Unlock()
if err != nil {
return err
}
err = l.Destroy()
if err != nil {
return err
}
w.locks = w.locks[1:]
}
// if no lock index is greater than the release index, we can
// release lock upto the last one(excluding).
if !found && len(w.locks) != 0 {
smaller = len(w.locks) - 1
}
if smaller <= 0 {
return nil
}
for i := 0; i < smaller; i++ {
w.locks[i].Unlock()
w.locks[i].Destroy()
}
w.locks = w.locks[smaller:]
return nil
}

View File

@ -323,23 +323,23 @@ func TestRecoverAfterCut(t *testing.T) {
}
defer os.RemoveAll(p)
w, err := Create(p, []byte("metadata"))
md, err := Create(p, []byte("metadata"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
if err = w.SaveSnapshot(walpb.Snapshot{Index: uint64(i)}); err != nil {
if err = md.SaveSnapshot(walpb.Snapshot{Index: uint64(i)}); err != nil {
t.Fatal(err)
}
es := []raftpb.Entry{{Index: uint64(i)}}
if err = w.Save(raftpb.HardState{}, es); err != nil {
if err = md.Save(raftpb.HardState{}, es); err != nil {
t.Fatal(err)
}
if err = w.Cut(); err != nil {
if err = md.Cut(); err != nil {
t.Fatal(err)
}
}
w.Close()
md.Close()
if err := os.Remove(path.Join(p, walName(4, 4))); err != nil {
t.Fatal(err)
@ -435,6 +435,7 @@ func TestOpenNotInUse(t *testing.T) {
unlockIndex := uint64(5)
w.ReleaseLockTo(unlockIndex)
// 1,2,3 are available.
w2, err := OpenNotInUse(p, walpb.Snapshot{})
defer w2.Close()
if err != nil {
@ -444,8 +445,8 @@ func TestOpenNotInUse(t *testing.T) {
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
if g := ents[len(ents)-1].Index; g != unlockIndex {
t.Errorf("last index read = %d, want %d", g, unlockIndex)
if g := ents[len(ents)-1].Index; g != unlockIndex-2 {
t.Errorf("last index read = %d, want %d", g, unlockIndex-2)
}
}
@ -462,3 +463,62 @@ func TestSaveEmpty(t *testing.T) {
t.Errorf("buf.Bytes = %d, want 0", len(buf.Bytes()))
}
}
// TestReleaseLockTo creates ten segmented wal files, then checks that
// ReleaseLockTo frees the locks with index smaller than the given one
// while keeping the largest of those (and everything after it) locked.
func TestReleaseLockTo(t *testing.T) {
	p, err := ioutil.TempDir(os.TempDir(), "waltest")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(p)
	// create WAL
	w, err := Create(p, nil)
	defer w.Close()
	if err != nil {
		t.Fatal(err)
	}
	// make 10 separate files
	for i := 0; i < 10; i++ {
		es := []raftpb.Entry{{Index: uint64(i)}}
		if err = w.Save(raftpb.HardState{}, es); err != nil {
			t.Fatal(err)
		}
		if err = w.Cut(); err != nil {
			t.Fatal(err)
		}
	}
	// release the lock to 5
	unlockIndex := uint64(5)
	w.ReleaseLockTo(unlockIndex)
	// expected remaining are 4,5,6,7,8,9,10
	if len(w.locks) != 7 {
		t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 7)
	}
	for i, l := range w.locks {
		_, lockIndex, err := parseWalName(path.Base(l.Name()))
		if err != nil {
			t.Fatal(err)
		}
		if lockIndex != uint64(i+4) {
			t.Errorf("#%d: lockindex = %d, want %d", i, lockIndex, uint64(i+4))
		}
	}
	// release the lock to 15
	unlockIndex = uint64(15)
	w.ReleaseLockTo(unlockIndex)
	// expected remaining is 10
	if len(w.locks) != 1 {
		t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 1)
	}
	_, lockIndex, err := parseWalName(path.Base(w.locks[0].Name()))
	if err != nil {
		t.Fatal(err)
	}
	if lockIndex != uint64(10) {
		t.Errorf("lockindex = %d, want %d", lockIndex, 10)
	}
}