Compare commits

...

50 Commits

Author SHA1 Message Date
9d43462d17 version: bump up to 3.2.6
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-21 10:40:55 -07:00
78d68226e6 mvcc: sending events after restore
Fixes: #8411
2017-08-21 10:39:46 -07:00
e9d576c3d6 Documentation: fix broken link on FAQ
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-18 08:43:14 -07:00
1c578cd442 e2e: test booting etcd with multiple peer listeners 2017-08-18 08:43:06 -07:00
b97714b3e6 embed: associate peer serve() listener with corresponding peer
Fixes #8383
2017-08-17 14:16:56 -07:00
ce0a61ff67 dl_build: fix minor typo 2017-08-17 11:44:02 -07:00
74783a38ae docs: revising to match sidebar structure. 2017-08-16 17:03:28 -07:00
d372ff96a0 docs: link fix. 2017-08-16 17:03:12 -07:00
8c7b9db9cc docs: slight rearranging of top two sections. 2017-08-16 17:02:05 -07:00
f1fb342305 docs: adding an index for upgrade pages. 2017-08-16 17:01:49 -07:00
fa0f278783 embed: add 'enable-pprof' tag for config file
Fix https://github.com/coreos/etcd/issues/8402.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-15 11:52:48 -07:00
e197c14847 mvcc: test keys gauge is reloaded correctly on restore 2017-08-10 12:59:24 -07:00
e7bf5477de mvcc: reset keys gauge on restore
Fixes #8388
2017-08-10 12:59:19 -07:00
f81b72fd93 version: bump up to v3.2.5+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-04 11:30:57 -07:00
d0d1a87aa9 version: bump up to v3.2.5
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-04 08:40:50 -07:00
7c6a9a7317 contrib/raftexample: use bytes.Buffer.String (no 'string()')
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-04 08:40:29 -07:00
be8f102efb grpcproxy: forward PrevKv flag in Put 2017-08-04 07:32:17 -07:00
3003901447 integration: test Put with PrevKv=true
Was missing in proxy.
2017-08-04 07:32:11 -07:00
157cfac31b ctlv3/command: remove double-quote typos in fields printer
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-08-01 17:25:53 -07:00
40a1704e6f ctlv3: exit non-zero on unhealthy ep command 2017-07-31 16:00:23 -07:00
30981ecb0a e2e/docker: docker image for testing wildcard DNS 2017-07-24 09:54:55 -07:00
f65a11ced5 fixtures: generate wildcard DNS SAN cert
DNS: *.etcd.local
2017-07-24 09:54:55 -07:00
db4838d4eb transport: use reverse lookup to match wildcard DNS SAN
Fixes #8268
2017-07-24 09:54:55 -07:00
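
In outline, the reverse-lookup match works like this (an illustrative sketch, not etcd's transport code; `matchWildcardSAN` is a made-up helper):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// matchWildcardSAN reports whether any PTR record for ip resolves to a
// name covered by a wildcard SAN such as "*.etcd.local". A plain forward
// check cannot work here because only the connection's IP is known.
// Note: this sketch also accepts deeper subdomains, which a strict
// single-label wildcard match would reject.
func matchWildcardSAN(ip, san string) bool {
	if !strings.HasPrefix(san, "*.") {
		return false
	}
	suffix := san[1:] // "*.etcd.local" -> ".etcd.local"
	names, err := net.LookupAddr(ip)
	if err != nil {
		return false
	}
	for _, name := range names {
		name = strings.TrimSuffix(name, ".") // PTR names end with a dot
		if strings.HasSuffix(name, suffix) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matchWildcardSAN("127.0.0.1", "*.etcd.local"))
}
```
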
8ab42fb045 *: move v2http handlers without /v2 prefix to etcdhttp
Lets --enable-v2=false configurations provide /metrics, /health, etc.

Fixes #8167
2017-07-24 09:54:48 -07:00
ff9a0a3527 version: bump up to 3.2.4+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-24 09:14:34 -07:00
c31bec0f29 version: bump up to 3.2.4
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-19 08:37:30 -07:00
19fe4b0cac grpcproxy: return nil on receiving snapshot EOF
Gets "code = OutOfRange desc = EOF" errors otherwise.
2017-07-19 08:33:44 -07:00
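
The fix follows the usual Go stream-relay pattern (a sketch with assumed types, not the proxy's actual code):

```go
package main

import "io"

// snapshotStream abstracts the Recv side of a gRPC server-side stream.
type snapshotStream interface {
	Recv() ([]byte, error)
}

// relaySnapshot forwards snapshot chunks until the backend stream ends.
// Translating io.EOF into nil keeps gRPC from reporting the clean end of
// the stream as "code = OutOfRange desc = EOF".
func relaySnapshot(s snapshotStream, send func([]byte) error) error {
	for {
		chunk, err := s.Recv()
		if err == io.EOF {
			return nil // clean end of snapshot
		}
		if err != nil {
			return err
		}
		if err := send(chunk); err != nil {
			return err
		}
	}
}

func main() {}
```
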
a5d94fe229 integration: test embed.Etcd.Close with watch
Ensure 'Close' returns in time when there are open
connections (watch streams).

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-14 18:52:20 -07:00
e8f3cbf1c6 embed: wait up to request timeout for pending RPCs when closing
Both grpc.Server.Stop and grpc.Server.GracefulStop close the listeners
first, to stop accepting new connections. GracefulStop blocks until
all clients close their open transports (connections). Unary RPCs
only take a few seconds to finish. Stream RPCs, like watch, might never
close the connections from the client side, thus making the gRPC server
wait forever.

This patch still calls GracefulStop, but waits up to 10s before manually
closing the open transports.

Address https://github.com/coreos/etcd/issues/8224.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-14 18:52:20 -07:00
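
In outline, that shutdown pattern looks like this (a minimal sketch; names are illustrative, not the embed package's actual code):

```go
package main

import (
	"time"

	"google.golang.org/grpc"
)

// stopGRPCServer tries a graceful stop first, then falls back to a hard
// stop so stream RPCs (e.g. watches) cannot block shutdown forever.
func stopGRPCServer(s *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		s.GracefulStop() // closes listeners, waits for open transports
		close(done)
	}()
	select {
	case <-done: // all clients closed their connections in time
	case <-time.After(timeout):
		s.Stop() // force-close the remaining transports
	}
}

func main() {
	stopGRPCServer(grpc.NewServer(), 10*time.Second)
}
```
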
856502f788 version: bump up to 3.2.3+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-14 16:04:54 -07:00
ae23b0ef2f version: bump up to 3.2.3
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-13 12:09:48 -07:00
5ee89be616 testutil: whitelist WaitGroup.Done
Calling a WaitGroup.Done() in a defer will sometimes trigger the leak
detector since the WaitGroup.Wait() will unblock before the defer
block completes. If the leak detector runs before the Done() is
rescheduled, it will spuriously report the finishing Done() as a leak.
This happens enough in CI to be irritating; whitelist it and ignore.
2017-07-13 11:14:12 -07:00
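
The race is easy to reproduce in miniature (illustrative):

```go
package main

import "sync"

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done() // the counter hits zero inside Done...
	}()
	// ...so Wait may unblock while the goroutine above is still finishing
	// the deferred Done. A leak detector that scans goroutine stacks at
	// this instant still sees that goroutine and flags it as a leak.
	wg.Wait()
}
```
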
38373b342d test: sync with etcd-agent start in functional_pass
Fix https://github.com/coreos/etcd/issues/8211.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-13 11:14:03 -07:00
536a5f594b v3rpc: Let clients establish unlimited streams
As of grpc-go v1.2.0, the server side caps the number of concurrent
streams per client at 100 by default. That default makes it impossible
for third party proxies and custom clients to establish many streams.
2017-07-12 10:46:33 -07:00
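
Lifting the cap on the server side looks roughly like this (`grpc.MaxConcurrentStreams` is the real grpc-go option; treating `math.MaxUint32` as "unlimited" is the sketch's assumption):

```go
package main

import (
	"math"

	"google.golang.org/grpc"
)

func main() {
	// Without this option, grpc-go >= 1.2.0 allows at most 100
	// concurrent streams per client connection.
	srv := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32))
	_ = srv
}
```
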
49e6916e66 dev-guide: document using range_end for prefixes with json
Lack of a range_end example has caused some confusion.
2017-07-12 10:40:37 -07:00
b9b6f6f7c4 Documentation: refer to LeaseKeepAliveRequest for lease refresh 2017-07-12 10:40:26 -07:00
6ecbb3bbc5 version: bump up to 3.2.2+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-12 10:36:16 -07:00
cb2a496c4d version: bump up to 3.2.2
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-07-07 09:01:47 -07:00
fdf525a3fd dev-guide: update experimental APIs
No experimental APIs at the moment.

Fixes #8212
2017-07-07 09:01:30 -07:00
40468ab11f transport: accept connection if matched IP SAN but no DNS match
The IP SAN check would always do a DNS SAN check if DNS is given
and the connection's IP is verified. Instead, don't check DNS
entries if there's a matching IP.

Fixes #8206
2017-07-07 09:01:11 -07:00
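
The corrected order of checks can be sketched as follows (assumed shape, not etcd's transport code):

```go
package main

import (
	"crypto/x509"
	"errors"
	"net"
)

// checkCertSAN accepts the connection as soon as the remote IP matches an
// IP SAN; DNS SANs are only consulted when no IP SAN matched.
func checkCertSAN(cert *x509.Certificate, remoteIP net.IP, dnsNames []string) error {
	for _, san := range cert.IPAddresses {
		if san.Equal(remoteIP) {
			return nil // matching IP SAN: skip the DNS check entirely
		}
	}
	for _, name := range dnsNames {
		if err := cert.VerifyHostname(name); err == nil {
			return nil
		}
	}
	return errors.New("no matching SAN")
}

func main() {}
```
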
f8f79666d4 embed: connect json gateway with user-provided listen address
net.Listener says its address is [::] when given 0.0.0.0, breaking
hosts that have IPv6 disabled.

Fixes #8151
Fixes #7961
2017-07-07 09:00:40 -07:00
fefcf348f1 embed: share grpc connection for grpc json services 2017-07-07 09:00:32 -07:00
81d39a75ff fixtures: add gencerts.sh, generate CRL 2017-07-07 09:00:01 -07:00
8f2b48465f lease: stop lessors after tests
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-30 11:18:55 -07:00
026c1734b2 Documentation/faq: fix typo in flag names
Signed-off-by: Hui Kang <kangh@us.ibm.com>
2017-06-30 01:28:44 -07:00
81e1d03d02 Documentation/v2: 'etcd v2' to the title
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-30 01:28:20 -07:00
6171334595 benchmark: refactor watch benchmark 2017-06-27 07:35:01 -07:00
55de54a757 lessor: extend leases on promote if expires will be rate limited
Instead of unconditionally randomizing, extend leases on promotion
if too many leases expire within the same time span. If the server
has few leases or spread out expires, there will be no extension.

Squashed previous commits for https://github.com/coreos/etcd/pull/8149.

Author: Anthony Romano <anthony.romano@coreos.com>

This is a combination of 4 commits below:

lease: randomize expiry on initial refresh call

Randomize the very first expiry on lease recovery
to prevent recovered leases from expiring all at
the same time.

Address https://github.com/coreos/etcd/issues/8096.

integration: remove lease exist checking on randomized expiry

Lease with TTL 5 should be renewed with randomization,
thus it may still exist after 3 seconds.

lessor: extend leases on promote if expires will be rate limited

Instead of unconditionally randomizing, extend leases on promotion
if too many leases expire within the same time span. If the server
has few leases or spread out expires, there will be no extension.

Revert "integration: remove lease exist checking on randomized expiry"

This reverts commit 95bc33f37f. The new
lease extension algorithm should pass this test.
2017-06-23 13:31:59 -07:00
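
The extension idea can be sketched as bucketing expiries by second and pushing any overflow past the rate limit into later seconds (assumed shape; the real lessor differs):

```go
package main

import (
	"sort"
	"time"
)

// rateLimitExpiries spreads out lease expiries so that at most ratePerSec
// of them fall into any one second after promotion. If the leases are few
// or already spread out, nothing is extended.
func rateLimitExpiries(ttls []time.Duration, ratePerSec int) []time.Duration {
	if ratePerSec < 1 {
		ratePerSec = 1
	}
	out := append([]time.Duration(nil), ttls...)
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	counts := map[int64]int{} // expiry second -> leases already assigned
	for i, ttl := range out {
		sec := int64(ttl / time.Second)
		for counts[sec] >= ratePerSec {
			sec++ // bucket is full: extend this lease into the next second
		}
		counts[sec]++
		out[i] = time.Duration(sec) * time.Second
	}
	return out
}

func main() {
	// Three leases expiring in the same second, rate 1/s -> 1s, 2s, 3s.
	_ = rateLimitExpiries([]time.Duration{time.Second, time.Second, time.Second}, 1)
}
```
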
c14aad0ba6 lease: rate limit revoke runLoop
Fix https://github.com/coreos/etcd/issues/8097.

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-23 13:28:33 -07:00
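
A throttled revoke loop looks roughly like this (illustrative; the cap and helpers are assumptions):

```go
package main

import "time"

const revokePerTick = 500 // assumed cap per 500ms tick

// runRevokeLoop revokes expired leases in bounded batches so a burst of
// expirations cannot monopolize the server.
func runRevokeLoop(expired func() []int64, revoke func([]int64), stop <-chan struct{}) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			ls := expired()
			if len(ls) > revokePerTick {
				ls = ls[:revokePerTick] // leave the rest for the next tick
			}
			revoke(ls)
		}
	}
}

func main() {}
```
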
91ccc93042 version: bump up to v3.2.1+git
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2017-06-23 10:33:24 -07:00
104 changed files with 1718 additions and 647 deletions

View File

@ -24,6 +24,11 @@ curl -L http://localhost:2379/v3alpha/kv/put \
curl -L http://localhost:2379/v3alpha/kv/range \
-X POST -d '{"key": "Zm9v"}'
# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"},"kvs":[{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}],"count":"1"}
# get all keys prefixed with "foo"
curl -L http://localhost:2379/v3alpha/kv/range \
-X POST -d '{"key": "Zm9v", "range_end": "Zm9w"}'
# {"header":{"cluster_id":"12585971608760269493","member_id":"13847567121247652255","revision":"2","raft_term":"3"},"kvs":[{"key":"Zm9v","create_revision":"2","mod_revision":"2","version":"1","value":"YmFy"}],"count":"1"}
```
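
The `range_end` for a prefix is the prefix with its last byte incremented, base64-encoded for the JSON gateway (`"foo"` → `Zm9v`, `"fop"` → `Zm9w`). A small Go helper (illustrative; clientv3 ships a similar one):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// prefixRangeEnd returns the range_end that selects every key with the
// given prefix: the prefix with its last byte incremented ("foo" -> "fop").
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0} // prefix is all 0xff: range to the end of the keyspace
}

func main() {
	enc := base64.StdEncoding
	fmt.Println(enc.EncodeToString([]byte("foo")))                 // Zm9v
	fmt.Println(enc.EncodeToString(prefixRangeEnd([]byte("foo")))) // Zm9w
}
```
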
Use `curl` to watch a key:

View File

@ -4,8 +4,4 @@ For the most part, the etcd project is stable, but we are still moving fast! We
## The current experimental API/features are:
-- [gateway][gateway]: beta, to be stable in 3.2 release
-- [gRPC proxy][grpc-proxy]: alpha, to be stable in 3.2 release
-[gateway]: ../op-guide/gateway.md
-[grpc-proxy]: ../op-guide/grpc_proxy.md
+(none currently)

View File

@ -2,7 +2,7 @@
## System requirements
-The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. than For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
+The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE.
## Download the pre-built binary

View File

@ -47,12 +47,19 @@ Administrators who need to create reliable and scalable key-value stores for the
- [Amazon Web Services][aws_platform]
- [FreeBSD][freebsd_platform]
-### Upgrading and compatibility
+### Security
- [Migrate applications from using API v2 to API v3][v2_migration]
- [Upgrading a v2.3 cluster to v3.0][v3_upgrade]
- [Upgrading a v3.0 cluster to v3.1][v31_upgrade]
- [Upgrading a v3.1 cluster to v3.2][v32_upgrade]
- [TLS][security]
- [Role-based access control][authentication]
### Maintenance and troubleshooting
- [Frequently asked questions][common questions]
- [Monitoring][monitoring]
- [Maintenance][maintenance]
- [Failure modes][failures]
- [Disaster recovery][recovery]
- [Upgrading][upgrading]
## Learning
@ -106,8 +113,6 @@ Answers to [common questions] about etcd.
[freebsd_platform]: platforms/freebsd.md
[aws_platform]: platforms/aws.md
[experimental]: dev-guide/experimental_apis.md
[v3_upgrade]: upgrades/upgrade_3_0.md
[v31_upgrade]: upgrades/upgrade_3_1.md
[v32_upgrade]: upgrades/upgrade_3_2.md
[authentication]: op-guide/authentication.md
[auth_design]: learning/auth_design.md
[upgrading]: upgrades/upgrading-etcd.md

View File

@ -8,11 +8,11 @@
### Configuration
-#### What is the difference between advertise-urls and listen-urls?
+#### What is the difference between listen-<client,peer>-urls, advertise-client-urls or initial-advertise-peer-urls?
-`listen-urls` specifies the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address.
+`listen-client-urls` and `listen-peer-urls` specify the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address.
-`advertise-urls` specifies the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines.
+`advertise-client-urls` and `initial-advertise-peer-urls` specify the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines.
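
With the embed package the distinction looks roughly like this (field names as in etcd 3.2's `embed.Config`; treat as a sketch):

```go
package main

import (
	"net/url"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	listen, _ := url.Parse("http://0.0.0.0:2379")  // bind on all interfaces
	adv, _ := url.Parse("http://10.240.0.17:2379") // address clients can actually reach
	cfg.LCUrls = []url.URL{*listen}                // listen-client-urls
	cfg.ACUrls = []url.URL{*adv}                   // advertise-client-urls
}
```
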
### Deployment
@ -107,7 +107,7 @@ Try the [benchmark] tool. Current [benchmark results][benchmark-result] are avai
#### What does the etcd warning "apply entries took too long" mean?
After a majority of etcd members agree to commit a request, each etcd server applies the request to its data store and persists the result to disk. Even with a slow mechanical disk or a virtualized network disk, such as Amazon's EBS or Google's PD, applying a request should normally take fewer than 50 milliseconds. If the average apply duration exceeds 100 milliseconds, etcd will warn that entries are taking too long to apply.
Usually this issue is caused by a slow disk. The disk could be experiencing contention among etcd and other applications, or the disk is simply too slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [backend_commit_duration_seconds][backend_commit_metrics] (p99 duration should be less than 25ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using a faster disk will typically solve the problem.
The second most common cause is CPU starvation. If monitoring of the machine's CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to a dedicated machine, increasing process resource isolation via cgroups, or renicing the etcd server process to a higher priority can usually solve the problem.

View File

@ -449,7 +449,7 @@ message LeaseRevokeRequest {
### Keep alives
-Leases are refreshed using a bi-directional stream created with the `LeaseKeepAlive` API call. When the client wishes to refresh a lease, it sends a `LeaseGrantRequest` over the stream:
+Leases are refreshed using a bi-directional stream created with the `LeaseKeepAlive` API call. When the client wishes to refresh a lease, it sends a `LeaseKeepAliveRequest` over the stream:
```protobuf
message LeaseKeepAliveRequest {

View File

@ -1,17 +1,17 @@
-# Why etcd
+# etcd versus other key-value stores
The name "etcd" originated from two ideas, the unix "/etc" folder and "d"istibuted systems. The "/etc" folder is a place to store configuration data for a single system whereas etcd stores configuration information for large scale distributed systems. Hence, a "d"istributed "/etc" is "etcd".
-etcd stores metadata in a consistent and fault-tolerant way. Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness.
+etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. etcd stores metadata in a consistent and fault-tolerant way. An etcd cluster is meant to provide key-value storage with best of class stability, reliability, scalability and performance.
+Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Many [organizations][production-users] use etcd to implement production systems such as container schedulers, service discovery services, and distributed data storage. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness.
## Use cases
-- Container Linux by CoreOS: Application running on [Container Linux][container-linux] gets automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
+- Container Linux by CoreOS: Applications running on [Container Linux][container-linux] get automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. Locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time.
- [Kubernetes][kubernetes] stores configuration data into etcd for service discovery and cluster management; etcd's consistency is crucial for correctly scheduling and operating services. The Kubernetes API server persists cluster state into etcd. It uses etcd's watch API to monitor the cluster and roll out critical configuration changes.
## etcd versus other key-value stores
When deciding whether to use etcd as a key-value store, it's worth keeping in mind etcd's main goal. Namely, etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. An etcd cluster is meant to provide consistent key-value storage with best of class stability, reliability, scalability and performance. The upshot of this focus is many [organizations][production-users] already use etcd to implement production systems such as container schedulers, service discovery services, distributed data storage, and more.
## Comparison chart
Perhaps etcd already seems like a good fit, but as with all technological decisions, proceed with caution. Please note this documentation is written by the etcd team. Although the ideal is a disinterested comparison of technology and features, the authors' expertise and biases obviously favor etcd. Use only as directed.
@ -84,7 +84,7 @@ For distributed coordination, choosing etcd can help prevent operational headach
[tidb]: https://github.com/pingcap/tidb
[etcd-v3lock]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb
[etcd-v3election]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb
-[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname
+[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname-command-arg1-arg2-
[etcd-etcdctl-elect]: ../../etcdctl/README.md#elect-options-election-name-proposal
[etcd-mvcc]: data_model.md
[etcd-recipe]: https://godoc.org/github.com/coreos/etcd/contrib/recipes
@ -113,4 +113,3 @@ For distributed coordination, choosing etcd can help prevent operational headach
[container-linux]: https://coreos.com/why
[locksmith]: https://github.com/coreos/locksmith
[kubernetes]: http://kubernetes.io/docs/whatisk8s

View File

@ -0,0 +1,19 @@
# Upgrading etcd clusters and applications
This section contains documents specific to upgrading etcd clusters and applications.
## Moving from etcd API v2 to API v3
* [Migrate applications from using API v2 to API v3][migrate-apps]
## Upgrading an etcd v3.x cluster
* [Upgrade etcd from 3.0 to 3.1][upgrade-3-1]
* [Upgrade etcd from 3.1 to 3.2][upgrade-3-2]
## Upgrading from etcd v2.3
* [Upgrade a v2.3 cluster to v3.0][upgrade-cluster]
[migrate-apps]: ../op-guide/v2-migration.md
[upgrade-cluster]: upgrade_3_0.md
[upgrade-3-1]: upgrade_3_1.md
[upgrade-3-2]: upgrade_3_2.md

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Snapshot Migration
You can migrate a snapshot of your data from a v0.4.9+ cluster into a new etcd 2.2 cluster using a snapshot migration. After snapshot migration, the etcd indexes of your data will change. Many etcd applications rely on these indexes to behave correctly. This operation should only be done while all etcd applications are stopped.

View File

@ -1,165 +1,85 @@
-# etcd2
+# Documentation
[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/etcd)](https://goreportcard.com/report/github.com/coreos/etcd)
[![Build Status](https://travis-ci.org/coreos/etcd.svg?branch=master)](https://travis-ci.org/coreos/etcd)
[![Build Status](https://semaphoreci.com/api/v1/coreos/etcd/branches/master/shields_badge.svg)](https://semaphoreci.com/coreos/etcd)
[![Docker Repository on Quay.io](https://quay.io/repository/coreos/etcd-git/status "Docker Repository on Quay.io")](https://quay.io/repository/coreos/etcd-git)
etcd is a distributed key-value store designed to reliably and quickly preserve and provide access to critical data. It enables reliable distributed coordination through distributed locking, leader elections, and write barriers. An etcd cluster is intended for high availability and permanent data storage and retrieval.
**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
This is the etcd v2 documentation set. For more recent versions, please see the [etcd v3 guides][etcd-v3].
![etcd Logo](../../logos/etcd-horizontal-color.png)
## Communicating with etcd v2
etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being:
Reading and writing into the etcd keyspace is done via a simple, RESTful HTTP API, or using language-specific libraries that wrap the HTTP API with higher level primitives.
* *Simple*: curl'able user-facing API (HTTP+JSON)
* *Secure*: optional SSL client cert authentication
* *Fast*: benchmarked 1000s of writes/s per instance
* *Reliable*: properly distributed using Raft
### Reading and Writing
etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log.
- [Client API Documentation][api]
- [Libraries, Tools, and Language Bindings][libraries]
- [Admin API Documentation][admin-api]
- [Members API][members-api]
etcd is used [in production by many companies](./production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], and many others.
### Security, Auth, Access control
See [etcdctl][etcdctl] for a simple command line client.
Or feel free to just use `curl`, as in the examples below.
- [Security Model][security]
- [Auth and Security][auth_api]
- [Authentication Guide][authentication]
[raft]: https://raft.github.io/
[k8s]: http://kubernetes.io/
[fleet]: https://github.com/coreos/fleet
[locksmith]: https://github.com/coreos/locksmith
[vulcand]: https://github.com/vulcand/vulcand
[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
## etcd v2 Cluster Administration
## Getting Started
Configuration values are distributed within the cluster for your applications to read. Values can be changed programmatically and smart applications can reconfigure automatically. You'll never again have to run a configuration management tool on every machine in order to change a single config value.
### Getting etcd
### General Info
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, AppC (ACI), and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
- [etcd Proxies][proxy]
- [Production Users][production-users]
- [Admin Guide][admin_guide]
- [Configuration Flags][configuration]
- [Frequently Asked Questions][faq]
For those wanting to try the very latest version, you can build the latest version of etcd from the `master` branch.
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.5+ is required).
All development occurs on `master`, including new features and bug fixes.
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
### Initial Setup
[github-release]: https://github.com/coreos/etcd/releases/
[branch-management]: branch_management.md
- [Tuning etcd Clusters][tuning]
- [Discovery Service Protocol][discovery_protocol]
- [Running etcd under Docker][docker_guide]
### Running etcd
### Live Reconfiguration
First start a single-member cluster of etcd:
- [Runtime Configuration][runtime-configuration]
```sh
./bin/etcd
```
### Debugging etcd
This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
- [Metrics Collection][metrics]
- [Error Code][errorcode]
- [Reporting Bugs][reporting_bugs]
Next, let's set a single key, and then retrieve it:
### Migration
```
curl -L http://127.0.0.1:2379/v2/keys/mykey -XPUT -d value="this is awesome"
curl -L http://127.0.0.1:2379/v2/keys/mykey
```
- [Upgrade etcd to 2.3][upgrade_2_3]
- [Upgrade etcd to 2.2][upgrade_2_2]
- [Upgrade to etcd 2.1][upgrade_2_1]
- [Snapshot Migration (0.4.x to 2.x)][04_to_2_snapshot_migration]
- [Backward Compatibility][backward_compatibility]
You have successfully started an etcd and written a key to the store.
### etcd TCP ports
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. To maintain compatibility, some etcd configuration and documentation continues to refer to the legacy ports 4001 and 7001, but all new etcd use and discussion should adopt the IANA-assigned ports. The legacy ports 4001 and 7001 will be fully deprecated, and support for their use removed, in future etcd releases.
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
### Running local etcd cluster
First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
Our [Procfile script](../../V2Procfile) will set up a local example cluster. You can start it with:
```sh
goreman start
```
This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd proxy `proxy`, which run locally and compose a cluster.
You can write a key to the cluster and retrieve the value back from any member or proxy.
### Next Steps
Now it's time to dig into the full etcd API and other guides.
- Explore the full [API][api].
- Set up a [multi-machine cluster][clustering].
- Learn the [config format, env variables and flags][configuration].
- Find [language bindings and tools][libraries-and-tools].
- Use TLS to [secure an etcd cluster][security].
- [Tune etcd][tuning].
- [Upgrade from 0.4.9+ to 2.2.0][upgrade].
[api]: ./api.md
[clustering]: ./clustering.md
[configuration]: ./configuration.md
[libraries-and-tools]: ./libraries-and-tools.md
[security]: ./security.md
[tuning]: ./tuning.md
[upgrade]: ./04_to_2_snapshot_migration.md
## Contact
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](../../ROADMAP.md)
- Bugs: [issues](https://github.com/coreos/etcd/issues)
## Contributing
See [CONTRIBUTING](../../CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
## Reporting bugs
See [reporting bugs](reporting_bugs.md) for details about reporting any issue you may encounter.
## Known bugs
[GH518](https://github.com/coreos/etcd/issues/518) is a known bug. The issue is that:
```
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d value=bar
curl http://127.0.0.1:2379/v2/keys/foo -XPUT -d dir=true -d prevExist=true
```
If the previous node is a key and a client tries to overwrite it with `dir=true`, it does not give a warning such as `Not a directory`. Instead, the key is set to an empty value.
## Project Details
### Versioning
#### Service Versioning
etcd uses [semantic versioning](http://semver.org).
New minor versions may add additional features to the API.
You can get the version of etcd by issuing a request to /version:
```sh
curl -L http://127.0.0.1:2379/version
```
#### API Versioning
The `v2` API responses should not change after the 2.0.0 release but new features will be added over time.
#### 32-bit and other unsupported systems
etcd has known issues on 32-bit systems due to a bug in the Go runtime. See #[358][358] for more information.
To avoid inadvertently running a possibly unstable etcd server, `etcd` on unsupported architectures will print
a warning message and immediately exit if the environment variable `ETCD_UNSUPPORTED_ARCH` is not set to
the target architecture.
Currently only the amd64 architecture is officially supported by `etcd`.
[358]: https://github.com/coreos/etcd/issues/358
### License
etcd is under the Apache 2.0 license. See the [LICENSE](../../LICENSE) file for details.
[etcd-v3]: ../docs.md
[api]: api.md
[libraries]: libraries-and-tools.md
[admin-api]: other_apis.md
[members-api]: members_api.md
[security]: security.md
[auth_api]: auth_api.md
[authentication]: authentication.md
[proxy]: proxy.md
[production-users]: production-users.md
[admin_guide]: admin_guide.md
[configuration]: configuration.md
[faq]: faq.md
[tuning]: tuning.md
[discovery_protocol]: discovery_protocol.md
[docker_guide]: docker_guide.md
[runtime-configuration]: runtime-configuration.md
[metrics]: metrics.md
[errorcode]: errorcode.md
[reporting_bugs]: reporting_bugs.md
[upgrade_2_3]: upgrade_2_3.md
[upgrade_2_2]: upgrade_2_2.md
[upgrade_2_1]: upgrade_2_1.md
[04_to_2_snapshot_migration]: 04_to_2_snapshot_migration.md
[backward_compatibility]: backward_compatibility.md

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Administration
## Data Directory
@ -8,7 +13,7 @@ When first started, etcd stores its configuration into a data directory specifie
Configuration is stored in the write ahead log and includes: the local member ID, cluster ID, and initial cluster configuration.
The write ahead log and snapshot files are used during member operation and to recover after a restart.
Having a dedicated disk to store wal files can improve the throughput and stabilize the cluster.
It is highly recommended to dedicate a wal disk and set `--wal-dir` to point to a directory on that device for a production cluster deployment.
If a member's data directory is ever lost or corrupted then the user should [remove][remove-a-member] the etcd member from the cluster using the `etcdctl` tool.
@ -51,7 +56,7 @@ $ curl -L http://127.0.0.1:2379/health
You can also use etcdctl to check the cluster-wide health information. It will contact all the members of the cluster and collect the health information for you.
```
$./etcdctl cluster-health
member 8211f1d0f64f3269 is healthy: got healthy result from http://127.0.0.1:12379
member 91bc3c398fb3c146 is healthy: got healthy result from http://127.0.0.1:22379
member fd422379fda50e48 is healthy: got healthy result from http://127.0.0.1:32379

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# etcd API
## Running a Single Machine Cluster
@ -318,7 +323,7 @@ The first terminal should get the notification and return with the same response
However, the watch command can do more than this.
Using the index, we can watch for commands that have happened in the past.
This is useful for ensuring you don't miss events between watch commands.
Typically, we watch again from the `modifiedIndex` + 1 of the node we got.
Let's try to watch for the set command of index 7 again:
@ -338,13 +343,13 @@ curl 'http://127.0.0.1:2379/v2/keys/foo?wait=true&waitIndex=8'
Then even if etcd is on index 9 or 800, the first event to occur to the `/foo`
key between 8 and the current index will be returned.
**Note**: etcd only keeps the responses of the most recent 1000 events across all etcd keys.
It is recommended to send the response to another thread to process immediately
instead of blocking the watch while processing the result.
#### Watch from cleared event index
If we miss all the 1000 events, we need to recover the current state of the
watching key space through a get and then start to watch from the
`X-Etcd-Index` + 1.
@ -366,7 +371,7 @@ To start watch, first we need to fetch the current state of key `/foo`:
curl 'http://127.0.0.1:2379/v2/keys/foo' -vv
```
```
< HTTP/1.1 200 OK
< Content-Type: application/json
< X-Etcd-Cluster-Id: 7e27652122e8b2ae
@ -375,7 +380,7 @@ curl 'http://127.0.0.1:2379/v2/keys/foo' -vv
< X-Raft-Term: 2
< Date: Mon, 05 Jan 2015 18:54:43 GMT
< Transfer-Encoding: chunked
<
{"action":"get","node":{"key":"/foo","value":"bar","modifiedIndex":7,"createdIndex":7}}
```

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# etcd3 API
TODO: API doc

View File

@ -1,13 +1,18 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# v2 Auth and Security
## etcd Resources
There are three types of resources in etcd
1. permission resources: users and roles in the user store
2. key-value resources: key-value pairs in the key-value store
3. settings resources: security settings, auth settings, and dynamic etcd cluster settings (election/heartbeat)
### Permission Resources
#### Users
A user is an identity to be authenticated. Each user can have multiple roles. The user has a capability (such as reading or writing) on the resource if one of the roles has that capability.
@ -15,7 +20,7 @@ A user is an identity to be authenticated. Each user can have multiple roles. Th
A user named `root` is required before authentication can be enabled, and it always has the ROOT role. The ROOT role can be granted to multiple users, but `root` is required for recovery purposes.
#### Roles
Each role has exactly one associated Permission List. A permission list exists for each permission on key-value resources.
The special static ROOT (named `root`) role has full permissions on all key-value resources, the permission to manage user resources and settings resources. Only the ROOT role has the permission to manage user resources and modify settings resources. The ROOT role is built-in and does not need to be created.
@ -30,8 +35,8 @@ A Permission List is a list of allowed patterns for that particular permission (
### Key-Value Resources
A key-value resource is a key-value pair in the store. Given a list of matching patterns, permission for any given key in a request is granted if any of the patterns in the list match.
Only prefixes or exact keys are supported. A prefix permission string ends in `*`.
A permission on `/foo` is for that exact key or directory, not its children or recursively. `/foo*` is a prefix that matches `/foo` recursively, and all keys thereunder, and keys with that prefix (e.g. `/foobar`. Contrast to the prefix `/foo/*`). `*` alone is permission on the full keyspace.
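
The matching rule can be written out in a few lines of Go (an illustration of the rule above, not etcd's implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// permitted reports whether a permission pattern covers key: "*" alone
// covers the full keyspace, a trailing "*" is a prefix match, anything
// else must match exactly.
func permitted(pattern, key string) bool {
	if pattern == "*" {
		return true
	}
	if strings.HasSuffix(pattern, "*") {
		return strings.HasPrefix(key, strings.TrimSuffix(pattern, "*"))
	}
	return pattern == key
}

func main() {
	fmt.Println(permitted("/foo*", "/foobar")) // true
	fmt.Println(permitted("/foo", "/foobar"))  // false
}
```
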
### Settings Resources
@ -66,7 +71,7 @@ An Error JSON corresponds to:
}
#### Enable and Disable Authentication
**Get auth status**
GET /v2/auth/enable
@ -215,8 +220,8 @@ PUT /v2/auth/users/charlie
Sent Headers:
Authorization: Basic <BasicAuthString>
Put Body:
JSON struct, above, matching the appropriate name
* Starting password and roles when creating.
* Grant/Revoke/Password filled in when updating (to grant roles, revoke roles, or change the password).
Possible Status Codes:
200 OK
@ -345,7 +350,7 @@ PUT /v2/auth/roles/rkt
401 Unauthorized
404 Not Found (update non-existent roles)
409 Conflict (when granting duplicated permission or revoking non-existent permission)
200 Body:
JSON state of the role
**Remove A Role**

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Authentication Guide
## Overview
@ -14,7 +19,7 @@ There is one special user, `root`, and there are two special roles, `root` and `
### User `root`
User `root` must be created before security can be activated. It has the `root` role and allows for the changing of anything inside etcd. The idea behind the `root` user is for recovery purposes -- a password is generated and stored somewhere -- and the root role is granted to the administrator accounts on the system. In the future, for troubleshooting and recovery, we will need to assume some access to the system, and future documentation will assume this root user (though anyone with the role will suffice).
### Role `root`
@ -104,7 +109,7 @@ $ etcdctl role grant myrolename -path '/foo/bar' -write
$ etcdctl role grant myrolename -path '/pub/*' -readwrite
```
Beware that
```
# Give full access to keys under /pub??
@ -133,12 +138,12 @@ $ etcdctl role remove myrolename
## Enabling authentication
The minimal steps to enabling auth are as follows. The administrator can set up users and roles before or after enabling authentication, as a matter of preference.
Make sure the root user is created:
```
$ etcdctl user add root
New password:
```

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Backward Compatibility
The main goal of the etcd 2.0 release is to improve cluster safety around bootstrapping and dynamic reconfiguration. To do this, we deprecated the old error-prone APIs and provided a new set of APIs.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# Benchmarks
etcd benchmarks will be published regularly and tracked for each release below:

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
## Physical machines
GCE n1-highcpu-2 machine type

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# Benchmarking etcd v2.2.0
## Physical Machines

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
## Physical machines
GCE n1-highcpu-2 machine type

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
## Physical machine
GCE n1-standard-2 machine type

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
## Physical machines
GCE n1-highcpu-2 machine type

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# Watch Memory Usage Benchmark
*NOTE*: The watch features are under active development, and their memory usage may change as that development progresses. We do not expect it to significantly increase beyond the figures stated below.
@ -5,10 +10,10 @@
A primary goal of etcd is supporting a very large number of watchers doing a massively large amount of watching. etcd aims to support O(10k) clients, O(100K) watch streams (O(10) streams per client) and O(10M) total watchings (O(100) watching per stream). The memory consumed by each individual watching accounts for the largest portion of etcd's overall usage, and is therefore the focus of current and future optimizations.
Three related components of etcd watch consume physical memory: each `grpc.Conn`, each watch stream, and each instance of the watching activity. `grpc.Conn` maintains the actual TCP connection and other gRPC connection state. Each `grpc.Conn` consumes O(10kb) of memory, and might have multiple watch streams attached.
Each watch stream is an independent HTTP2 connection which consumes another O(10kb) of memory.
Multiple watchings might share one watch stream.
Watching is the actual struct that tracks the changes on the key-value store. Each watching should only consume < O(1kb).

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# Storage Memory Usage Benchmark
<!---todo: link storage to storage design doc-->
@ -60,7 +65,7 @@ GCE n1-standard-2 machine type
In this test, we only benchmark the memory usage of the in-memory index. The goal is to find `c1` and `c2` mentioned above and to understand the hard limit of memory consumption of the storage.
We calculate the memory consumption via the Go runtime.ReadMemStats. We calculate the total allocated bytes difference before creating the index and after creating the index. It cannot perfectly reflect the memory usage of the in-memory index itself but can show the rough consumption pattern.
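
Concretely, the measurement pattern is (illustrative):

```go
package main

import (
	"fmt"
	"runtime"
)

// allocatedBytes returns the cumulative bytes allocated so far; the
// difference between two readings approximates the cost of work done
// in between.
func allocatedBytes() uint64 {
	var ms runtime.MemStats
	runtime.GC() // settle the heap so deltas are meaningful
	runtime.ReadMemStats(&ms)
	return ms.TotalAlloc
}

func main() {
	before := allocatedBytes()
	index := make(map[string][]int64) // stand-in for the in-memory index
	for i := 0; i < 100000; i++ {
		index[fmt.Sprintf("key-%d", i)] = []int64{int64(i)}
	}
	after := allocatedBytes()
	fmt.Printf("index cost ~ %d bytes\n", after-before)
	_ = index
}
```
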
| N | versions | key size | memory usage |
|------|----------|----------|--------------|

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Branch Management
## Guide

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Clustering Guide
## Overview

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Configuration Flags
etcd is configurable through command-line flags and environment variables. Options set on the command line take precedence over those from the environment.

View File

@ -1,8 +1,13 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# etcd release guide
This guide describes how to release a new version of etcd.
The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date when making changes to the release process.
## Prepare Release

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Discovery Service Protocol
The discovery service protocol helps new etcd members discover all other members during the cluster bootstrap phase using a shared discovery URL.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Running etcd under Docker
The following guide will show you how to run etcd under Docker using the [static bootstrap process](clustering.md#static).

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Error Code

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# FAQ
## 1) Why can an etcd client read an old version of data when a majority of the etcd cluster members are down?

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Glossary
This document defines the various terms used in etcd documentation, command line and source code.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# FAQ
## Initial Bootstrapping UX

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Versioning
Goal: We want to be able to upgrade an individual peer in an etcd cluster to a newer version of etcd.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Libraries and Tools
**Tools**

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Members API
* [List members](#list-members)

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Metrics
etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset.
@ -14,9 +19,9 @@ The metrics under the `etcd` prefix are for monitoring and alerting. They are st
### http requests
These metrics describe the serving of requests (non-watch events) served by etcd members in non-proxy mode: total
incoming requests, request failures and processing latency (inc. raft rounds for storage). They are useful for tracking
user-generated traffic hitting the etcd cluster.
All these metrics are prefixed with `etcd_http_`
@ -28,20 +33,20 @@ All these metrics are prefixed with `etcd_http_`
Example Prometheus queries that may be useful from these metrics (across all etcd members):
* `sum(rate(etcd_http_failed_total{job="etcd"}[1m])) by (method) / sum(rate(etcd_http_events_received_total{job="etcd"}[1m])) by (method)`
Shows the fraction of events that failed by HTTP method across all members, across a time window of `1m`.
* `sum(rate(etcd_http_received_total{job="etcd",method="GET"}[1m])) by (method)`
`sum(rate(etcd_http_received_total{job="etcd",method!="GET"}[1m])) by (method)`
Shows the rate of successful readonly/write queries across all servers, across a time window of `1m`.
* `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method="GET"}[5m]) ) by (le))`
`histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method!="GET"}[5m]) ) by (le))`
Show the 0.90-tile latency (in seconds) of read/write (respectively) event handling across all members, with a window of `5m`.
### proxy
@ -56,21 +61,21 @@ All these metrics are prefixed with `etcd_proxy_`
| requests_total | Total number of requests by this proxy instance. | Counter(method) |
| handled_total | Total number of fully handled requests, with responses from etcd members. | Counter(method) |
| dropped_total | Total number of dropped requests due to forwarding errors to etcd members.  | Counter(method,error) |
| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. | Histogram(method) |
Example Prometheus queries that may be useful from these metrics (across all etcd servers):
* `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)`
Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`.
* `histogram_quantile(0.9, sum(rate(handling_duration_seconds{job="etcd",method="GET"}[5m])) by (le))`
`histogram_quantile(0.9, sum(rate(handling_duration_seconds{job="etcd",method!="GET"}[5m])) by (le))`
Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`.
* `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)`
Number of failed requests on the proxy. This should be 0; spikes here indicate connectivity issues to the etcd cluster.
## etcd_debugging namespace metrics

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Miscellaneous APIs
* [Getting the etcd version](#getting-the-etcd-version)

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# FreeBSD
Starting with version 0.1.2 both etcd and etcdctl have been ported to FreeBSD and can

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Production Users
This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check in on your experience and update this list.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Proxy
etcd can run as a transparent proxy. Doing so allows for easy discovery of etcd within your infrastructure, since it can run on each machine as a local service. In this mode, etcd acts as a reverse proxy and forwards client requests to an active etcd cluster. The etcd proxy does not participate in the consensus replication of the etcd cluster, thus it neither increases the resilience nor decreases the write performance of the etcd cluster.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Reporting Bugs
If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../../docs.md#documentation
# Overview
The etcd v3 API is designed to give users a more efficient and cleaner abstraction compared to etcd v2. There are a number of semantic and protocol changes in this new API. For an overview [see Xiang Li's video](https://youtu.be/J5AioGtEPeQ?t=211).

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Runtime Reconfiguration
etcd comes with support for incremental runtime reconfiguration, which allows users to update the membership of the cluster at run time.
@ -61,9 +66,9 @@ A wrongly updated client URL will not affect the health of the etcd cluster.
#### Update advertise peer URLs
If you would like to update the advertise peer URLs of a member, you have to first update
it explicitly via member command and then restart the member. The additional action is required
since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster.
To update the peer URLs, first, we need to find the target member's ID. You can list all members with `etcdctl`:

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Design of Runtime Reconfiguration
Runtime reconfiguration is one of the hardest and most error-prone features in a distributed system, especially in a consensus-based system like etcd.
@ -26,21 +31,21 @@ We think runtime reconfiguration should be a low frequent operation. We made the
If a cluster permanently loses a majority of its members, a new cluster will need to be started from an old data directory to recover the previous state.
It is entirely possible to force removing the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead, or you force-remove different members through different members in the same cluster, you will end up with a diverged cluster with the same clusterID. This is very dangerous and hard to debug/fix afterwards.
If you have a correct deployment, the possibility of permanent majority lose is very low. But it is a severe enough problem that worth special care. We strongly suggest you to read the [disaster recovery documentation][disaster-recovery] and prepare for permanent majority lose before you put etcd into production.
## Do Not Use Public Discovery Service For Runtime Reconfiguration
The public discovery service should only be used for bootstrapping a cluster. To join a member to an existing cluster, use the runtime reconfiguration API.
The discovery service is designed for bootstrapping an etcd cluster in a cloud environment, where you do not know the IP addresses of all the members beforehand. After you successfully bootstrap a cluster, the IP addresses of all the members are known. Technically, you should not need the discovery service any more.
Using the public discovery service may seem like a convenient way to do runtime reconfiguration, since the discovery service already holds all the cluster configuration information. However, relying on the public discovery service brings trouble:
1. it introduces external dependencies for the entire life-cycle of your cluster, not just bootstrap time. If there is a network issue between your cluster and the public discovery service, your cluster will suffer from it.
2. the public discovery service must reflect the correct runtime configuration of your cluster throughout its life-cycle. It has to provide a security mechanism to prevent bad actions, and that is hard.
3. the public discovery service has to keep tens of thousands of cluster configurations. Our public discovery service backend is not ready for that workload.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Security Model
etcd supports SSL/TLS as well as authentication through client certificates, both for clients to server as well as peer (server to server / cluster) communication.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Tuning
The default settings in etcd should work well for installations on a local network where the average network latency is low.

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Upgrade etcd to 2.1
In the general case, upgrading from etcd 2.0 to 2.1 can be a zero-downtime, rolling upgrade:
@ -12,11 +17,11 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this
To upgrade an existing etcd deployment to 2.1, you must be running 2.0. If you're running a version of etcd before 2.0, you must upgrade to [2.0][v2.0] before upgrading to 2.1.
Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.
### Preparedness
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
You might also want to [backup your data directory][backup-datastore] for a potential [downgrade](#downgrade).
@ -38,7 +43,7 @@ If you have even more data, this might take more time. If you have a data size l
### Downgrade
If all members have been upgraded to v2.1, the cluster will be upgraded to v2.1, and downgrade is **not possible**. If any member is still v2.0, the cluster will remain in v2.0, and you can go back to using the v2.0 binary.
Please [backup the data directory][backup-datastore] of all etcd members if you want to downgrade the cluster, even if it is upgraded.
@ -96,7 +101,7 @@ member 924e2e83e93f2560 is healthy
member a8266ecf031671f3 is healthy
```
#### 4. Repeat step 2 to step 3 for all other members
#### 5. Finish

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
# Upgrade etcd from 2.1 to 2.2
In the general case, upgrading from etcd 2.1 to 2.2 can be a zero-downtime, rolling upgrade:
@ -13,11 +18,11 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this
To upgrade an existing etcd deployment to 2.2, you must be running 2.1. If you're running a version of etcd before 2.1, you must upgrade to [2.1][v2.1] before upgrading to 2.2.
Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using the `etcdctl cluster-health` command.
### Preparedness
Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment.
You might also want to [backup the data directory][backup-datastore] for a potential [downgrade].
@ -31,11 +36,11 @@ Internally, etcd members negotiate with each other to determine the overall etcd
If you have a data size larger than 100MB you should contact us before upgrading, so we can make sure the upgrades work smoothly.
Every etcd 2.2 member will do health checking across the cluster periodically. etcd 2.1 members do not support health checking. During the upgrade, etcd 2.2 members will log warnings about the unhealthy state of etcd 2.1 members. You can ignore the warnings.
### Downgrade
If all members have been upgraded to v2.2, the cluster will be upgraded to v2.2, and downgrade is **not possible**. If any member is still v2.1, the cluster will remain in v2.1, and you can go back to using the v2.1 binary.
Please [backup the data directory][backup-datastore] of all etcd members if you want to downgrade the cluster, even if it is upgraded.
@ -112,7 +117,7 @@ member a8266ecf031671f3 is healthy: got healthy result from http://localhost:123
cluster is healthy
```
#### 4. Repeat step 2 to step 3 for all other members
#### 5. Finish

View File

@ -1,3 +1,8 @@
**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.**
[v3-docs]: ../docs.md#documentation
## Upgrade etcd from 2.2 to 2.3
In the general case, upgrading from etcd 2.2 to 2.3 can be a zero-downtime, rolling upgrade:

View File

@ -58,7 +58,7 @@ func (s *kvstore) Propose(k string, v string) {
if err := gob.NewEncoder(&buf).Encode(kv{k, v}); err != nil {
log.Fatal(err)
}
s.proposeC <- string(buf.Bytes())
s.proposeC <- buf.String()
}
func (s *kvstore) readCommits(commitC <-chan *string, errorC <-chan error) {

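The `buf.String()` change above is a one-line idiom fix: `bytes.Buffer` already exposes its contents as a string, so `string(buf.Bytes())` spelled the same conversion less directly. A minimal standalone sketch of the encode-then-propose step, with the `kv` struct assumed to mirror raftexample's:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// kv is assumed to mirror raftexample's key/value proposal struct.
type kv struct {
	Key string
	Val string
}

func main() {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(kv{"foo", "bar"}); err != nil {
		log.Fatal(err)
	}
	// buf.String() returns the buffer contents as a string directly.
	proposal := buf.String()
	fmt.Printf("gob-encoded proposal is %d bytes\n", len(proposal))
}
```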
e2e/docker/Dockerfile Normal file
View File

@ -0,0 +1,12 @@
FROM golang:1.8.3-stretch
LABEL Description="Image for etcd DNS testing"
RUN apt update -y
RUN go get github.com/mattn/goreman
RUN apt install -y bind9
RUN mkdir /var/bind
RUN chown bind /var/bind
ADD Procfile.tls /Procfile.tls
ADD run.sh /run.sh
ADD named.conf etcd.zone rdns.zone /etc/bind/
ADD resolv.conf /etc/resolv.conf
CMD ["/run.sh"]

e2e/docker/Makefile Normal file
View File

@ -0,0 +1,7 @@
# run makefile from repo root
docker-dns-build:
docker build -t etcd-dns e2e/docker/
docker-dns-test: docker-dns-build
docker run --dns 127.0.0.1 --rm -v `pwd`/bin/:/etcd -v `pwd`/integration/fixtures:/certs -w /etcd -t etcd-dns

e2e/docker/Procfile.tls Normal file
View File

@ -0,0 +1,6 @@
# Run with goreman (install via `go get github.com/mattn/goreman`)
etcd1: ./etcd --name infra1 --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:12380 --initial-advertise-peer-urls=https://m1.etcd.local:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster=infra1=https://m1.etcd.local:12380,infra2=https://m2.etcd.local:22380,infra3=https://m3.etcd.local:32380 --initial-cluster-state new --enable-pprof --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --trusted-ca-file=/certs/ca.crt
etcd2: ./etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster=infra1=https://m1.etcd.local:12380,infra2=https://m2.etcd.local:22380,infra3=https://m3.etcd.local:32380 --initial-cluster-state new --enable-pprof --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --trusted-ca-file=/certs/ca.crt
etcd3: ./etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster=infra1=https://m1.etcd.local:12380,infra2=https://m2.etcd.local:22380,infra3=https://m3.etcd.local:32380 --initial-cluster-state new --enable-pprof --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --trusted-ca-file=/certs/ca.crt

e2e/docker/etcd.zone Normal file
View File

@ -0,0 +1,14 @@
$TTL 86400
@ IN SOA etcdns.local. root.etcdns.local. (
100500 ; Serial
604800 ; Refresh
86400 ; Retry
2419200 ; Expire
86400 ) ; Negative Cache TTL
IN NS ns.etcdns.local.
IN A 127.0.0.1
ns IN A 127.0.0.1
m1 IN A 127.0.0.1
m2 IN A 127.0.0.1
m3 IN A 127.0.0.1

e2e/docker/named.conf Normal file
View File

@ -0,0 +1,23 @@
options {
directory "/var/bind";
listen-on { 127.0.0.1; };
listen-on-v6 { none; };
allow-transfer {
none;
};
// If you have problems and are behind a firewall:
query-source address * port 53;
pid-file "/var/run/named/named.pid";
allow-recursion { none; };
recursion no;
};
zone "etcd.local" IN {
type master;
file "/etc/bind/etcd.zone";
};
zone "0.0.127.in-addr.arpa" {
type master;
file "/etc/bind/rdns.zone";
};

e2e/docker/rdns.zone Normal file
View File

@ -0,0 +1,13 @@
$TTL 86400
@ IN SOA etcdns.local. root.etcdns.local. (
100500 ; Serial
604800 ; Refresh
86400 ; Retry
2419200 ; Expire
86400 ) ; Negative Cache TTL
IN NS ns.etcdns.local.
IN A 127.0.0.1
1 IN PTR m1.etcd.local.
1 IN PTR m2.etcd.local.
1 IN PTR m3.etcd.local.

e2e/docker/resolv.conf Normal file
View File

@ -0,0 +1 @@
nameserver 127.0.0.1

e2e/docker/run.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/sh
/etc/init.d/bind9 start
# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost
cat /dev/null >/etc/hosts
goreman -f /Procfile.tls start &
sleep 5s
ETCDCTL_API=3 ./etcdctl --cacert=/certs/ca.crt --endpoints=https://m1.etcd.local:2379 put abc def

View File

@ -15,7 +15,13 @@
package e2e
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/coreos/etcd/pkg/expect"
)
const exampleConfigFile = "../etcd.conf.yml.sample"
@ -32,3 +38,49 @@ func TestEtcdExampleConfig(t *testing.T) {
t.Fatal(err)
}
}
func TestEtcdMultiPeer(t *testing.T) {
peers, tmpdirs := make([]string, 3), make([]string, 3)
for i := range peers {
peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, etcdProcessBasePort+i)
d, err := ioutil.TempDir("", fmt.Sprintf("e%d.etcd", i))
if err != nil {
t.Fatal(err)
}
tmpdirs[i] = d
}
ic := strings.Join(peers, ",")
procs := make([]*expect.ExpectProcess, len(peers))
defer func() {
for i := range procs {
if procs[i] != nil {
procs[i].Stop()
}
os.RemoveAll(tmpdirs[i])
}
}()
for i := range procs {
args := []string{
binDir + "/etcd",
"--name", fmt.Sprintf("e%d", i),
"--listen-client-urls", "http://0.0.0.0:0",
"--data-dir", tmpdirs[i],
"--advertise-client-urls", "http://0.0.0.0:0",
"--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", etcdProcessBasePort+i, etcdProcessBasePort+len(peers)+i),
"--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", etcdProcessBasePort+i),
"--initial-cluster", ic,
}
p, err := spawnCmd(args)
if err != nil {
t.Fatal(err)
}
procs[i] = p
}
for _, p := range procs {
if err := waitReadyExpectProc(p, false); err != nil {
t.Fatal(err)
}
}
}

View File

@ -109,7 +109,7 @@ type Config struct {
Debug bool `json:"debug"`
LogPkgLevels string `json:"log-package-levels"`
EnablePprof bool
EnablePprof bool `json:"enable-pprof"`
Metrics string `json:"metrics"`
// ForceNewCluster starts a new cluster even if previously started; unsafe.

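The added `json:"enable-pprof"` tag is what lets the config-file key reach the field: the config decoder matches keys against struct tags, and the hyphenated key `enable-pprof` can never match an untagged `EnablePprof` field name. A standalone sketch of that matching behavior, using plain `encoding/json` for illustration rather than etcd's actual config loader:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type untagged struct {
	EnablePprof bool // no tag: the key "enable-pprof" cannot match this field
}

type tagged struct {
	EnablePprof bool `json:"enable-pprof"` // tag maps the hyphenated key
}

func main() {
	doc := []byte(`{"enable-pprof": true}`)

	var u untagged
	var t tagged
	json.Unmarshal(doc, &u)
	json.Unmarshal(doc, &t)

	fmt.Println(u.EnablePprof) // false: the key is silently ignored
	fmt.Println(t.EnablePprof) // true
}
```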
View File

@ -27,6 +27,7 @@ import (
"time"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/pkg/cors"
"github.com/coreos/etcd/pkg/debugutil"
@ -149,17 +150,17 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
}
// configure peer handlers after rafthttp.Transport started
ph := v2http.NewPeerHandler(e.Server)
for i := range e.Peers {
ph := etcdhttp.NewPeerHandler(e.Server)
for _, p := range e.Peers {
srv := &http.Server{
Handler: ph,
ReadTimeout: 5 * time.Minute,
ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
}
e.Peers[i].serve = func() error {
return srv.Serve(e.Peers[i].Listener)
}
e.Peers[i].close = func(ctx context.Context) error {
l := p.Listener
p.serve = func() error { return srv.Serve(l) }
p.close = func(ctx context.Context) error {
// gracefully shutdown http.Server
// close open listeners, idle connections
// until context cancel or time-out
@ -186,11 +187,29 @@ func (e *Etcd) Config() Config {
func (e *Etcd) Close() {
e.closeOnce.Do(func() { close(e.stopc) })
// (gRPC server) stops accepting new connections,
// RPCs, and blocks until all pending RPCs are finished
timeout := 2 * time.Second
if e.Server != nil {
timeout = e.Server.Cfg.ReqTimeout()
}
for _, sctx := range e.sctxs {
for gs := range sctx.grpcServerC {
gs.GracefulStop()
ch := make(chan struct{})
go func() {
defer close(ch)
// close listeners to stop accepting new connections,
// will block on any existing transports
gs.GracefulStop()
}()
// wait until all pending RPCs are finished
select {
case <-ch:
case <-time.After(timeout):
// took too long, manually close open transports
// e.g. watch streams
gs.Stop()
// concurrent GracefulStop should be interrupted
<-ch
}
}
}
@ -326,6 +345,9 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
if sctx.l, err = net.Listen(proto, addr); err != nil {
return nil, err
}
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
// hosts that disable ipv6. So, use the address given by the user.
sctx.addr = addr
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum {
@ -383,16 +405,19 @@ func (e *Etcd) serve() (err error) {
}
// Start a client server goroutine for each listen address
var v2h http.Handler
var h http.Handler
if e.Config().EnableV2 {
v2h = http.Handler(&cors.CORSHandler{
Handler: v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()),
Info: e.cfg.CorsInfo,
})
h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
} else {
mux := http.NewServeMux()
etcdhttp.HandleBasic(mux, e.Server)
h = mux
}
h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})
for _, sctx := range e.sctxs {
go func(s *serveCtx) {
e.errHandler(s.serve(e.Server, ctlscfg, v2h, e.errHandler))
e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler))
}(sctx)
}
return nil

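The Close logic above is a reusable shutdown pattern: run GracefulStop in a goroutine so it can drain pending RPCs, and fall back to Stop when a timer fires, which force-closes open transports and unblocks the concurrent GracefulStop. A self-contained sketch with a fake server standing in for *grpc.Server (whose GracefulStop blocks while watch streams stay open):

```go
package main

import (
	"fmt"
	"time"
)

// stopper abstracts the two grpc.Server methods used by embed.(*Etcd).Close.
type stopper interface {
	GracefulStop()
	Stop()
}

// stopWithTimeout tries a graceful drain first, then force-closes open
// transports (e.g. watch streams) once the timeout elapses.
func stopWithTimeout(gs stopper, timeout time.Duration) {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		gs.GracefulStop() // blocks while clients keep streams open
	}()
	select {
	case <-ch:
	case <-time.After(timeout):
		gs.Stop() // interrupts the concurrent GracefulStop
		<-ch
	}
}

// fakeServer simulates a server whose GracefulStop never returns on its
// own because a stream stays open until Stop force-closes it.
type fakeServer struct{ stopped chan struct{} }

func (s *fakeServer) GracefulStop() { <-s.stopped }
func (s *fakeServer) Stop()         { close(s.stopped) }

func main() {
	s := &fakeServer{stopped: make(chan struct{})}
	start := time.Now()
	stopWithTimeout(s, 100*time.Millisecond)
	fmt.Printf("shut down after %v\n", time.Since(start).Round(time.Millisecond))
}
```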
View File

@ -44,6 +44,7 @@ import (
type serveCtx struct {
l net.Listener
addr string
secure bool
insecure bool
@ -160,28 +161,38 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha
})
}
type registerHandlerFunc func(context.Context, *gw.ServeMux, string, []grpc.DialOption) error
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
ctx := sctx.ctx
addr := sctx.l.Addr().String()
conn, err := grpc.DialContext(ctx, sctx.addr, opts...)
if err != nil {
return nil, err
}
gwmux := gw.NewServeMux()
handlers := []registerHandlerFunc{
etcdservergw.RegisterKVHandlerFromEndpoint,
etcdservergw.RegisterWatchHandlerFromEndpoint,
etcdservergw.RegisterLeaseHandlerFromEndpoint,
etcdservergw.RegisterClusterHandlerFromEndpoint,
etcdservergw.RegisterMaintenanceHandlerFromEndpoint,
etcdservergw.RegisterAuthHandlerFromEndpoint,
v3lockgw.RegisterLockHandlerFromEndpoint,
v3electiongw.RegisterElectionHandlerFromEndpoint,
etcdservergw.RegisterKVHandler,
etcdservergw.RegisterWatchHandler,
etcdservergw.RegisterLeaseHandler,
etcdservergw.RegisterClusterHandler,
etcdservergw.RegisterMaintenanceHandler,
etcdservergw.RegisterAuthHandler,
v3lockgw.RegisterLockHandler,
v3electiongw.RegisterElectionHandler,
}
for _, h := range handlers {
if err := h(ctx, gwmux, addr, opts); err != nil {
if err := h(ctx, gwmux, conn); err != nil {
return nil, err
}
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
}
}()
return gwmux, nil
}

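With the handler signature change above, the gateway dials a single shared *grpc.ClientConn, passes it to every register function, and ties its lifetime to the serve context. A minimal sketch of just that ownership pattern, with a stand-in type replacing the real connection:

```go
package main

import (
	"context"
	"fmt"
)

// conn stands in for the one *grpc.ClientConn shared by all gateway handlers.
type conn struct{}

func (c *conn) Close() error { fmt.Println("gateway conn closed"); return nil }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := &conn{}

	// A single goroutine owns the connection: when the serve context is
	// cancelled, the shared connection is closed exactly once, instead of
	// each handler tearing down its own per-endpoint dial.
	done := make(chan struct{})
	go func() {
		defer close(done)
		<-ctx.Done()
		if err := c.Close(); err != nil {
			fmt.Println("failed to close conn:", err)
		}
	}()

	cancel() // simulates gateway shutdown
	<-done
}
```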
View File

@ -154,9 +154,10 @@ func (e Error) StatusCode() int {
return status
}
func (e Error) WriteTo(w http.ResponseWriter) {
func (e Error) WriteTo(w http.ResponseWriter) error {
w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(e.StatusCode())
fmt.Fprintln(w, e.toJsonString())
_, err := w.Write([]byte(e.toJsonString() + "\n"))
return err
}

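Returning the result of w.Write, instead of discarding it via fmt.Fprintln, lets callers such as the WriteError helpers log responses that failed to reach the client. A small sketch of the new shape, using httptest and a hypothetical writeTo helper rather than the real Error type:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// writeTo mimics the new signature: the write error is surfaced to the
// caller instead of being swallowed.
func writeTo(w http.ResponseWriter, body string) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusBadRequest)
	_, err := w.Write([]byte(body + "\n"))
	return err
}

func main() {
	rec := httptest.NewRecorder()
	if err := writeTo(rec, `{"errorCode":100}`); err != nil {
		fmt.Println("error writing response:", err)
	}
	fmt.Print(rec.Body.String())
}
```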
View File

@ -35,7 +35,7 @@ max-snapshots: 5
max-wals: 5
# Comma-separated white list of origins for CORS (cross-origin resource sharing).
cors:
# List of this member's peer URLs to advertise to the rest of the cluster.
# The URLs need to be a comma-separated list.
@ -46,16 +46,16 @@ initial-advertise-peer-urls: http://localhost:2380
advertise-client-urls: http://localhost:2379
# Discovery URL used to bootstrap the cluster.
discovery:
# Valid values include 'exit', 'proxy'
discovery-fallback: 'proxy'
# HTTP proxy to use for traffic to discovery service.
discovery-proxy:
# DNS domain used to bootstrap initial cluster.
discovery-srv:
# Initial cluster configuration for bootstrapping.
initial-cluster:
@ -72,6 +72,9 @@ strict-reconfig-check: false
# Accept etcd V2 client requests
enable-v2: true
# Enable runtime profiling data via HTTP server
enable-pprof: true
# Valid values include 'on', 'readonly', 'off'
proxy: 'off'
@ -90,40 +93,40 @@ proxy-write-timeout: 5000
# Time (in milliseconds) for a read to timeout.
proxy-read-timeout: 0
client-transport-security:
# DEPRECATED: Path to the client server TLS CA file.
ca-file:
# Path to the client server TLS cert file.
cert-file:
# Path to the client server TLS key file.
key-file:
# Enable client cert authentication.
client-cert-auth: false
# Path to the client server TLS trusted CA key file.
trusted-ca-file:
# Client TLS using generated certificates
auto-tls: false
peer-transport-security:
# DEPRECATED: Path to the peer server TLS CA file.
ca-file:
# Path to the peer server TLS cert file.
cert-file:
# Path to the peer server TLS key file.
key-file:
# Enable peer client cert authentication.
client-cert-auth: false
# Path to the peer server TLS trusted CA key file.
trusted-ca-file:
# Peer TLS using generated certificates.
auto-tls: false
@ -132,7 +135,7 @@ peer-transport-security:
debug: false
# Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
log-package-levels:
# Force to create a new one member cluster.
force-new-cluster: false

View File

@ -82,7 +82,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
}
var wg sync.WaitGroup
errc := make(chan error, len(cfgs))
for _, cfg := range cfgs {
wg.Add(1)
go func(cfg *v3.Config) {
@ -90,7 +90,7 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
ep := cfg.Endpoints[0]
cli, err := v3.New(*cfg)
if err != nil {
fmt.Printf("%s is unhealthy: failed to connect: %v\n", ep, err)
errc <- fmt.Errorf("%s is unhealthy: failed to connect: %v", ep, err)
return
}
st := time.Now()
@ -103,12 +103,24 @@ func epHealthCommandFunc(cmd *cobra.Command, args []string) {
if err == nil || err == rpctypes.ErrPermissionDenied {
fmt.Printf("%s is healthy: successfully committed proposal: took = %v\n", ep, time.Since(st))
} else {
fmt.Printf("%s is unhealthy: failed to commit proposal: %v\n", ep, err)
errc <- fmt.Errorf("%s is unhealthy: failed to commit proposal: %v", ep, err)
}
}(cfg)
}
wg.Wait()
close(errc)
errs := false
for err := range errc {
if err != nil {
errs = true
fmt.Fprintln(os.Stderr, err)
}
}
if errs {
ExitWithError(ExitError, fmt.Errorf("unhealthy cluster"))
}
}
type epStatus struct {

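The health-check change above follows a common fan-out/fan-in shape: each endpoint probe runs in its own goroutine and reports failures on a buffered channel; after wg.Wait the channel is closed and drained, and any collected error turns into a non-zero exit. A self-contained sketch under assumed names (checkEndpoints, plus a probe that always fails so the exit path is visible):

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

// checkEndpoints fans out one probe per endpoint, gathers failures on a
// buffered channel, and reports an overall error if any probe failed.
func checkEndpoints(eps []string) error {
	var wg sync.WaitGroup
	errc := make(chan error, len(eps))
	for _, ep := range eps {
		wg.Add(1)
		go func(ep string) {
			defer wg.Done()
			// Illustrative probe: always unhealthy.
			errc <- fmt.Errorf("%s is unhealthy: failed to connect", ep)
		}(ep)
	}
	wg.Wait()
	close(errc) // safe: all senders have finished

	unhealthy := false
	for err := range errc {
		if err != nil {
			unhealthy = true
			fmt.Fprintln(os.Stderr, err)
		}
	}
	if unhealthy {
		return fmt.Errorf("unhealthy cluster")
	}
	return nil
}

func main() {
	if err := checkEndpoints([]string{"https://127.0.0.1:2379"}); err != nil {
		os.Exit(1) // mirrors ExitWithError(ExitError, ...)
	}
}
```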
View File

@ -137,10 +137,10 @@ func (p *fieldsPrinter) EndpointStatus(eps []epStatus) {
for _, ep := range eps {
p.hdr(ep.Resp.Header)
fmt.Printf("\"Version\" : %q\n", ep.Resp.Version)
fmt.Println(`"DBSize" :"`, ep.Resp.DbSize)
fmt.Println(`"Leader" :"`, ep.Resp.Leader)
fmt.Println(`"RaftIndex" :"`, ep.Resp.RaftIndex)
fmt.Println(`"RaftTerm" :"`, ep.Resp.RaftTerm)
fmt.Println(`"DBSize" :`, ep.Resp.DbSize)
fmt.Println(`"Leader" :`, ep.Resp.Leader)
fmt.Println(`"RaftIndex" :`, ep.Resp.RaftIndex)
fmt.Println(`"RaftTerm" :`, ep.Resp.RaftTerm)
fmt.Printf("\"Endpoint\" : %q\n", ep.Ep)
fmt.Println()
}

View File

@ -0,0 +1,186 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
"encoding/json"
"expvar"
"fmt"
"net/http"
"strings"
"time"
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/version"
"github.com/coreos/pkg/capnslog"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp")
mlog = logutil.NewMergeLogger(plog)
)
const (
configPath = "/config"
metricsPath = "/metrics"
healthPath = "/health"
varsPath = "/debug/vars"
versionPath = "/version"
)
// HandleBasic adds handlers to a mux for serving JSON etcd client requests
// that do not access the v2 store.
func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) {
mux.HandleFunc(varsPath, serveVars)
mux.HandleFunc(configPath+"/local/log", logHandleFunc)
mux.Handle(metricsPath, prometheus.Handler())
mux.Handle(healthPath, healthHandler(server))
mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
}
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r, "GET") {
return
}
if uint64(server.Leader()) == raft.None {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"health": "true"}`))
}
}
func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
v := c.Version()
if v != nil {
fn(w, r, v.String())
} else {
fn(w, r, "not_decided")
}
}
}
func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
if !allowMethod(w, r, "GET") {
return
}
vs := version.Versions{
Server: version.Version,
Cluster: clusterV,
}
w.Header().Set("Content-Type", "application/json")
b, err := json.Marshal(&vs)
if err != nil {
plog.Panicf("cannot marshal versions to json (%v)", err)
}
w.Write(b)
}
func logHandleFunc(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r, "PUT") {
return
}
in := struct{ Level string }{}
d := json.NewDecoder(r.Body)
if err := d.Decode(&in); err != nil {
WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
return
}
logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
if err != nil {
WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
return
}
plog.Noticef("globalLogLevel set to %q", logl.String())
capnslog.SetGlobalLogLevel(logl)
w.WriteHeader(http.StatusNoContent)
}
func serveVars(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r, "GET") {
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
if m == r.Method {
return true
}
w.Header().Set("Allow", m)
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
return false
}
// WriteError logs and writes the given Error to the ResponseWriter
// If Error is an etcdErr, it is rendered to the ResponseWriter
// Otherwise, it is assumed to be a StatusInternalServerError
func WriteError(w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
switch e := err.(type) {
case *etcdErr.Error:
e.WriteTo(w)
case *httptypes.HTTPError:
if et := e.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
mlog.MergeError(err)
default:
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
}
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package v2http
package etcdhttp
import (
"encoding/json"
@ -61,7 +61,7 @@ type peerMembersHandler struct {
}
func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
if !allowMethod(w, r, "GET") {
return
}
w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package v2http
package etcdhttp
import (
"encoding/json"
@ -20,13 +20,36 @@ import (
"net/http"
"net/http/httptest"
"path"
"sort"
"testing"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/go-semver/semver"
)
type fakeCluster struct {
id uint64
clientURLs []string
members map[uint64]*membership.Member
}
func (c *fakeCluster) ID() types.ID { return types.ID(c.id) }
func (c *fakeCluster) ClientURLs() []string { return c.clientURLs }
func (c *fakeCluster) Members() []*membership.Member {
var ms membership.MembersByID
for _, m := range c.members {
ms = append(ms, m)
}
sort.Sort(ms)
return []*membership.Member(ms)
}
func (c *fakeCluster) Member(id types.ID) *membership.Member { return c.members[uint64(id)] }
func (c *fakeCluster) IsIDRemoved(id types.ID) bool { return false }
func (c *fakeCluster) Version() *semver.Version { return nil }
// TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that
// handles raft-prefix requests well.
func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {

View File

@ -0,0 +1,66 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/coreos/etcd/version"
)
func TestServeVersion(t *testing.T) {
req, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
rw := httptest.NewRecorder()
serveVersion(rw, req, "2.1.0")
if rw.Code != http.StatusOK {
t.Errorf("code=%d, want %d", rw.Code, http.StatusOK)
}
vs := version.Versions{
Server: version.Version,
Cluster: "2.1.0",
}
w, err := json.Marshal(&vs)
if err != nil {
t.Fatal(err)
}
if g := rw.Body.String(); g != string(w) {
t.Fatalf("body = %q, want %q", g, string(w))
}
if ct := rw.HeaderMap.Get("Content-Type"); ct != "application/json" {
t.Errorf("contet-type header = %s, want %s", ct, "application/json")
}
}
func TestServeVersionFails(t *testing.T) {
for _, m := range []string{
"CONNECT", "TRACE", "PUT", "POST", "HEAD",
} {
req, err := http.NewRequest(m, "", nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
rw := httptest.NewRecorder()
serveVersion(rw, req, "2.1.0")
if rw.Code != http.StatusMethodNotAllowed {
t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed)
}
}
}

View File

@ -17,7 +17,6 @@ package v2http
import (
"encoding/json"
"errors"
"expvar"
"fmt"
"io/ioutil"
"net/http"
@ -30,18 +29,15 @@ import (
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/etcd/etcdserver/auth"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/pkg/capnslog"
"github.com/jonboulle/clockwork"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
)
@ -51,17 +47,18 @@ const (
machinesPrefix = "/v2/machines"
membersPrefix = "/v2/members"
statsPrefix = "/v2/stats"
varsPath = "/debug/vars"
metricsPath = "/metrics"
healthPath = "/health"
versionPath = "/version"
configPath = "/config"
)
// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler {
sec := auth.NewStore(server, timeout)
mux := http.NewServeMux()
etcdhttp.HandleBasic(mux, server)
handleV2(mux, server, timeout)
return requestLogger(mux)
}
func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) {
sec := auth.NewStore(server, timeout)
kh := &keysHandler{
sec: sec,
server: server,
@ -91,25 +88,16 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http
cluster: server.Cluster(),
clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled,
}
mux := http.NewServeMux()
mux.HandleFunc("/", http.NotFound)
mux.Handle(healthPath, healthHandler(server))
mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
mux.Handle(keysPrefix, kh)
mux.Handle(keysPrefix+"/", kh)
mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
mux.HandleFunc(varsPath, serveVars)
mux.HandleFunc(configPath+"/local/log", logHandleFunc)
mux.Handle(metricsPath, prometheus.Handler())
mux.Handle(membersPrefix, mh)
mux.Handle(membersPrefix+"/", mh)
mux.Handle(machinesPrefix, mah)
handleAuth(mux, sech)
return requestLogger(mux)
}
type keysHandler struct {
@ -319,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
}
stats := h.stats.LeaderStats()
if stats == nil {
writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(stats)
}
func serveVars(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "GET") {
return
}
if uint64(server.Leader()) == raft.None {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil {
http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"health": "true"}`))
}
}
func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
v := c.Version()
if v != nil {
fn(w, r, v.String())
} else {
fn(w, r, "not_decided")
}
}
}
func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
if !allowMethod(w, r.Method, "GET") {
return
}
vs := version.Versions{
Server: version.Version,
Cluster: clusterV,
}
w.Header().Set("Content-Type", "application/json")
b, err := json.Marshal(&vs)
if err != nil {
plog.Panicf("cannot marshal versions to json (%v)", err)
}
w.Write(b)
}
func logHandleFunc(w http.ResponseWriter, r *http.Request) {
if !allowMethod(w, r.Method, "PUT") {
return
}
in := struct{ Level string }{}
d := json.NewDecoder(r.Body)
if err := d.Decode(&in); err != nil {
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
return
}
logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
if err != nil {
writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
return
}
plog.Noticef("globalLogLevel set to %q", logl.String())
capnslog.SetGlobalLogLevel(logl)
w.WriteHeader(http.StatusNoContent)
}
// parseKeyRequest converts a received http.Request on keysPrefix to
// a server Request, performing validation of supplied fields as appropriate.
// If any validation fails, an empty Request and non-nil error is returned.

View File

@ -37,7 +37,6 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"github.com/jonboulle/clockwork"
"golang.org/x/net/context"
@ -1409,48 +1408,6 @@ func TestServeStoreStats(t *testing.T) {
}
func TestServeVersion(t *testing.T) {
req, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
rw := httptest.NewRecorder()
serveVersion(rw, req, "2.1.0")
if rw.Code != http.StatusOK {
t.Errorf("code=%d, want %d", rw.Code, http.StatusOK)
}
vs := version.Versions{
Server: version.Version,
Cluster: "2.1.0",
}
w, err := json.Marshal(&vs)
if err != nil {
t.Fatal(err)
}
if g := rw.Body.String(); g != string(w) {
t.Fatalf("body = %q, want %q", g, string(w))
}
if ct := rw.HeaderMap.Get("Content-Type"); ct != "application/json" {
t.Errorf("contet-type header = %s, want %s", ct, "application/json")
}
}
func TestServeVersionFails(t *testing.T) {
for _, m := range []string{
"CONNECT", "TRACE", "PUT", "POST", "HEAD",
} {
req, err := http.NewRequest(m, "", nil)
if err != nil {
t.Fatalf("error creating request: %v", err)
}
rw := httptest.NewRecorder()
serveVersion(rw, req, "2.1.0")
if rw.Code != http.StatusMethodNotAllowed {
t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed)
}
}
}
func TestBadServeKeys(t *testing.T) {
testBadCases := []struct {
req *http.Request

View File

@ -20,12 +20,11 @@ import (
"strings"
"time"
etcdErr "github.com/coreos/etcd/error"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
"github.com/coreos/etcd/etcdserver/auth"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/pkg/capnslog"
)
@ -39,37 +38,18 @@ var (
mlog = logutil.NewMergeLogger(plog)
)
// writeError logs and writes the given Error to the ResponseWriter
// If Error is an etcdErr, it is rendered to the ResponseWriter
// Otherwise, it is assumed to be a StatusInternalServerError
func writeError(w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
switch e := err.(type) {
case *etcdErr.Error:
e.WriteTo(w)
case *httptypes.HTTPError:
if et := e.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
case auth.Error:
if e, ok := err.(auth.Error); ok {
herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
default:
switch err {
case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
mlog.MergeError(err)
default:
mlog.MergeErrorf("got unexpected response error (%v)", err)
}
herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
if et := herr.WriteTo(w); et != nil {
plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
}
return
}
etcdhttp.WriteError(w, r, err)
}
// allowMethod verifies that the given method is one of the allowed methods,

View File

@ -16,6 +16,7 @@ package v3rpc
import (
"crypto/tls"
"math"
"github.com/coreos/etcd/etcdserver"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
@ -24,6 +25,8 @@ import (
"google.golang.org/grpc/grpclog"
)
const maxStreams = math.MaxUint32
func init() {
grpclog.SetLogger(plog)
}
@ -36,8 +39,9 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
}
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
grpcServer := grpc.NewServer(opts...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))

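Pinning MaxConcurrentStreams to math.MaxUint32 makes the per-connection stream limit explicitly maximal, so large numbers of long-lived streams (watches, leases) are never throttled by whatever default the transport ships with. A minimal sketch of constructing a gRPC server with that option, no services registered:

```go
package main

import (
	"math"

	"google.golang.org/grpc"
)

func main() {
	// Explicitly lift the per-connection concurrent-stream cap; the
	// constant fits uint32, which is what the option accepts.
	gs := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32))
	defer gs.Stop()
}
```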
View File

@ -37,6 +37,7 @@ import (
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/etcdserver/api/v3client"
"github.com/coreos/etcd/etcdserver/api/v3election"
@ -631,7 +632,7 @@ func (m *member) Launch() error {
m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
m.s.Start()
m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}
for _, ln := range m.PeerListeners {
hs := &httptest.Server{

View File

@ -15,13 +15,16 @@
package integration
import (
"context"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/embed"
)
@ -102,6 +105,47 @@ func TestEmbedEtcd(t *testing.T) {
}
}
// TestEmbedEtcdGracefulStop ensures embedded server stops
// cutting existing transports.
func TestEmbedEtcdGracefulStop(t *testing.T) {
cfg := embed.NewConfig()
urls := newEmbedURLs(2)
setupEmbedCfg(cfg, []url.URL{urls[0]}, []url.URL{urls[1]})
cfg.Dir = filepath.Join(os.TempDir(), "embed-etcd")
os.RemoveAll(cfg.Dir)
defer os.RemoveAll(cfg.Dir)
e, err := embed.StartEtcd(cfg)
if err != nil {
t.Fatal(err)
}
<-e.Server.ReadyNotify() // wait for e.Server to join the cluster
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{urls[0].String()}})
if err != nil {
t.Fatal(err)
}
defer cli.Close()
// open watch connection
cli.Watch(context.Background(), "foo")
donec := make(chan struct{})
go func() {
e.Close()
close(donec)
}()
select {
case err := <-e.Err():
t.Fatal(err)
case <-donec:
case <-time.After(2*time.Second + e.Server.Cfg.ReqTimeout()):
t.Fatalf("took too long to close server")
}
}
func newEmbedURLs(n int) (urls []url.URL) {
for i := 0; i < n; i++ {
u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d%06d", os.Getpid(), i))

View File

@ -0,0 +1,19 @@
{
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "San Francisco",
"ST": "California",
"C": "USA"
}
],
"CN": "ca",
"ca": {
"expiry": "87600h"
}
}

View File

@ -1,23 +1,33 @@
-----BEGIN CERTIFICATE-----
MIID2zCCAsOgAwIBAgIUZXdXtcOe421Geq9VjM35+SRJUS8wDQYJKoZIhvcNAQEL
BQAwdTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRAwDgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTEZ
MBcGA1UEAxMQQXV0b2dlbmVyYXRlZCBDQTAeFw0xNjA3MDUxOTQ1MDBaFw0yMTA3
MDQxOTQ1MDBaMHUxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEW
MBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEQMA4GA1UEChMHZXRjZC1jYTELMAkGA1UE
CxMCQ0ExGTAXBgNVBAMTEEF1dG9nZW5lcmF0ZWQgQ0EwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDBMoRjH0ULs+0cRZWZ8BGJ7Fmf152J9uUE3/NgYV3M
4Ntu6l3IYALXT5QSHQZIz5425HP6827mwAOZ/bk6E3yzq6XR/vHzxPFLzBMzFuq/
elQA4nb7eYHICriEFUdJo2EUg3lSD3m6Deof/NjPMgUHtuvhn1OJMezaALZiMZ0K
9B9/1ktW4Roi6FMVFfJM5rKr9EIz6P2mFUpVHI7KSGbeuHiTPq0FLVv7wFPxRFX5
Ygd/nF6bbSsE2LAx/JdY1j0LQi0WUcA/HaWYVOpFSKohO6FmshP5bX0o//wWSkg2
8CSbtqvSxRF/Ril7raZlX713AAZVn8+B83tpjFqOLH+7AgMBAAGjYzBhMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSlyMYprKNDkzyP
gGA5cYnEEe9Y8DAfBgNVHSMEGDAWgBSlyMYprKNDkzyPgGA5cYnEEe9Y8DANBgkq
hkiG9w0BAQsFAAOCAQEAjjZkuoBl6meveg1frQuUhWtgtN/g9JqIjhEQ7tr4H46/
cHz3ngCuJh/GKSt7MTqafP99kqtm1GBs7BcoFKwsNFxNOo/a2MV2oYe2T5ol5U6/
RnmPv7yXzV1WlSC2IxFdtKEIfM859TFrWFN+NyH7yyYzjx+CzFdu6SHMwrQkETKr
R/PJrb0pV+gbeFpe/VfVyT7tFSxRTkSqwvMFNjQmbSLSiIFDNdZmPBmnWk418zoP
lkUESi3OQc4Eh/yQuldDXKl7L8+Ar8DddAu4nsni9EAJWi1u5wPPaLd+3s5USr1f
zFC3tb8o+WfNf+VSxWWPWyZXlcnB2glT+TWW40Ng1w==
MIIFrjCCA5agAwIBAgIUXWXsuLEZuHtKgeQSIVthb14+9EQwDQYJKoZIhvcNAQEN
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1
MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTELMAkGA1UEAxMCY2EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQCmtwjSg7gQBcVaoMycpePT0qoM0SKJVuvQRXIjL53/Bae5zuWiBdDVTElf
6OOFkjqPAxU7t28jmn/EqNcKkaVuFcFtVbgyD+vXWQITGSGfE1hmqVUcpbSpzLim
UIFNy6slMeUdFGiLG7/4P6mCHePgoW9r1+J2oAHSooCzJDqLNAGkgHhFQPhBC62G
3QrY2gwKlJ6Yl+2Ilb+bdT4PJq8sSlyAynPFTp07hnciEG6Ef6IQxc9pZb+UCa2A
Cyn9RU83AWj/aIcdlB8iNf86np4wFe8VEkgBdih91vfEzvoMhJZYBb0b0CnrRo1e
jVXAJkqTbajQM+yxlvlhB2PNCZusJa69eDCtnnO29MbTjOTqElTxlvU9c3huZycc
VMDgzyzm87F+Me3vh/6l6VC4Pm0zkA3XdwydncxreFoD/G+fQK2m6wXWzIsSGwqG
gzgAq8neJFfkcgzRu6WU1S8S/idqK9AoQAFIEPXYyIk3+K6JzHxhYZIBFE3OrZ58
oEo2PCP4snzTysZk7eWCe/WTZvReKtytzKAIS/CcjxsmgaviHee5tlV/rIghAxq8
QFnldJ1J9AtqPriRv0+EDFwOL8eyA+cVbWgX9UR0gWLe5lUqooowpq2ioWHG5F1m
cyi0u8cUtf5YZN6SVktQUdddsOCFfxvCU1NigxVxqs1ZWhSSrwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUp//gP4sC
l0dWZOXJWaPOYED2YdQwDQYJKoZIhvcNAQENBQADggIBABXyQox/T4kD+sLuTAp9
IP6Hr/XaHmKj3Zkrp8DdWt62R13ugCdWA8hu2yYzu92mSHBGbssaSaLzsNeb+LqE
/gSNQBvbfV0btQN2h+B3+BmEUuiv4ZTMPNArGfG7L1p35kH0NL46Bcssu59XSFLe
RIc7M5yT/C5+f/muhIxsAT6AdnwwkcxjQvQj9257S1gonOjLmmsVXW+Z+G9Y3YIf
hp84yvrJh86QVGsDC5Cu5i9kC/0CodCouIlBjWdELZDWV5KvbLAuWoQ5Jp1Y6+Jo
Dhx+2HB9mKmDWJfS8rWd//EiX/JH8iSMSaltmrzk6PYlWFAuM8jycDyyQI4mCe6J
wPMRyism7cowcGqHb+Nn2OiPvJtX6bGcVb8DbaGDmfgPdACqjdguzLHnaFyLmDe/
la0y1FAfW7jOyQrXEzqB4tJ8ZhI+HxRiXAh8ahBcKnMQFpjsEse03d2t65ZPDgev
NjIcoqhbANpYXdygux4hJNCT8KB194frC+eK0XqyO8BJYvid1Qp7SlnpFdEo1vMK
whLje6QkrgIyqoTP1+SiB3R79rtg+41bTb8paPJs9AqNaxS/l2bSnWnRvdkiJv89
YWgQGNO21XW+VbNV7Z0tMglmTvJc0ubbV5zZpVsuSOAQjdRXKieAxWAePrzDx5AM
ZiQgL5b9icqHm0aV7bcfp8H+
-----END CERTIFICATE-----

View File

@ -0,0 +1,13 @@
{
"signing": {
"default": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}

View File

@ -0,0 +1,46 @@
#!/bin/bash
if ! [[ "$0" =~ "./gencerts.sh" ]]; then
echo "must be run from 'fixtures'"
exit 255
fi
if ! which cfssl; then
echo "cfssl is not installed"
exit 255
fi
cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca
mv ca.pem ca.crt
openssl x509 -in ca.crt -noout -text
# generate DNS: localhost, IP: 127.0.0.1, CN: example.com certificates
cfssl gencert \
--ca ./ca.crt \
--ca-key ./ca-key.pem \
--config ./gencert.json \
./server-ca-csr.json | cfssljson --bare ./server
mv server.pem server.crt
mv server-key.pem server.key.insecure
# generate revoked certificates and crl
cfssl gencert --ca ./ca.crt \
--ca-key ./ca-key.pem \
--config ./gencert.json \
./server-ca-csr.json 2>revoked.stderr | cfssljson --bare ./server-revoked
mv server-revoked.pem server-revoked.crt
mv server-revoked-key.pem server-revoked.key.insecure
grep serial revoked.stderr | awk ' { print $9 } ' >revoke.txt
cfssl gencrl revoke.txt ca.crt ca-key.pem | base64 -d >revoke.crl
# generate wildcard certificates DNS: *.etcd.local
cfssl gencert \
--ca ./ca.crt \
--ca-key ./ca-key.pem \
--config ./gencert.json \
./server-ca-csr-wildcard.json | cfssljson --bare ./server-wildcard
mv server-wildcard.pem server-wildcard.crt
mv server-wildcard-key.pem server-wildcard.key.insecure
rm -f *.csr *.pem *.stderr *.txt

Binary file not shown.

View File

@ -0,0 +1,20 @@
{
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"O": "etcd",
"OU": "etcd Security",
"L": "San Francisco",
"ST": "California",
"C": "USA"
}
],
"CN": "example.com",
"hosts": [
"127.0.0.1",
"localhost"
]
}

View File

@ -0,0 +1,35 @@
-----BEGIN CERTIFICATE-----
MIIGEjCCA/qgAwIBAgIUBmQ4fvS9/9znydzkBFJ6EwYeoC0wDQYJKoZIhvcNAQEN
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDKaPgCc+vw1m8qNuPp4ujMhd0ZomWb4Ev5ik3wqp0M0ebK
fVXFwKhLwsmkA8YRafVsjZsyDeS7rRPm1tyAXonTjIXeQfYNEmS9SlqV3zSlNJk4
lRGJSAEnAJwqpH4fAAxAGzvyHL8o7Pu+Rg2yMXEaWb0niuafa5s0oRQln25DGMWq
dwdzVeiRb8MkUhlyWrmEKe5gr/lolxAxwXh7pVXqyqqzisnP+UETY/kmwNO5feGS
Ox195tyWsYNFq142gWJlfI+atrslBW1qOD+zam+niVkvSwATLQqrqF6HmfBRLIUO
xzvzWarnXskV5PiF7WVZuYZgw7Ez7JYxqo713xcGzB6IygMjjXpirLuhIon2E8aq
jtsOJHuEiJvfMywYbAd3ryPDU040iKHxHLsiQXMjycv+AjK/Z6JWBRk9OM06v8ua
/XeyBdChiXrRPwwSfPvkggyWo07ZYzuWCYZgxke5Bt7LsJ/uIBmX6gjGzW/mxNuA
X+c9FLKtMsqTRjHdU5Y1GkgGx9ZOWKXnDICPosz5fYqiGKKAy0sS2/4+/TFRssvY
Ef7KWjYQoMHJpWzhpLPmimp69XCTIdVh6JbkNxQlebiMS/NRD4qyCnZRvVvRhetp
9wlzDf+T/aUmBMdd6ins05mLjol/TCDb3IOhoMkzjwPKXzl89FAGhHrEXTFexQID
AQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUO3LyYm5pTwmuadST/Y3N
d78VIAswHwYDVR0jBBgwFoAUp//gP4sCl0dWZOXJWaPOYED2YdQwGgYDVR0RBBMw
EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBDQUAA4ICAQA37eO0sz0gXZcn
rEp/jLoCpD6PF96tMYtWztqhoCBj3IlzkSHBiZG3o8Jm08ldbgyOe4xCKrLgTPM5
zDCAXcTN1JpV4NPBzk+Oeyzl7Hayg8WftftoAyxzZqYZA2JAdQrjuW8nC18eKZ5P
05t+lQiXgOI0DaYpfJabJ2AFr8kqsZKW/gw1kvdhhar72Ar2rJwm+h6XWpWghGzQ
CWgr+q+FbWCqCtJ+MvwVe9qxwc8vwG/YxPeumNI2sC4pGIx6AxnNvvTtEVndQdA1
AG2HCjDm/6hbTre+4ps4orFfgwkavSxT4SPJYsLloD914oJ8ekuPKoMqgqF4jQRp
IiuwXZ5dhDJu5qAL/LcgG99j8beyNhpXbsiO7iWgskUHPOjFzZEfnV1K7g2yu0zS
Ym4zRKKIWWePn2Tvnu14aIC3pVaaYGL13+0UCbI1Dhm5qyJ+I7MNQte/bMVKEdfG
Xr+fL7VQL0MH58cNJoPdUBPmmiDTR8ZH73iuFA+6YpTzOtoDi1mWAu/PHdFDiR3o
hqTzUBEisfWsvj9Dd2las+glsCsmhCon00kuxau1zyvqxZrVXA82rdy981E09NOu
kSagZkOb60q/il1BCKYVXlZ5Mn9IUMQur7y8Tg2NPY66BXs0neTS/RcrMKtOdTdM
hE/fY15ykrUxtAio49yuhfQm5SxSQA==
-----END CERTIFICATE-----

View File

@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAymj4AnPr8NZvKjbj6eLozIXdGaJlm+BL+YpN8KqdDNHmyn1V
xcCoS8LJpAPGEWn1bI2bMg3ku60T5tbcgF6J04yF3kH2DRJkvUpald80pTSZOJUR
iUgBJwCcKqR+HwAMQBs78hy/KOz7vkYNsjFxGlm9J4rmn2ubNKEUJZ9uQxjFqncH
c1XokW/DJFIZclq5hCnuYK/5aJcQMcF4e6VV6sqqs4rJz/lBE2P5JsDTuX3hkjsd
febclrGDRateNoFiZXyPmra7JQVtajg/s2pvp4lZL0sAEy0Kq6heh5nwUSyFDsc7
81mq517JFeT4he1lWbmGYMOxM+yWMaqO9d8XBsweiMoDI416Yqy7oSKJ9hPGqo7b
DiR7hIib3zMsGGwHd68jw1NONIih8Ry7IkFzI8nL/gIyv2eiVgUZPTjNOr/Lmv13
sgXQoYl60T8MEnz75IIMlqNO2WM7lgmGYMZHuQbey7Cf7iAZl+oIxs1v5sTbgF/n
PRSyrTLKk0Yx3VOWNRpIBsfWTlil5wyAj6LM+X2KohiigMtLEtv+Pv0xUbLL2BH+
ylo2EKDByaVs4aSz5opqevVwkyHVYeiW5DcUJXm4jEvzUQ+Ksgp2Ub1b0YXrafcJ
cw3/k/2lJgTHXeop7NOZi46Jf0wg29yDoaDJM48Dyl85fPRQBoR6xF0xXsUCAwEA
AQKCAgA3rEubERtycOi+qb5ilIEH0EISTPK5vyXmiz4I1kTAQ/PA+lxfOjCQNhGU
RV1zaLuSkhh/2gZyAJcaxTp9LIOoZlxj16y/x7Fhx1PHKU+nqSIDyCy8n8uBWM+b
gwcVq8Oy1krUu0dxEE3l9grKWoMLhmdauv/YFZbpEO0jtAh1+BhWXMr11ElVx3Hb
SaGqLH4edhIVEhH9zJ8tsFNdXwqUvA3buG7t/1cA6FydZihWOuOSfyQLfzZpVIYQ
4aPWRhw0YeB145DyC94eez46MSpo4IRhV7W7kIYA1Ry7G4JYMXCfsfkxZBZ38UBJ
/2LEI5ne5gKqFulkqMxe+NS9mBES1c5s2EqGdIsRAyOHlh7XvE+aHoiPSNqXGC4S
h1x5HIakc8AITtCkoJ1WF5Ir01vc2XFQzZW+yJtXoO2GmZjJUO0NowNkpnCJPIaL
ri3q5Yx0O/1ZqctS4J1+/U6FG3ccdZvgui+XfX6x7Nf/l5JSuKIYNiHoPmQXfFcn
yfw0e2xR84IXkXnXy8pOHfFelQkAzNIoH+wcH0hQrECD+R8XDIrv5i7ys2IRsJuG
/PLJ0I2adYjMgn2s4Z8upMJcHDzn2wbIhzxG1LY7yVvyr233ScuaJE8+LpVwQvU9
fDjfJ+wXmGm2R+xfwXUAOtnvMpKOYOrVBDwT3Cp4V1b8NUaYwQKCAQEAz7hZ9kCk
wSDOE/LEdzf/OSIKgb9UVAl9af3F1n02U06PUHrlhB9qxhOi5z+Hqox1cFkHlVvl
1P6S6VGLAEnyMaD7e2zhVx0ByOSZAjMGaEve3ataocCWJfOM16WrC7r65gptjdZZ
FabzUBAW1YB7kadrdVg767c/xf9fAQ7bEYuDoJB4ER3o3h6aMZ1eeL2tiKrgIn8m
ql9WHX4X0dfF94V2bIaHnqTpTmmDMYE4tFZarfG4+e21YvnREd1PhlGMrsfGhYFS
DLYJw1gtJKM9ht4g1A0dggBt6GW7ziOvA23HGeIAEqLO+zT5e4WvxBV95x/OjWrs
v7YcipW8Kndd3QKCAQEA+XSoz0fsiKvfGJeg6dR64pr9hmGZz9nJRkCOejVd3U5L
uCbkPq5bFRjDGsBgih+9lWDApQq1lKeQmnin+sXDSmQEtVpDqW4gHuOk++xf6CkG
D9X6NR9PMrXDkRCJDONfyk7WppMjLzpFOrUcBW2VI2ZJdZd2oeQGV8QhQIHQEw0W
iYmAEFAjRoU6Y6g/XDngMkwL5PqmOuAXrw+HvwEJ0cyNisl0F9a6C0pkRQeZGWNi
EEpCvsg1NasTzsk/Rd0YjWGY5cNJKIkKVQoIX1Y8O644aIam3klpZLpA3Z41zmdl
/saIMcJHtW4s2s/YZ8wJrjxY/vsvCK9SrIFB2nc6CQKCAQEArCyQVO9MINDCQAKw
GmmN4Zt3vggLLAUZsSsqOxlaWD77x6e2aodoB3rrQmaEWzdFeLQy9vhPTvccasyu
PLUOTVi0Wp/rQDvI6O2ibhJBM14AAxkvbHenfRmdGno1humbYeYu3KxY9vuFHk3X
v2xaAGcXSRKzyDQCZmnAI65eovJTTlmxS+7QwJv5DzrzvXzrRCbu1WkzcXDIfnWv
5L3HV92GRXpVG4hx4g7PMc98Yu5ZB2ke+/quqLWNOBOLATjDNXRd9vc0PVj+Sq7h
7EZqj0m7SEj+tm0IrOL2gm1Nebgamjeb+9Z7XfmQ+XPcNtnhnBvfJ5UDW/zlN3HZ
oBGE+QKCAQBAjjAhdDuCIvhZJOQ/nv0uJ230mM11PKcZxxsYBTeFTf3KakYm8ngf
vYjAI4jYNd6aCa/RBjR3g+WoFBFklEk1tyyAwhtAfX9SfxbzGvi22+b1sipOFQwp
02AI6n6NF0py0HQ8J1ezoSDJUJUv0mwF/TKFe+z3eEsr61Wvm+h9BaYPccXycsqu
NUwm/iNnepLKcWOinjrmgZefdiVrCJnB8W2vvPKOOMNsqJPKSW0VZOK71HvxY15h
xSQbH3mAWvc/n0IyM5d7JfvGhhIkmex8hVmTs0T7wYPEzW5767WA9MEwbbBRMDXe
feSdu6wFMIhQzs05L0e8t6JtggXIw5IBAoIBAQCtyxIDHmoxKQeomXvdsCSL45F2
caA8b8THXTD0IDLPq1Z8J568lE86meVmyogLktyWRouatE4ZAg/ehOZdJwcTBGki
ZcQ1oycveFzH8cbVrS1x1zMXRcWh4ERkm28M9xTfOFBJRhcCADUOB6RHlz5q7iRj
5wQMd4OEtrXHnUaewelCNmfTUA+Z2JBA+wGUXEPpg9tD5NMzYMtYKYRCljUEyLTQ
oieAAiPzZI9UmolaHufW5+9s0kyGqGU69ORk8i/Fy27g/Ca83A6i9BsNiPaWUk5m
HF9YrkAh5TQDOA3dRoPmjJfPg6Pa92m95QHq9GcfdV3ifrNTJb0X3JCxCa9b
-----END RSA PRIVATE KEY-----

View File

@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFDzCCAvegAwIBAgIUeJ6mpnfPEW9DQsXb3n1/wfj6y04wDQYJKoZIhvcNAQEN
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA
A4IBDwAwggEKAoIBAQClaGICudWL2LbCIxBEtsSUC9cFzskwH0+9M17nKN9mA6dx
PWnOK3kLuLcV5kjzc70kechTzHXHfSSCw7DozUfecQQlxxeRZsf/sGJ88aUqnmn9
oYA0Yf32rZ5M4MkyeZM1uG279N/LLUvBt1wojgmddw9VhgM3FQ2wC8L6TwCPCER4
0pDjLx2wqjsGd/M2D2ixBSenRKnDMrIe1zY5RQYMfSX0Y1zmkr3ld2SfMpeIh28T
fG8tmOx19y++mB3kz8VJKknC2AbdnX/8i3BV+Y4DWZlxR7CT1flBih358w+6TOIi
BR6yuErkpsiYUP5dt4tyTYxxMIkHl+bIWRxswRQ7AgMBAAGjgZkwgZYwDgYDVR0P
AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
Af8EAjAAMB0GA1UdDgQWBBQPhUhOX+soZsPIPqa0cw9SEqM9lzAfBgNVHSMEGDAW
gBSn/+A/iwKXR1Zk5clZo85gQPZh1DAXBgNVHREEEDAOggwqLmV0Y2QubG9jYWww
DQYJKoZIhvcNAQENBQADggIBAHKU/dvRCfMXW2XurSpuIuGtnMn6ozSapiYETy3C
9UZ1hPldZc4D7QE2MsC2nthEzXkrf+uU/TtYl2OR/DWjaCWX4zP1F3qyLSUiXdZh
e48HQViLtLN8ZG+PRxM7KOW+DYChIRA+OAtYj9Ti6OiBOcnWY5K8MyJfNqq2wxD/
ja+dGD3u4qeHlnsfloFwr0Nj1+fhj+PYrAKgRJ7VsPKNisDgTWnBGfzO9pFY9e1E
aAoPDQJEfxpnEzupATfrHeEKBJVBj2SW1MX0zUwDv9NS7+AToxBuRbNtdsC7zfrS
657LAGQMHcahxuLvQQjYhP662ke98JsYNld49i/SP4o4ViK16BURwAHlm1ljNw43
7WQxdCuH7Fk5lXouaX+Btn5pkKGvguQfuW3T6lU+Kv2i3ZRKPdvhyN+aSfK7t8Rp
G2tDodGjZDorV6ZjBbMNUKHK8J/r+MHYiU95JIgjfn3pXztU0Nxhq15eIW+74yug
IZQfZ5TIMUlmhtR5truKSvdrwJqznFzPfr1iupyy30HhIHLVxy3fmyXHjdD9YeHC
CjQu+juiJrVo2JCrmlp/pEIe7sY2UnzkC30tJT1Ys4tRSlbN8YHrzdSgcd/0bkSA
TlkDkee6yb5jKf9/xHtUYFhGu3nLDX9aJwkJcKuZ0pz8yjlBK7sN/HFsQACbAVeW
aGks
-----END CERTIFICATE-----

View File

@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEApWhiArnVi9i2wiMQRLbElAvXBc7JMB9PvTNe5yjfZgOncT1p
zit5C7i3FeZI83O9JHnIU8x1x30kgsOw6M1H3nEEJccXkWbH/7BifPGlKp5p/aGA
NGH99q2eTODJMnmTNbhtu/Tfyy1LwbdcKI4JnXcPVYYDNxUNsAvC+k8AjwhEeNKQ
4y8dsKo7BnfzNg9osQUnp0SpwzKyHtc2OUUGDH0l9GNc5pK95XdknzKXiIdvE3xv
LZjsdfcvvpgd5M/FSSpJwtgG3Z1//ItwVfmOA1mZcUewk9X5QYod+fMPukziIgUe
srhK5KbImFD+XbeLck2McTCJB5fmyFkcbMEUOwIDAQABAoIBAFFMoP/d/0whVJLP
USu0+aIav7EnFlQEz9ia60aLHGKz4RUTrnGbhH9yZuroqWqftJO9F+24TRukMtHj
BY0neO+odPVsifT5o8vVElN/IkN0YXw8aRtWHtGkPG5k/f9FKkn5QFZl4amntdid
GzsDtU4kOvE6UVI9kuC4pzkIo6mpCDmDFu9SzsSKVAakKPPrvZ7w25jiyBuJUYKF
XOMOlTviZjbrsEW2BsNF/HNZywH3YRcE1jfiRHcQMLyF4Bk1VJSrPh4ahXlvTWmO
/5Pwl7OIaxFUSjrjSioRXP9VJRMgeMH0D2giQCHDXzYrqg6cxGTCwLnvVNagVBLx
/uiVFuECgYEAxyA11z778PyIxpbPxC6ui+vODKRh+sHERFUkhJTXLfYFZ55fIJ24
XjQJkZp8qMLhiBoqZjYvVCp+HUmyXoS2ts+DwNP+r/dOm/yyc1DsAyhvb39g9tHO
a2IjhkOiR5z6OxgYuv90gefq0glyVvFzmIhnRi398lhXJ2OU4XJWxHUCgYEA1KbB
iKKAT5Tvw3T23Z2T52YEorTS2DfPyp07zmiKxSdJiW2Or0mZAtNwDT72emy+9bSD
THs9PvS/Csoq6pWer1humF84K6qZ/ICPQnzt6jDG44R8vkdPdBSm2K8lWqlXxYFp
ya3Y8Fen5XFxEvrp7eD5NYkqZY3tqRVO8c9vn+8CgYEAxQz0+tqTSzk8yPj5BbUE
eeaR8yTA6PrTFKQFDUaVYiAx3QZ2MLqjdmWcioAMmJyxvpPWHWvFjk62mpkRcEN4
5JOaWDnxsYTUP7zjgwYzaDSdggLVm6qn0NA/Q2CuuJt5bP09i9+8FcnBMLS0d6Fc
uTdSq7pbsXUGWi5LaIZTovkCgYBacbpqxMLSFkSL21mMFJNtneRm14W91K8aPBnN
xoUPKZCLVP+U6jacDxXfbGIk28+0bVxS0S/RcQM4MZhjQdPGPFR9ljIr0FnCHWPR
IZWHP8u3xQfRXj8a3hXAn23By7i7FjnKP5i/UGjmm4M+UV3hgQg9juNrYhwtCBUV
n+aYHQKBgQCtidqW1EajQmZL26mzCb9ChtAgRT0fR4ui3wA2b4aZGu9CmJdRBS2K
K6XjqlQhFDtARWc8Svwuw5FZ+SJNDBFiHmuxu3uwmzMsPPgMk9YJxh5olu/ZgiKg
GApXR4U+CKlKLzeGk2WgpmfsF3KR12aUuc+BQnaeILRBZqjR0W4kvw==
-----END RSA PRIVATE KEY-----

View File

@@ -1,24 +1,35 @@
-----BEGIN CERTIFICATE-----
MIID9TCCAt2gAwIBAgIUXtrXPwZLfKUJiGr6ClP3lqhOuKUwDQYJKoZIhvcNAQEL
BQAwdTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRAwDgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTEZ
MBcGA1UEAxMQQXV0b2dlbmVyYXRlZCBDQTAeFw0xNjA3MDUxOTQ1MDBaFw0xNzA3
MDUxOTQ1MDBaMFUxFTATBgNVBAcTDHRoZSBpbnRlcm5ldDEWMBQGA1UEChMNYXV0
b2dlbmVyYXRlZDEVMBMGA1UECxMMZXRjZCBjbHVzdGVyMQ0wCwYDVQQDEwRldGNk
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGvOLPmy5i+1j4JitG4V
g99w125ncs2ImhqucmJe6YtSnKruaLdOx93X7lzC3k0umnvLgh643R4eYS5PvrDk
vw1dSYB7BHhveFPmmWd7m7n7bXtgbcdkCmUeTbSeqvptPgyMJOQfXzfOGbEHfu7U
0raulR6KtqAatofKpRZhZgzZQpVkhdd0UTsOwqCWdX3Qe0D1MS922kX99c4UlGyD
OTVL6tulvDBBYgHbGErFmhxdgwm4e6dFfdkPUeHczzUWnKo2sIGBvo4R/NwPIp6G
PnebrO0VWvcQfdSqjYk3BmILl8BVL5W1/EBRLtz9mZuQgc/VC62LvsgXusC9pwXC
3QIDAQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcD
AQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQULLrktzdBK6iLINQ7
hGRjQbMYXKowHwYDVR0jBBgwFoAUpcjGKayjQ5M8j4BgOXGJxBHvWPAwGgYDVR0R
BBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCI2Tp4pMYk
LFLzGy4e/5pwpA4x/C2zl01Sv/eC79RA5Zz1NtSF/7LCfL+KPNpNkxzPyTxWOaX5
YMuAbD49ZBQYeEyNUxKcwWEpaVlmlIUj3b21fBXQ7Nw25Uea45bNhdZcdMUOTums
J1/BrA2eoEB0guTlh3E8iadbVmSf6elA9TbYLd7QTTgcb3XclYCwhV3eKdm3IEiX
g4q50iM6/LRz1E5C3LlQ0aNqpGroBv/9ahLVfLr06ziSRcecLJ4485MtJOxP4guA
1tc6qPyw2MLmAlLZfOCHKLbK3KboZI8IANmrpNyL590D9bDl9nLnHmJuitBpIVp1
Hw0I8e4ZYhab
MIIGEjCCA/qgAwIBAgIUPViBCYkAU+aOqe9Db3rdN4EJxj8wDQYJKoZIhvcNAQEN
BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl
Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1
MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE
BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT
ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDi5InXatzyDc9B+ho+S4+6O1ZmIU44OMOnNfROaYBz4wnH
xGRcwFPBONVT+mAZ7CIdXV02oxHAJ2d5+Asjkt3p93dWJk+Q3DTnBL/3puPDUXt7
LCuYJ8sQrihqoF7KVHZeDVKUXMDKTwp0CnyDXAEG0wNbI81AqDA/umo1Kh9gacP6
1z4129EybwRd7K/ZqC97Zo36qsT3iCKrxS2njvQbbBfqr7njpFxF0qTBgDERhFeR
SFcaMPn1v0AFvaFFczTQDCVvSdLD0XswJVhe29DI0TsB/f85OV8R5rHcBXNvHdzB
vYpfH6Iy10/lKCj1CDNsGxaZHK3nhwODeTxa6qQCfFSRBvL8e5ieBNekHK0mM2pb
8P4nu3Xo4C3FMUDmO2qE0xYaJ076nTqkMQRYoLM/hTQzbIaXt2kUw9D6oqbmu3xt
gaZTFZjHveXVRg3hzjUKVLeqHqB2nMozBtx9wSCOvLCZSKj8DHwvh5kJrJpxYgpM
9uNQk32XL+QYF/OK2eIFVU9Mk1NC7PtrsQczdvFyAAkCTBzba0xOd3VzK8SQQmCr
RBFmsIAABPzPUv1WkwsWbYz5LaWOJXKUGMC00dH97eD6apgvA7jK2ILO14ETbNm9
hxod/DZqr6z8ijlw3WcqHzjYW49oZYg9H6YCRisf3yWuWFTWyL03fM00RgCSYwID
AQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQURgEb31GFrs1S/4cgV4ZI
fonP7OkwHwYDVR0jBBgwFoAUp//gP4sCl0dWZOXJWaPOYED2YdQwGgYDVR0RBBMw
EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBDQUAA4ICAQBS506hAyC9R6qo
KZY8E1tFeIxlkunnWqKrc/ElDxOvJOVJRJmY88KzyGN8v7gUog3RUmen4h5v6+SJ
IY00ljeu8zSqYoBOajlX+ej8LnNcr4viq+m+eMljA3jt7TRyHt9TIZ5MOns+M7Vb
HGTa9juRMfdX7oL8KdCsdzKVI7p8bK9qcE4JEDjDsh1vX5qc61hQfLyG6hkwTgyb
WjFeCjhP41ZrxoTS85qtBsmCj0Rv7uPjvR4GI9MCP/q41sXve7ejcepLFPTl0diH
E/5/9IO8TcrcaEBG8k5QvGoknW2vR8Y1IpcakcxoijjtDewJYcfMnB7uchBriGaR
sHFE8t3kmjOCo7Ve3kdsOGmcQAi3rCQ6PTs8t11L1g435NOYrNP72D22N96YYgYT
0VFNsJYUpDlGHGShTVtIAQFWlRmR9GxRb2OoJWN/JsxOuO++jc/SJw/X1NXOdpHH
pp+OhQTZqkU1KRMkdtxqI4LcQUN6TL6BPeXFcCKJ8F7kQ78DntYFOdf/WVsoY/8i
GmTIaCCGsB4yBDM6oe7WroVMgOd2ES+yeyyfyQr591/7peoFrTGUlVpZjjMHaGxl
HORsnQO48dN6h7EIoKxG/cfrx3TwwaNgzWSQJdA5pB+Vsia4we92UK/r+5Lu8Uzv
1DSnPpH/KZ1ASOH2ebWeBJP+PU8+cg==
-----END CERTIFICATE-----

View File

@@ -1,27 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEArGvOLPmy5i+1j4JitG4Vg99w125ncs2ImhqucmJe6YtSnKru
aLdOx93X7lzC3k0umnvLgh643R4eYS5PvrDkvw1dSYB7BHhveFPmmWd7m7n7bXtg
bcdkCmUeTbSeqvptPgyMJOQfXzfOGbEHfu7U0raulR6KtqAatofKpRZhZgzZQpVk
hdd0UTsOwqCWdX3Qe0D1MS922kX99c4UlGyDOTVL6tulvDBBYgHbGErFmhxdgwm4
e6dFfdkPUeHczzUWnKo2sIGBvo4R/NwPIp6GPnebrO0VWvcQfdSqjYk3BmILl8BV
L5W1/EBRLtz9mZuQgc/VC62LvsgXusC9pwXC3QIDAQABAoIBAQCNseKz71hn9tk8
YKiBIt6nix6OzHpTTDlwe3DVK6ZYQ1jWy1o10D773YIwryatzbv41Ld/7YN6o9/P
eWGrkm/J2k/Jsb5nBBqHRlwBwZtBdOv9IyEx1mSObl8i+MZUOI1CKsmZH6fwdkn3
rxY76EYaDGsYvQq93oFVc+7DEMtmMtr03xm2bleEvsUH0VVqLhiAof/PCgOzja/L
mPxhK0FqOmhk94JFo2l0XNMn/b2lpUhrx+xny5RD6/W/k2C1DuzBiFiNZkbPW1r1
n5QccJHpe/S3Y4WZ75yKyQdrcIz6AKSeHNNGw2mYERAOmejpVV+8OIvKY6pzyXi9
EM/BsLaBAoGBAN+XiqHHGilsrvjLKGak2KIaPRxA7EgFKKWBv8DojpXLqgkoloDL
1wS6uG4XE0FeJCiKZk/DpVgPSiKYkQJEFLgU8N3q8OO2cGYW8kfH/TuejWRebtgJ
GC7o5CqAHjFqRbTPJBLLNlSUZP08HVIRhob3t0zkvVRdDjA1rZIM/FlxAoGBAMVp
jTcimGEOhFbOvfLwFeMCFLglTzbxjSnxCLCKF5TbxcBN7iUE2wYRfexBLoP/3+rk
RheyRnMr4PeZ/JPQLHs80TUm9HGg8Phy+jAsIW/rF8BJ4aAExt2T4uLNsj4TXw1y
ckDMBLmZi0OFy4vDtwg4T2wVo55eN/oQfVNFFaotAoGAGLQ8q/08pcENYA3KS/UA
voBZqip+MMLpJ8g7MIxBXMmg4twqLNbYzfv3bqp8BSfqpNQN09hRB3bBASuMMgzl
oSUnK83OicpZht4YLNgq4ZB2HNXWN2Zh1qUCuLNpIpqUUxLj8HOlcBjpQ5WFw9CN
5ZGvHf7T8GNLswXrRIzMwPECgYAC5Q5WDaLQYYcdQsDUTCL2BjTJknp74sTgJZGs
DQpVe3eF316rmkuf5ifDjB0jgGAHMLu6YznXPIB7AP4MKNROJlEnB2A0PljqO71h
cXQ4EOlzP2IYl5lW7HE6RCvl7yDIsLHuM0+qbQ72uYKHlSIc875uZk7U5qrJdu5v
hybPLQKBgAmswE0nM9Fnj4ue9QaDgOvp1p7peZuzywBI4+TTJ/3++5vtUrgRl9Ak
UVzSVvltxhFpFtNfVrZxckDwb6louumRtBrLVWJDlAakvc5eG5tky+SA2u/bdXSr
8tq8c24K19Pg+OLkdZpiJqmKyyV0dVn6NNmiBmiLe2tClNsUHI47
MIIJKQIBAAKCAgEA4uSJ12rc8g3PQfoaPkuPujtWZiFOODjDpzX0TmmAc+MJx8Rk
XMBTwTjVU/pgGewiHV1dNqMRwCdnefgLI5Ld6fd3ViZPkNw05wS/96bjw1F7eywr
mCfLEK4oaqBeylR2Xg1SlFzAyk8KdAp8g1wBBtMDWyPNQKgwP7pqNSofYGnD+tc+
NdvRMm8EXeyv2agve2aN+qrE94giq8Utp470G2wX6q+546RcRdKkwYAxEYRXkUhX
GjD59b9ABb2hRXM00Awlb0nSw9F7MCVYXtvQyNE7Af3/OTlfEeax3AVzbx3cwb2K
Xx+iMtdP5Sgo9QgzbBsWmRyt54cDg3k8WuqkAnxUkQby/HuYngTXpBytJjNqW/D+
J7t16OAtxTFA5jtqhNMWGidO+p06pDEEWKCzP4U0M2yGl7dpFMPQ+qKm5rt8bYGm
UxWYx73l1UYN4c41ClS3qh6gdpzKMwbcfcEgjrywmUio/Ax8L4eZCayacWIKTPbj
UJN9ly/kGBfzitniBVVPTJNTQuz7a7EHM3bxcgAJAkwc22tMTnd1cyvEkEJgq0QR
ZrCAAAT8z1L9VpMLFm2M+S2ljiVylBjAtNHR/e3g+mqYLwO4ytiCzteBE2zZvYca
Hfw2aq+s/Io5cN1nKh842FuPaGWIPR+mAkYrH98lrlhU1si9N3zNNEYAkmMCAwEA
AQKCAgBU1KuwzfSTz5P5EAB14Bx5vau8/aDYJmkIgIS6OHndWjqS5Ru9De+Co7Qm
9Mqvhnjuz7SFNAzz8gefM50+jK/JxUtp+2LuP1bMNRttBYnMwg9P6yDVf7NNpj/Y
NeOa9F9ZJNQGQnOWcFzxK+aH2oNLwONVVRptnTSE7za8b+ZRTtoGVCmfS3N4zscs
Ms1ArMAr/BkDaovAHLcRz+QU7L7Z7d03UsJGmXIibPJKopo1+WwgFpLyaPNb7UiL
nO3KIJvTWWc5p7lUm2Laimvy826pVokgYo3lIE0qTT2cjLEvD1Q1X593l0U2iwgF
HrHJg2pudeZKEs32baVjp5VoTKhKzWZMugY/ZZG4fuOdmJRZksj4Op9Swk6F5cgT
tZ+EWEK48jIWfxVyAyua41301APTZwohwWp1qc9QOkA/tuqULZLfsVGRqrupF8te
UKbMG0MnTBKlL+ojGmFUSnrj2GBd4NSUbGLOve0TxvktTakKATdFMphE2A5JnvxC
lET3J1THj4xsDX3XwJ2KDkNCv86VUK0EptsaP7C/h3DbRhqJU30VTRhGjmtpwXj4
4+EKDmWKHpIXY9Qu/qMXm1utM1q39wUFYdTpVDY1/VcHVu1VDjqHwxJXwLzv3B5U
u17P2RIqvgD2IT1Xo0phi9WgzcllMRlEREpuLPnAZ8sSRbdNKQKCAQEA70cRtk5E
yPoQ8X18RFxllZle6MfIuYv1v6ojF9Cd/Klf/OKu1VAbXjcUxhH4gpKsdrvvA6TS
6Io8UonR3zqMuBsY+/L817ZTr8N/Q+iLK5hkhj4p2J3uX6ipLmfuQovlL478LpR6
nD1eB+2tF1yk76A5uyny1m+rUHufpdQ+MlsoV0kLdo18uRF02w5HY1D2S5/lTACz
WAmFCe9mnuQK0YMFyfiM/oVp9GjuOkf15QBnKW5klJByrB7oxw64VPwyxR1kkJhF
UkTb6AIHEGcZkI5pKyuifaGNhDk5b9x8R9nvqoNMRgNV9XR/euJ33W0BzePB5fAQ
NIrw4Y6z+oKJhQKCAQEA8r/j9vtFucZYfxPGXauipqubPER7YyXNbbO6skaQm7T7
CUzaJdAYwRorHsDZ8Cvii0W+iZ9s0JLekcPtEECoiVm7Xg8pcFfzMu/B7Cb7MkEu
w1kxrhYkb9xLZtBUaPD5vD7irxUxnu13GB9pbMYc/vw78F7SYjh/xyZXUjA7EVza
q2IgCS8oWqlROJBh4lzNPw787WPpBokeUujIY9HOcE7MFE8+p5AUm8Zz62sgY/4f
AMrSWD+cQ71bCMcdu12O4PUFCZtUx+ON8p0Awoy5KZniyUwIbt8UAlaoCT581Xh1
1BGEVHKkwv1nwuH6hOL80/9zl6yZSKu0mBgq+IK8xwKCAQAuh1tiYAXwLvBshUJM
6Mq4NILIMVFPA3BePO9mCiMupqELw+jLgjBQOdXITmZMvcjbrd/kjYCVx4vDYRl2
lyQWCO7qz21rZQERBKsSwX2OlKu3jw8EGHHqGBoN9BfYyOtgPCW9yRGuoCBQ2l72
VAWes0GGq7mVCVH+7Is26/bMQ/2sO4AHJaxDMKnQjw5CudOrEQS9qsU1MWS1ceA6
tY2FAD138OU5+SeJZ34rxyKBzXpCDD1yxkQGRFxvmOUvYXtd6UFM/M8+GDXK/9nv
zpyiB49b3bhTRb8HHzmUDwP71N1OAwop8ywb9vNzKea1ICVhrBBgbjY4gWwl8GH/
LLMhAoIBAQDHci4FARKavoJ7dm3nDFwJALn83G9cWPTeC2t7ikrKA/q1+3TI2J9e
GPgQvnbRw9zQfS89t8UZ4XII5adjURyoLRerAl4Ttc9VrHPyaVy+P5wCWMhetkad
uawh/007I7KsniZ1n74zS/wrz7M48dVlEyzUI7RLiwxBPhlEp+gALgBkC60ynpJT
WwYmqUojSAhCpTfee9Y7znEhwazThtBMqhE3Jpzd451rF7SqWkw0m9gxOHN2mlzz
syKWpbKh/Q6leer3p64Sxb4c9i5nqmN/8LXKmjPblGHGQhix76t1YRG+ed313HPO
2ZFlJ3JDuJPuQtZgailO8fThegnkQNaFAoIBAQCCG81C/p7BcUPdKoCT0vKXEEeB
4Kf0ziKy/1asIY365qAWXJ+wet/Y6erl4JddeUEp2IupB67G35uf/5Eit2+/4sOG
utd8BwxPKxkDY/iah00+2jRuNp+CYMr9MnIfTJE5daxG/YPabhMpUxRPsdveBWUL
sogPgEvbxvvlzg6NyQDsmCFRQfmwNZOsfUqOoFwmrE5Woz26y4wvF+ZooxOHnxx8
RsoJy1DvJgKtisE5eVBAq4ToLiFdQsb4NP39JBPWeaRZTRonS6F2NZw/lTWqKnQk
QZZMSoUwzJWFUqJ22sE4NHJzv17rJ3txnPtMroB0RPXcQUIywNn7mvF3ltZp
-----END RSA PRIVATE KEY-----

View File

@@ -45,7 +45,7 @@ func TestV3PutOverwrite(t *testing.T) {
kvc := toGRPC(clus.RandClient()).KV
key := []byte("foo")
reqput := &pb.PutRequest{Key: key, Value: []byte("bar")}
reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true}
respput, err := kvc.Put(context.TODO(), reqput)
if err != nil {
@@ -62,6 +62,9 @@ func TestV3PutOverwrite(t *testing.T) {
t.Fatalf("expected newer revision on overwrite, got %v <= %v",
respput2.Header.Revision, respput.Header.Revision)
}
if pkv := respput2.PrevKv; pkv == nil || string(pkv.Value) != "bar" {
t.Fatalf("expected PrevKv=bar, got response %+v", respput2)
}
reqrange := &pb.RangeRequest{Key: key}
resprange, err := kvc.Range(context.TODO(), reqrange)

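The hunk above extends TestV3PutOverwrite for the PrevKv flag: a Put carrying PrevKv: true must come back with the key-value pair it overwrote. At the clientv3 level the same behavior is requested with WithPrevKV(); a minimal sketch, assuming a local endpoint:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	cli.Put(context.TODO(), "foo", "bar")

	// WithPrevKV sets the PrevKv flag tested above, so the response
	// carries the key-value pair this Put overwrote.
	resp, err := cli.Put(context.TODO(), "foo", "baz", clientv3.WithPrevKV())
	if err != nil {
		panic(err)
	}
	if resp.PrevKv != nil {
		fmt.Printf("overwrote %s=%s\n", resp.PrevKv.Key, resp.PrevKv.Value)
	}
}

This is also the flag the grpcproxy change at the bottom of this diff forwards; without it, the proxy silently dropped PrevKv.
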
View File

@@ -31,12 +31,15 @@ import (
const (
// NoLease is a special LeaseID representing the absence of a lease.
NoLease = LeaseID(0)
forever = monotime.Time(math.MaxInt64)
)
var (
leaseBucketName = []byte("lease")
forever = monotime.Time(math.MaxInt64)
// maximum number of leases to revoke per second; configurable for tests
leaseRevokeRate = 1000
ErrNotPrimary = errors.New("not a primary lessor")
ErrLeaseNotFound = errors.New("lease not found")
@@ -324,8 +327,53 @@ func (le *lessor) Promote(extend time.Duration) {
for _, l := range le.leaseMap {
l.refresh(extend)
}
if len(le.leaseMap) < leaseRevokeRate {
// no possibility of lease pile-up
return
}
// adjust expiries in case of overlap
leases := make([]*Lease, 0, len(le.leaseMap))
for _, l := range le.leaseMap {
leases = append(leases, l)
}
sort.Sort(leasesByExpiry(leases))
baseWindow := leases[0].Remaining()
nextWindow := baseWindow + time.Second
expires := 0
// have fewer expires than the total revoke rate so piled up leases
// don't consume the entire revoke limit
targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
for _, l := range leases {
remaining := l.Remaining()
if remaining > nextWindow {
baseWindow = remaining
nextWindow = baseWindow + time.Second
expires = 1
continue
}
expires++
if expires <= targetExpiresPerSecond {
continue
}
rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
// If leases are extended by n seconds, leases n seconds ahead of the
// base window should be extended by only one second.
rateDelay -= float64(remaining - baseWindow)
delay := time.Duration(rateDelay)
nextWindow = baseWindow + delay
l.refresh(delay + extend)
}
}
type leasesByExpiry []*Lease
func (le leasesByExpiry) Len() int { return len(le) }
func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
func (le *lessor) Demote() {
le.mu.Lock()
defer le.mu.Unlock()
@@ -422,6 +470,10 @@ func (le *lessor) runLoop() {
le.mu.Unlock()
if len(ls) != 0 {
// rate limit
if len(ls) > leaseRevokeRate/2 {
ls = ls[:leaseRevokeRate/2]
}
select {
case <-le.stopC:
return

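The new Promote logic smooths a lease pile-up: once a one-second window holds more than targetExpiresPerSecond leases (3/4 of leaseRevokeRate), each additional lease is pushed out proportionally, so roughly 750 leases expire per second at the default rate of 1000, and runLoop additionally caps each revoke batch at leaseRevokeRate/2. A small standalone sketch of the same arithmetic, with made-up counts and all leases assumed to sit in one window (so the remaining-baseWindow credit is zero):

package main

import (
	"fmt"
	"time"
)

func main() {
	leaseRevokeRate := 1000
	target := (3 * leaseRevokeRate) / 4 // 750 expiries per second

	for _, expires := range []int{750, 751, 1500, 3000} {
		if expires <= target {
			fmt.Printf("lease #%4d: kept in its window\n", expires)
			continue
		}
		// same proportional delay as Promote above, minus the
		// (remaining - baseWindow) credit, which is zero here
		rateDelay := float64(time.Second) * (float64(expires) / float64(target))
		fmt.Printf("lease #%4d: delayed by %v\n", expires, time.Duration(rateDelay))
	}
}

Lease #1500 lands about 2s out and #3000 about 4s out, which is the ~750-per-second spread the comment in Promote describes.
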
View File

@@ -42,6 +42,7 @@ func TestLessorGrant(t *testing.T) {
defer be.Close()
le := newLessor(be, minLeaseTTL)
defer le.Stop()
le.Promote(0)
l, err := le.Grant(1, 1)
@@ -87,6 +88,7 @@ func TestLeaseConcurrentKeys(t *testing.T) {
defer be.Close()
le := newLessor(be, minLeaseTTL)
defer le.Stop()
le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) })
// grant a lease with long term (100 seconds) to
@@ -134,6 +136,7 @@ func TestLessorRevoke(t *testing.T) {
defer be.Close()
le := newLessor(be, minLeaseTTL)
defer le.Stop()
var fd *fakeDeleter
le.SetRangeDeleter(func() TxnDelete {
fd = newFakeDeleter(be)
@@ -185,6 +188,7 @@ func TestLessorRenew(t *testing.T) {
defer os.RemoveAll(dir)
le := newLessor(be, minLeaseTTL)
defer le.Stop()
le.Promote(0)
l, err := le.Grant(1, minLeaseTTL)
@@ -210,12 +214,66 @@ func TestLessorRenew(t *testing.T) {
}
}
// TestLessorRenewExtendPileup ensures Lessor extends leases on promotion if too many
// expire at the same time.
func TestLessorRenewExtendPileup(t *testing.T) {
oldRevokeRate := leaseRevokeRate
defer func() { leaseRevokeRate = oldRevokeRate }()
leaseRevokeRate = 10
dir, be := NewTestBackend(t)
defer os.RemoveAll(dir)
le := newLessor(be, minLeaseTTL)
ttl := int64(10)
for i := 1; i <= leaseRevokeRate*10; i++ {
if _, err := le.Grant(LeaseID(2*i), ttl); err != nil {
t.Fatal(err)
}
// TTLs that overlap the spillover window for ttl=10
if _, err := le.Grant(LeaseID(2*i+1), ttl+1); err != nil {
t.Fatal(err)
}
}
// simulate stop and recovery
le.Stop()
be.Close()
bcfg := backend.DefaultBackendConfig()
bcfg.Path = filepath.Join(dir, "be")
be = backend.New(bcfg)
defer be.Close()
le = newLessor(be, minLeaseTTL)
defer le.Stop()
// extend after recovery should extend expiration on lease pile-up
le.Promote(0)
windowCounts := make(map[int64]int)
for _, l := range le.leaseMap {
// round up slightly for baseline ttl
s := int64(l.Remaining().Seconds() + 0.1)
windowCounts[s]++
}
for i := ttl; i < ttl+20; i++ {
c := windowCounts[i]
if c > leaseRevokeRate {
t.Errorf("expected at most %d expiring at %ds, got %d", leaseRevokeRate, i, c)
}
if c < leaseRevokeRate/2 {
t.Errorf("expected at least %d expiring at %ds, got %d", leaseRevokeRate/2, i, c)
}
}
}
func TestLessorDetach(t *testing.T) {
dir, be := NewTestBackend(t)
defer os.RemoveAll(dir)
defer be.Close()
le := newLessor(be, minLeaseTTL)
defer le.Stop()
le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) })
// grant a lease with long term (100 seconds) to
@@ -255,6 +313,7 @@ func TestLessorRecover(t *testing.T) {
defer be.Close()
le := newLessor(be, minLeaseTTL)
defer le.Stop()
l1, err1 := le.Grant(1, 10)
l2, err2 := le.Grant(2, 20)
if err1 != nil || err2 != nil {
@@ -263,6 +322,7 @@ func TestLessorRecover(t *testing.T) {
// Create a new lessor with the same backend
nle := newLessor(be, minLeaseTTL)
defer nle.Stop()
nl1 := nle.Lookup(l1.ID)
if nl1 == nil || nl1.ttl != l1.ttl {
t.Errorf("nl1 = %v, want nl1.ttl= %d", nl1.ttl, l1.ttl)

View File

@@ -25,6 +25,9 @@ import (
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
// Functional tests for features implemented in v3 store. It treats v3 store
@@ -612,6 +615,7 @@ func TestKVRestore(t *testing.T) {
kv.Put([]byte("foo"), []byte("bar0"), 1)
kv.Put([]byte("foo"), []byte("bar1"), 2)
kv.Put([]byte("foo"), []byte("bar2"), 3)
kv.Put([]byte("foo2"), []byte("bar0"), 1)
},
func(kv KV) {
kv.Put([]byte("foo"), []byte("bar0"), 1)
@@ -633,10 +637,17 @@ func TestKVRestore(t *testing.T) {
r, _ := s.Range([]byte("a"), []byte("z"), RangeOptions{Rev: k})
kvss = append(kvss, r.KVs)
}
keysBefore := readGaugeInt(&keysGauge)
s.Close()
// ns should recover the previous state from backend.
ns := NewStore(b, &lease.FakeLessor{}, nil)
if keysRestore := readGaugeInt(&keysGauge); keysBefore != keysRestore {
t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
}
// wait for possible compaction to finish
testutil.WaitSchedule()
var nkvss [][]mvccpb.KeyValue
@@ -652,6 +663,15 @@ func TestKVRestore(t *testing.T) {
}
}
func readGaugeInt(g *prometheus.Gauge) int {
ch := make(chan prometheus.Metric, 1)
keysGauge.Collect(ch)
m := <-ch
mm := &dto.Metric{}
m.Write(mm)
return int(mm.GetGauge().GetValue())
}
func TestKVSnapshot(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)

View File

@@ -272,6 +272,7 @@ func (s *store) restore() error {
}
// index keys concurrently as they're loaded in from tx
keysGauge.Set(0)
rkvc, revc := restoreIntoIndex(s.kvindex)
for {
keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))

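The one-line restore() fix above zeroes keysGauge before the index is rebuilt, so a restored store doesn't report the pre-restore key count on top of the re-indexed one. A self-contained sketch of the same reset-then-recount pattern, read back the way the new readGaugeInt test helper does; the gauge options mirror etcd's but the names here are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

var keysGauge = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "etcd_debugging",
	Subsystem: "mvcc",
	Name:      "keys_total",
	Help:      "Total number of keys.",
})

func main() {
	keysGauge.Set(42) // stale count left over from before the restore
	keysGauge.Set(0)  // restore() now zeroes the gauge first
	for i := 0; i < 3; i++ {
		keysGauge.Inc() // re-counted as keys are re-indexed from the backend
	}

	// read the gauge back through its Collect channel
	ch := make(chan prometheus.Metric, 1)
	keysGauge.Collect(ch)
	mm := &dto.Metric{}
	(<-ch).Write(mm)
	fmt.Println(int(mm.GetGauge().GetValue())) // prints 3
}
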
View File

@@ -179,6 +179,21 @@ func (s *watchableStore) cancelWatcher(wa *watcher) {
s.mu.Unlock()
}
func (s *watchableStore) Restore(b backend.Backend) error {
s.mu.Lock()
defer s.mu.Unlock()
err := s.store.Restore(b)
if err != nil {
return err
}
for wa := range s.synced.watchers {
s.unsynced.watchers.add(wa)
}
s.synced = newWatcherGroup()
return nil
}
// syncWatchersLoop syncs the watcher in the unsynced map every 100ms.
func (s *watchableStore) syncWatchersLoop() {
defer s.wg.Done()

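The new Restore method demotes every synced watcher into the unsynced group and starts over with an empty synced group, so syncWatchersLoop replays events from the restored backend; this is the "sending events after restore" fix. A toy sketch of just the set shuffle, with simplified stand-in types rather than etcd's watcher groups:

package main

import "fmt"

type watcher struct{ key string }

func main() {
	w1, w2 := &watcher{key: "foo"}, &watcher{key: "bar"}
	synced := map[*watcher]struct{}{w1: {}, w2: {}}
	unsynced := map[*watcher]struct{}{}

	// on Restore: move everything out of synced so the sync loop
	// re-delivers events from the restored revision onward
	for w := range synced {
		unsynced[w] = struct{}{}
	}
	synced = map[*watcher]struct{}{}

	fmt.Println("synced:", len(synced), "unsynced:", len(unsynced)) // synced: 0 unsynced: 2
}
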
View File

@@ -294,6 +294,39 @@ func TestWatchFutureRev(t *testing.T) {
}
}
func TestWatchRestore(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
testKey := []byte("foo")
testValue := []byte("bar")
rev := s.Put(testKey, testValue, lease.NoLease)
newBackend, newPath := backend.NewDefaultTmpBackend()
newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil)
defer cleanup(newStore, newBackend, newPath)
w := newStore.NewWatchStream()
w.Watch(testKey, nil, rev-1)
newStore.Restore(b)
select {
case resp := <-w.Chan():
if resp.Revision != rev {
t.Fatalf("rev = %d, want %d", resp.Revision, rev)
}
if len(resp.Events) != 1 {
t.Fatalf("failed to get events from the response")
}
if resp.Events[0].Kv.ModRevision != rev {
t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
}
case <-time.After(time.Second):
t.Fatal("failed to receive event in 1 second.")
}
}
// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()

View File

@@ -118,6 +118,7 @@ func interestingGoroutines() (gs []string) {
}
stack := strings.TrimSpace(sl[1])
if stack == "" ||
strings.Contains(stack, "sync.(*WaitGroup).Done") ||
strings.Contains(stack, "created by os/signal.init") ||
strings.Contains(stack, "runtime/panic.go") ||
strings.Contains(stack, "created by testing.RunTests") ||

View File

@@ -20,6 +20,7 @@ import (
"crypto/x509"
"fmt"
"net"
"strings"
"sync"
)
@@ -142,27 +143,73 @@ func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error {
return herr
}
if len(cert.IPAddresses) > 0 {
if cerr := cert.VerifyHostname(h); cerr != nil && len(cert.DNSNames) == 0 {
cerr := cert.VerifyHostname(h)
if cerr == nil {
return nil
}
if len(cert.DNSNames) == 0 {
return cerr
}
}
if len(cert.DNSNames) > 0 {
for _, dns := range cert.DNSNames {
addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
if lerr != nil {
continue
}
for _, addr := range addrs {
if addr == h {
return nil
}
}
ok, err := isHostInDNS(ctx, h, cert.DNSNames)
if ok {
return nil
}
return fmt.Errorf("tls: %q does not match any of DNSNames %q", h, cert.DNSNames)
errStr := ""
if err != nil {
errStr = " (" + err.Error() + ")"
}
return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames)
}
return nil
}
func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
// reverse lookup
wildcards, names := []string{}, []string{}
for _, dns := range dnsNames {
if strings.HasPrefix(dns, "*.") {
wildcards = append(wildcards, dns[1:])
} else {
names = append(names, dns)
}
}
lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host)
for _, name := range lnames {
// strip trailing '.' from PTR record
if name[len(name)-1] == '.' {
name = name[:len(name)-1]
}
for _, wc := range wildcards {
if strings.HasSuffix(name, wc) {
return true, nil
}
}
for _, n := range names {
if n == name {
return true, nil
}
}
}
err = lerr
// forward lookup
for _, dns := range names {
addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
if lerr != nil {
err = lerr
continue
}
for _, addr := range addrs {
if addr == host {
return true, nil
}
}
}
return false, err
}
func (l *tlsListener) Close() error {
err := l.Listener.Close()
<-l.donec

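checkCert now defers DNS matching to isHostInDNS, which first reverse-resolves the peer IP and suffix-matches the PTR names against any "*."-prefixed SAN, then falls back to forward lookups of the plain names. A self-contained sketch of the reverse-lookup half; the IP and SAN values are hypothetical, and a real match needs PTR records such as those served by the wildcard-DNS docker image added alongside this change:

package main

import (
	"context"
	"fmt"
	"net"
	"strings"
)

// matchesWildcard reports whether ip reverse-resolves to a name covered by
// a "*."-prefixed DNS SAN, mirroring the reverse-lookup half of isHostInDNS.
func matchesWildcard(ctx context.Context, ip, wildcardSAN string) bool {
	suffix := strings.TrimPrefix(wildcardSAN, "*") // "*.etcd.local" -> ".etcd.local"
	names, err := net.DefaultResolver.LookupAddr(ctx, ip)
	if err != nil {
		return false
	}
	for _, name := range names {
		name = strings.TrimSuffix(name, ".") // strip trailing dot from PTR record
		if strings.HasSuffix(name, suffix) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matchesWildcard(context.Background(), "10.0.1.5", "*.etcd.local"))
}
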
View File

@@ -206,6 +206,9 @@ func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
if r.IgnoreLease {
opts = append(opts, clientv3.WithIgnoreLease())
}
if r.PrevKv {
opts = append(opts, clientv3.WithPrevKV())
}
return clientv3.OpPut(string(r.Key), string(r.Value), opts...)
}

Some files were not shown because too many files have changed in this diff.