Compare commits
129 Commits
Author | SHA1 | Date | |
---|---|---|---|
d6523fe463 | |||
c25127a699 | |||
9f031e6218 | |||
e55724e959 | |||
29af192e3d | |||
2fc79912c2 | |||
ebb8d781b5 | |||
2e30b3c17f | |||
9a2d82854e | |||
b077dcf6c4 | |||
2b572cb6e8 | |||
f36d55f062 | |||
9f70568a02 | |||
1ca7d1e064 | |||
4f1f003d04 | |||
49e0dff2b8 | |||
686837227e | |||
f2652f005e | |||
5490eb5406 | |||
70dda950ed | |||
a884f2a18a | |||
bdeb96be0f | |||
c00594e680 | |||
919cd380ec | |||
b83aec6b87 | |||
05bfb369ef | |||
0639c4c86d | |||
877b3d51bb | |||
d9df58beb8 | |||
1cffdb3a48 | |||
0593a52107 | |||
f7854c4ab9 | |||
13b0e72304 | |||
43791a2f41 | |||
a8d966d8f3 | |||
ad7194d5d0 | |||
084dcb5596 | |||
8a0266a806 | |||
2338481bb1 | |||
ce1e19ae2f | |||
a288333e6f | |||
097aac79f5 | |||
cd820269a6 | |||
ac7e6bb002 | |||
4b45cd4110 | |||
fb426aec9e | |||
774cb03f83 | |||
4fb6087f4a | |||
5524131a9e | |||
3efb4d837b | |||
20147c5357 | |||
973bde9a07 | |||
494d2c67aa | |||
fb32a999a6 | |||
d2f5934aa1 | |||
4f3fb5a702 | |||
9f5ec7732e | |||
774cf34827 | |||
92df44276d | |||
eb00f200d3 | |||
38d16775ab | |||
690fd12b07 | |||
b31483b2be | |||
e9a21dda4b | |||
2134036942 | |||
6bd2ee4c49 | |||
fcd429467e | |||
e5e759b962 | |||
d8a08f53e3 | |||
3e95bf0fa3 | |||
0d2512cb99 | |||
a29f6fb799 | |||
f4f429d4e3 | |||
fc2afe1ed2 | |||
24a442383b | |||
f387bf8464 | |||
83b06c0715 | |||
75dc10c39d | |||
66acf8a4e9 | |||
1359d29fa4 | |||
dc1f4adcd0 | |||
9970141f76 | |||
16c2bcf951 | |||
868b7f7902 | |||
1c958f8fc3 | |||
dfeecd2537 | |||
ed58193ebe | |||
79c650d900 | |||
a451cf2333 | |||
3455431da3 | |||
9424a10f49 | |||
fbcfe8e1c4 | |||
757bb3af13 | |||
2cd367e9d9 | |||
a974bbfe4f | |||
99dcc8c322 | |||
3d2523e7e0 | |||
25e69d9659 | |||
707174b56a | |||
ce92cc3dc5 | |||
5bfbf3a48c | |||
e04a188358 | |||
a51fda3e5e | |||
ca44801650 | |||
2387ef3f21 | |||
d5bfca9465 | |||
7cb126967c | |||
444e017c05 | |||
356675b70f | |||
d7768635fd | |||
37796ed84c | |||
f007cf321d | |||
ca29691543 | |||
4bebb538eb | |||
c27db1ec5e | |||
a5fc1d214d | |||
1df0b941d7 | |||
3a71eb9d72 | |||
001cceb1cd | |||
98ff4af7f2 | |||
db4c5e0eaa | |||
b3c5ed60bd | |||
673d90728e | |||
22c944d8ef | |||
e01a1f70c3 | |||
2e4ea503b0 | |||
c7aef5fdf2 | |||
c4605160c5 | |||
054de85da2 |
33
CHANGELOG
33
CHANGELOG
@ -1,3 +1,36 @@
|
||||
v0.4.6
|
||||
* Fix long-term timer leak (#900, #875, #868, #904)
|
||||
* Fix `Running` field in standby_info file (#881)
|
||||
* Add `quorum=true` query parameter for GET requests (#866, #883)
|
||||
* Add `Access-Control-Allow-Headers` header for CORS requests (#886)
|
||||
* Various documentation improvements (#907, #882)
|
||||
|
||||
v0.4.5
|
||||
* Flush headers immediately on `wait=true` requests (#877)
|
||||
* Add `ETCD_HTTP_READ_TIMEOUT` and `ETCD_HTTP_WRITE_TIMEOUT` (#880)
|
||||
* Add `ETCDCTL_PEERS` configuration to etcdctl (#95)
|
||||
* etcdctl takes stdin for mk (#91)
|
||||
|
||||
v0.4.4
|
||||
* Fix `--no-sync` flag in etcdctl (#83)
|
||||
* Improved logging for machine removal (#844)
|
||||
* Various documentation improvements (#858, #851, #847)
|
||||
|
||||
v0.4.3
|
||||
* Avoid panic() on truncated or unexpected log data (#834, #833)
|
||||
* Fix missing stats field (#807)
|
||||
* Lengthen default peer removal delay to 30mins (#835)
|
||||
* Reduce logging on heartbeat timeouts (#836)
|
||||
|
||||
v0.4.2
|
||||
* Improvements to the clustering documents
|
||||
* Set content-type properly on errors (#469)
|
||||
* Standbys re-join if they should be part of the cluster (#810, #815, #818)
|
||||
|
||||
v0.4.1
|
||||
* Re-introduce DELETE on the machines endpoint
|
||||
* Document the machines endpoint
|
||||
|
||||
v0.4.0
|
||||
* Introduced standby mode
|
||||
* Added HEAD requests
|
||||
|
@ -30,47 +30,32 @@ The coding style suggested by the Golang community is used in etcd. See [style d
|
||||
|
||||
Please follow this style to make etcd easy to review, maintain and develop.
|
||||
|
||||
### Format of the commit message
|
||||
### Format of the Commit Message
|
||||
|
||||
etcd follows a rough convention for commit messages borrowed from Angularjs. This is an example of a commit:
|
||||
We follow a rough convention for commit messages that is designed to answer two
|
||||
questions: what changed and why. The subject line should feature the what and
|
||||
the body of the commit should describe the why.
|
||||
|
||||
```
|
||||
feat(scripts/test-cluster): add a cluster test command
|
||||
scripts: add the test-cluster command
|
||||
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
|
||||
Fixes #38
|
||||
```
|
||||
|
||||
The format can be more formally described as follows:
|
||||
The format can be described more formally as follows:
|
||||
|
||||
```
|
||||
<type>(<scope>): <subject>
|
||||
<subsystem>: <what changed>
|
||||
<BLANK LINE>
|
||||
<body>
|
||||
<why this change was made>
|
||||
<BLANK LINE>
|
||||
<footer>
|
||||
```
|
||||
|
||||
The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters. This allows the message to be easier to read on github as well as
|
||||
in various git tools.
|
||||
|
||||
### Subject line
|
||||
|
||||
The subject line contains a succinct description of the change.
|
||||
|
||||
### Allowed <type>s
|
||||
- feat (feature)
|
||||
- fix (bug fix)
|
||||
- docs (documentation)
|
||||
- style (formatting, missing semi colons, …)
|
||||
- refactor
|
||||
- test (when adding missing tests)
|
||||
- chore (maintain)
|
||||
|
||||
### Allowed <scope>s
|
||||
|
||||
Scopes can be anything specifying the place of the commit change within the repository. For example, "store", "API", etc.
|
||||
|
||||
### More details on commits
|
||||
|
||||
For more details see the [angularjs commit style guide](https://docs.google.com/a/coreos.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit#).
|
||||
The first line is the subject and should be no longer than 70 characters, the
|
||||
second line is always blank, and other lines should be wrapped at 80 characters.
|
||||
This allows the message to be easier to read on GitHub as well as in various
|
||||
git tools.
|
||||
|
@ -2,7 +2,7 @@ FROM ubuntu:12.04
|
||||
# Let's install go just like Docker (from source).
|
||||
RUN apt-get update -q
|
||||
RUN DEBIAN_FRONTEND=noninteractive apt-get install -qy build-essential curl git
|
||||
RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz
|
||||
RUN curl -s https://storage.googleapis.com/golang/go1.3.src.tar.gz | tar -v -C /usr/local -xz
|
||||
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
|
||||
ENV PATH /usr/local/go/bin:$PATH
|
||||
ADD . /opt/etcd
|
||||
|
@ -13,6 +13,14 @@ This will bring up etcd listening on default ports (4001 for client communicatio
|
||||
The `-data-dir machine0` argument tells etcd to write machine configuration, logs and snapshots to the `./machine0/` directory.
|
||||
The `-name machine0` tells the rest of the cluster that this machine is named machine0.
|
||||
|
||||
## Getting the etcd version
|
||||
|
||||
The etcd version of a specific instance can be obtained from the `/version` endpoint.
|
||||
|
||||
```sh
|
||||
curl -L http://127.0.0.1:4001/version
|
||||
```
|
||||
|
||||
## Key Space Operations
|
||||
|
||||
The primary API of etcd is a hierarchical key space.
|
||||
@ -833,6 +841,8 @@ curl -L http://127.0.0.1:4001/v2/keys/afile -XPUT --data-urlencode value@afile.t
|
||||
|
||||
### Read Consistency
|
||||
|
||||
#### Read from the Master
|
||||
|
||||
Followers in a cluster can be behind the leader in their copy of the keyspace.
|
||||
If your application wants or needs the most up-to-date version of a key then it should ensure it reads from the current leader.
|
||||
By using the `consistent=true` flag in your GET requests, etcd will make sure you are talking to the current master.
|
||||
@ -843,13 +853,26 @@ The client is told the write was successful and the keyspace is updated.
|
||||
Meanwhile F2 has partitioned from the network and will have an out-of-date version of the keyspace until the partition resolves.
|
||||
Since F2 missed the most recent write, a client reading from F2 will have an out-of-date version of the keyspace.
|
||||
|
||||
## Lock Module (*Deprecated*)
|
||||
Implementation notes on `consistent=true`: If the leader you are talking to is
|
||||
partitioned it will be unable to determine if it is not currently the master.
|
||||
In a later version we will provide a mechanism to set an upper bound of time
|
||||
that the current master can be unable to contact the quorum and still serve
|
||||
reads.
|
||||
|
||||
### Read Linearization
|
||||
|
||||
If you want a read that is fully linearized you can use a `quorum=true` GET.
|
||||
The read will take a very similar path to a write and will have a similar
|
||||
speed. If you are unsure if you need this feature feel free to email etcd-dev
|
||||
for advice.
|
||||
|
||||
## Lock Module (*Deprecated and Removed*)
|
||||
|
||||
The lock module is used to serialize access to resources used by clients.
|
||||
Multiple clients can attempt to acquire a lock but only one can have it at a time.
|
||||
Once the lock is released, the next client waiting for the lock will receive it.
|
||||
|
||||
**Warning:** This module is deprecated at v0.4. See [Modules][modules] for more details.
|
||||
**Warning:** This module is deprecated and removed at v0.4. See [Modules][modules] for more details.
|
||||
|
||||
|
||||
### Acquiring a Lock
|
||||
|
@ -29,16 +29,16 @@ The v2 API has a lot of features, we will categorize them in a few categories:
|
||||
|
||||
### Supported features matrix
|
||||
|
||||
| Client| [go-etcd](https://github.com/coreos/go-etcd) | [jetcd](https://github.com/diwakergupta/jetcd) | [python-etcd](https://github.com/jplana/python-etcd) | [python-etcd-client](https://github.com/dsoprea/PythonEtcdClient) | [node-etcd](https://github.com/stianeikeland/node-etcd) | [nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) | [etcd-ruby](https://github.com/ranjib/etcd-ruby) | [etcd-api](https://github.com/jdarcy/etcd-api) | [cetcd](https://github.com/dwwoelfel/cetcd) | [clj-etcd](https://github.com/rthomas/clj-etcd) | [etcetera](https://github.com/drusellers/etcetera)| [Etcd.jl](https://github.com/forio/Etcd.jl) |
|
||||
| --- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
|
||||
| **HTTPS Auth** | Y | Y | Y | Y | Y | Y | - | - | - | - | - | - |
|
||||
| **Reconnect** | Y | - | Y | Y | - | - | - | Y | - | - | - | - |
|
||||
| **Mod/Lock** | - | - | Y | Y | - | - | - | - | - | - | - | Y |
|
||||
| **Mod/Leader** | - | - | - | Y | - | - | - | - | - | - | - | Y |
|
||||
| **GET Features** | F | B | F | F | F | F | F | B | F | G | F | F |
|
||||
| **PUT Features** | F | B | F | F | F | F | F | G | F | G | F | F |
|
||||
| **POST Features** | F | - | F | F | - | F | F | - | - | - | F | F |
|
||||
| **DEL Features** | F | B | F | F | F | F | F | B | G | B | F | F |
|
||||
| Client| [go-etcd](https://github.com/coreos/go-etcd) | [jetcd](https://github.com/diwakergupta/jetcd) | [python-etcd](https://github.com/jplana/python-etcd) | [python-etcd-client](https://github.com/dsoprea/PythonEtcdClient) | [node-etcd](https://github.com/stianeikeland/node-etcd) | [nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) | [etcd-ruby](https://github.com/ranjib/etcd-ruby) | [etcd-api](https://github.com/jdarcy/etcd-api) | [cetcd](https://github.com/dwwoelfel/cetcd) | [clj-etcd](https://github.com/rthomas/clj-etcd) | [etcetera](https://github.com/drusellers/etcetera)| [Etcd.jl](https://github.com/forio/Etcd.jl) | [p5-etcd](https://metacpan.org/release/Etcd)
|
||||
| --- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
|
||||
| **HTTPS Auth** | Y | Y | Y | Y | Y | Y | - | - | - | - | - | - | - |
|
||||
| **Reconnect** | Y | - | Y | Y | - | - | - | Y | - | - | - | - | - |
|
||||
| **Mod/Lock** | - | - | Y | Y | - | - | - | - | - | - | - | Y | - |
|
||||
| **Mod/Leader** | - | - | - | Y | - | - | - | - | - | - | - | Y | - |
|
||||
| **GET Features** | F | B | F | F | F | F | F | B | F | G | F | F | F |
|
||||
| **PUT Features** | F | B | F | F | F | F | F | G | F | G | F | F | F |
|
||||
| **POST Features** | F | - | F | F | - | F | F | - | - | - | F | F | F |
|
||||
| **DEL Features** | F | B | F | F | F | F | F | B | G | B | F | F | F |
|
||||
|
||||
**Legend**
|
||||
|
||||
|
@ -8,6 +8,7 @@ For more information on how etcd can locate the cluster, see the [finding the cl
|
||||
|
||||
Please note - at least 3 nodes are required for [cluster availability][optimal-cluster-size].
|
||||
|
||||
[cluster-finding]: https://github.com/coreos/etcd/blob/master/Documentation/design/cluster-finding.md
|
||||
[optimal-cluster-size]: https://github.com/coreos/etcd/blob/master/Documentation/optimal-cluster-size.md
|
||||
|
||||
## Using discovery.etcd.io
|
||||
@ -27,8 +28,8 @@ Here's a full example:
|
||||
```
|
||||
TOKEN=$(curl https://discovery.etcd.io/new)
|
||||
./etcd -name instance1 -peer-addr 10.1.2.3:7001 -addr 10.1.2.3:4001 -discovery $TOKEN
|
||||
./etcd -name instance2 -peer-addr 10.1.2.4:7002 -addr 10.1.2.4:4002 -discovery $TOKEN
|
||||
./etcd -name instance3 -peer-addr 10.1.2.5:7002 -addr 10.1.2.5:4002 -discovery $TOKEN
|
||||
./etcd -name instance2 -peer-addr 10.1.2.4:7001 -addr 10.1.2.4:4001 -discovery $TOKEN
|
||||
./etcd -name instance3 -peer-addr 10.1.2.5:7001 -addr 10.1.2.5:4001 -discovery $TOKEN
|
||||
```
|
||||
|
||||
## Running Your Own Discovery Endpoint
|
||||
@ -38,8 +39,8 @@ The discovery API communicates with a separate etcd cluster to store and retriev
|
||||
```
|
||||
TOKEN="testcluster"
|
||||
./etcd -name instance1 -peer-addr 10.1.2.3:7001 -addr 10.1.2.3:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
|
||||
./etcd -name instance2 -peer-addr 10.1.2.4:7002 -addr 10.1.2.4:4002 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
|
||||
./etcd -name instance3 -peer-addr 10.1.2.5:7002 -addr 10.1.2.5:4002 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
|
||||
./etcd -name instance2 -peer-addr 10.1.2.4:7001 -addr 10.1.2.4:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
|
||||
./etcd -name instance3 -peer-addr 10.1.2.5:7001 -addr 10.1.2.5:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
|
||||
```
|
||||
|
||||
If you're interested in how the discovery API works behind the scenes, read about the [Discovery Protocol](https://github.com/coreos/etcd/blob/master/Documentation/discovery-protocol.md).
|
||||
@ -52,4 +53,8 @@ The Discovery API submits the `-peer-addr` of each etcd instance to the configur
|
||||
|
||||
The discovery API will automatically clean up the address of a stale peer that is no longer part of the cluster. The TTL for this process is a week, which should be long enough to handle any extremely long outage you may encounter. There is no harm in having stale peers in the list until they are cleaned up, since an etcd instance only needs to connect to one valid peer in the cluster to join.
|
||||
|
||||
[discovery-design]: https://github.com/coreos/etcd/blob/master/Documentation/design/cluster-finding.md
|
||||
## Lifetime of a Discovery URL
|
||||
|
||||
A discovery URL identifies a single etcd cluster. Do not re-use discovery URLs for new clusters.
|
||||
|
||||
When a machine starts with a new discovery URL the discovery URL will be activated and record the machine's metadata. If you destroy the whole cluster and attempt to bring the cluster back up with the same discovery URL it will fail. This is intentional because all of the registered machines are gone including their logs so there is nothing to recover the killed cluster.
|
||||
|
@ -107,7 +107,7 @@ curl -L http://127.0.0.1:4001/v2/keys/foo -XPUT -d value=bar
|
||||
|
||||
If one machine disconnects from the cluster, it could rejoin the cluster automatically when the communication is recovered.
|
||||
|
||||
If one machine is killed, it could rejoin the cluster when started with old name. If the peer address is changed, etcd will treat the new peer address as the refreshed one, which benefits instance migration, or virtual machine boot with different IP.
|
||||
If one machine is killed, it could rejoin the cluster when started with old name. If the peer address is changed, etcd will treat the new peer address as the refreshed one, which benefits instance migration, or virtual machine boot with different IP. The peer-address-changing functionality is only supported when the majority of the cluster is alive, because this behavior needs the consensus of the etcd cluster.
|
||||
|
||||
**Note:** For now, it is the user's responsibility to ensure that the machine doesn't join a cluster that already has a member with the same name; otherwise an unexpected error will happen. This will be improved in a future release.
|
||||
|
||||
@ -167,15 +167,3 @@ Etcd can also do internal server-to-server communication using SSL client certs.
|
||||
To do this just change the `-*-file` flags to `-peer-*-file`.
|
||||
|
||||
If you are using SSL for server-to-server communication, you must use it on all instances of etcd.
|
||||
|
||||
|
||||
### What size cluster should I use?
|
||||
|
||||
Every command the client sends to the master is broadcast to all of the followers.
|
||||
The command is not committed until the majority of the cluster peers receive that command.
|
||||
|
||||
Because of this majority voting property, the ideal cluster should be kept small to keep speed up and be made up of an odd number of peers.
|
||||
|
||||
Odd numbers are good because if you have 8 peers the majority will be 5 and if you have 9 peers the majority will still be 5.
|
||||
The result is that an 8 peer cluster can tolerate 3 peer failures and a 9 peer cluster can tolerate 4 machine failures.
|
||||
And in the best case when all 9 peers are responding the cluster will perform at the speed of the fastest 5 machines.
|
||||
|
@ -1,6 +1,8 @@
|
||||
# Etcd Configuration
|
||||
|
||||
Configuration options can be set in three places:
|
||||
## Node Configuration
|
||||
|
||||
Individual node configuration options can be set in three places:
|
||||
|
||||
1. Command line flags
|
||||
2. Environment variables
|
||||
@ -10,16 +12,28 @@ Options set on the command line take precedence over all other sources.
|
||||
Options set in environment variables take precedence over options set in
|
||||
configuration files.
|
||||
|
||||
## Cluster Configuration
|
||||
|
||||
Cluster-wide settings are configured via the `/config` admin endpoint and additionally in the configuration file. Values contained in the configuration file will seed the cluster setting with the provided value. After the cluster is running, only the admin endpoint is used.
|
||||
|
||||
The full documentation is contained in the [API docs](https://github.com/coreos/etcd/blob/master/Documentation/api.md#cluster-config).
|
||||
|
||||
* `activeSize` - the maximum number of peers that can participate in the consensus protocol. Other peers will join as standbys.
|
||||
* `removeDelay` - the minimum time in seconds that a machine has been observed to be unresponsive before it is removed from the cluster.
|
||||
* `syncInterval` - the amount of time in seconds between cluster sync when it runs in standby mode.
|
||||
|
||||
## Command Line Flags
|
||||
|
||||
### Required
|
||||
|
||||
* `-name` - The node name. Defaults to the hostname.
|
||||
* `-name` - The node name. Defaults to a UUID.
|
||||
|
||||
### Optional
|
||||
|
||||
* `-addr` - The advertised public hostname:port for client communication. Defaults to `127.0.0.1:4001`.
|
||||
* `-discovery` - A URL to use for discovering the peer list. (i.e `"https://discovery.etcd.io/your-unique-key"`).
|
||||
* `-http-read-timeout` - The number of seconds before an HTTP read operation is timed out.
|
||||
* `-http-write-timeout` - The number of seconds before an HTTP write operation is timed out.
|
||||
* `-bind-addr` - The listening hostname for client communication. Defaults to advertised IP.
|
||||
* `-peers` - A comma separated list of peers in the cluster (i.e `"203.0.113.101:7001,203.0.113.102:7001"`).
|
||||
* `-peers-file` - The file path containing a comma separated list of peers in the cluster.
|
||||
@ -31,7 +45,6 @@ configuration files.
|
||||
* `-cpuprofile` - The path to a file to output CPU profile data. Enables CPU profiling when present.
|
||||
* `-data-dir` - The directory to store log and snapshot. Defaults to the current working directory.
|
||||
* `-max-result-buffer` - The max size of result buffer. Defaults to `1024`.
|
||||
* `-max-cluster-size` - The max size of the cluster. Defaults to `9`.
|
||||
* `-max-retry-attempts` - The max retry attempts when trying to join a cluster. Defaults to `3`.
|
||||
* `-peer-addr` - The advertised public hostname:port for server communication. Defaults to `127.0.0.1:7001`.
|
||||
* `-peer-bind-addr` - The listening hostname for server communication. Defaults to advertised IP.
|
||||
@ -41,6 +54,9 @@ configuration files.
|
||||
* `-peer-election-timeout` - The number of milliseconds to wait before the leader is declared unhealthy.
|
||||
* `-peer-heartbeat-interval` - The number of milliseconds in between heartbeat requests
|
||||
* `-snapshot=false` - Disable log snapshots. Defaults to `true`.
|
||||
* `-cluster-active-size` - The expected number of instances participating in the consensus protocol. Only applied if the etcd instance is the first peer in the cluster.
|
||||
* `-cluster-remove-delay` - The number of seconds before one node is removed from the cluster since it cannot be connected at all. Only applied if the etcd instance is the first peer in the cluster.
|
||||
* `-cluster-sync-interval` - The number of seconds between synchronization for standby-mode instance with the cluster. Only applied if the etcd instance is the first peer in the cluster.
|
||||
* `-v` - Enable verbose logging. Defaults to `false`.
|
||||
* `-vv` - Enable very verbose logging. Defaults to `false`.
|
||||
* `-version` - Print the version and exit.
|
||||
@ -59,6 +75,8 @@ cors = []
|
||||
cpu_profile_file = ""
|
||||
data_dir = "."
|
||||
discovery = "http://etcd.local:4001/v2/keys/_etcd/registry/examplecluster"
|
||||
http_read_timeout = 10
|
||||
http_write_timeout = 10
|
||||
key_file = ""
|
||||
peers = []
|
||||
peers_file = ""
|
||||
@ -76,6 +94,11 @@ bind_addr = "127.0.0.1:7001"
|
||||
ca_file = ""
|
||||
cert_file = ""
|
||||
key_file = ""
|
||||
|
||||
[cluster]
|
||||
active_size = 9
|
||||
remove_delay = 1800.0
|
||||
sync_interval = 5.0
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
@ -89,6 +112,8 @@ key_file = ""
|
||||
* `ETCD_CPU_PROFILE_FILE`
|
||||
* `ETCD_DATA_DIR`
|
||||
* `ETCD_DISCOVERY`
|
||||
* `ETCD_CLUSTER_HTTP_READ_TIMEOUT`
|
||||
* `ETCD_CLUSTER_HTTP_WRITE_TIMEOUT`
|
||||
* `ETCD_KEY_FILE`
|
||||
* `ETCD_PEERS`
|
||||
* `ETCD_PEERS_FILE`
|
||||
@ -105,3 +130,6 @@ key_file = ""
|
||||
* `ETCD_PEER_CERT_FILE`
|
||||
* `ETCD_PEER_KEY_FILE`
|
||||
* `ETCD_PEER_ELECTION_TIMEOUT`
|
||||
* `ETCD_CLUSTER_ACTIVE_SIZE`
|
||||
* `ETCD_CLUSTER_REMOVE_DELAY`
|
||||
* `ETCD_CLUSTER_SYNC_INTERVAL`
|
||||
|
@ -18,8 +18,8 @@ If there are not enough peers to meet the active size, standbys will send join r
|
||||
If there are more peers than the target active size then peers are removed by the leader and will become standbys.
|
||||
|
||||
The remove delay specifies how long the cluster should wait before removing a dead peer.
|
||||
By default this is 5 seconds.
|
||||
If a peer is inactive for 5 seconds then the peer is removed.
|
||||
By default this is 30 minutes.
|
||||
If a peer is inactive for 30 minutes then the peer is removed.
|
||||
|
||||
The standby sync interval specifies the synchronization interval of standbys with the cluster.
|
||||
By default this is 5 seconds.
|
||||
|
@ -17,9 +17,9 @@
|
||||
|
||||
**Python libraries**
|
||||
|
||||
- [transitorykris/etcd-py](https://github.com/transitorykris/etcd-py)
|
||||
- [jplana/python-etcd](https://github.com/jplana/python-etcd) - Supports v2
|
||||
- [russellhaering/txetcd](https://github.com/russellhaering/txetcd) - a Twisted Python library
|
||||
- [cholcombe973/autodock](https://github.com/cholcombe973/autodock) - A docker deployment automation tool
|
||||
|
||||
**Node libraries**
|
||||
|
||||
@ -89,3 +89,6 @@ A detailed recap of client functionalities can be found in the [clients compatib
|
||||
- [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
|
||||
- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
|
||||
- [fleet](https://github.com/coreos/fleet) - Distributed init system
|
||||
- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) - Container cluster manager.
|
||||
- [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
|
||||
- [duedil-ltd/discodns](https://github.com/duedil-ltd/discodns) - Simple DNS nameserver using etcd as a database for names and records.
|
||||
|
@ -1,7 +1,7 @@
|
||||
## Modules
|
||||
|
||||
etcd has a number of modules that are built on top of the core etcd API.
|
||||
These modules provide things like dashboards, locks and leader election.
|
||||
These modules provide things like dashboards, locks and leader election (removed).
|
||||
|
||||
**Warning**: Modules are deprecated from v0.4 until we have a solid base we can apply them back onto.
|
||||
For now, we are choosing to focus on raft algorithm and core etcd to make sure that it works correctly and fast.
|
||||
@ -81,7 +81,7 @@ curl -X DELETE http://127.0.0.1:4001/mod/v2/lock/customer1?value=bar
|
||||
```
|
||||
|
||||
|
||||
### Leader Election
|
||||
### Leader Election (Deprecated and Removed in 0.4)
|
||||
|
||||
The Leader Election module wraps the Lock module to allow clients to come to consensus on a single value.
|
||||
This is useful when you want one server to process at a time but allow other servers to fail over.
|
||||
|
@ -1,30 +1,38 @@
|
||||
# Optimal etcd Cluster Size
|
||||
|
||||
etcd's Raft consensus algorithm is most efficient in small clusters between 3 and 9 peers. Let's briefly explore how etcd works internally to understand why.
|
||||
|
||||
## Writing to etcd
|
||||
|
||||
Writes to an etcd peer are always redirected to the leader of the cluster and distributed to all of the peers immediately. A write is only considered successful when a majority of the peers acknowledge the write.
|
||||
|
||||
For example, in a 5 node cluster, a write operation is only as fast as the 3rd fastest machine. This is the main reason for keeping your etcd cluster below 9 nodes. In practice, you only need to worry about write performance in high latency environments such as a cluster spanning multiple data centers.
|
||||
|
||||
## Leader Election
|
||||
|
||||
The leader election process is similar to writing a key — a majority of the cluster must acknowledge the new leader before cluster operations can continue. The longer each node takes to elect a new leader means you have to wait longer before you can write to the cluster again. In low latency environments this process takes milliseconds.
|
||||
|
||||
## Odd Cluster Size
|
||||
|
||||
The other important cluster optimization is to always have an odd cluster size. Adding an odd node to the cluster doesn't change the size of the majority and therefore doesn't increase the total latency of the majority as described above. But you do gain a higher tolerance for peer failure by adding the extra machine. You can see this in practice when comparing two even and odd sized clusters:
|
||||
|
||||
| Cluster Size | Majority | Failure Tolerance |
|
||||
|--------------|------------|-------------------|
|
||||
| 8 machines | 5 machines | 3 machines |
|
||||
| 9 machines | 5 machines | **4 machines** |
|
||||
|
||||
As you can see, adding another node to bring the cluster up to an odd size is always worth it. During a network partition, an odd cluster size also guarantees that there will almost always be a majority of the cluster that can continue to operate and be the source of truth when the partition ends.
|
||||
etcd's Raft consensus algorithm is most efficient in small clusters between 3 and 9 peers. For clusters larger than 9, etcd will select a subset of instances to participate in the algorithm in order to keep it efficient. The end of this document briefly explores how etcd works internally and why these choices have been made.
|
||||
|
||||
## Cluster Management
|
||||
|
||||
Currently, each CoreOS machine is an etcd peer — if you have 30 CoreOS machines, you have 30 etcd peers and end up with a cluster size that is way too large. If desired, you may manually stop some of these etcd instances to increase cluster performance.
|
||||
You can manage the active cluster size through the [cluster config API](https://github.com/coreos/etcd/blob/master/Documentation/api.md#cluster-config). `activeSize` represents the etcd peers allowed to actively participate in the consensus algorithm.
|
||||
|
||||
Functionality is being developed to expose two different types of followers: active and benched followers. Active followers will influence operations within the cluster. Benched followers will not participate, but will transparently proxy etcd traffic to an active follower. This allows every CoreOS machine to expose etcd on port 4001 for ease of use. Benched followers will have the ability to transition into an active follower if needed.
|
||||
If the total number of etcd instances exceeds this number, additional peers are started as [standbys](https://github.com/coreos/etcd/blob/master/Documentation/design/standbys.md), which can be promoted to active participation if one of the existing active instances has failed or been removed.
|
||||
|
||||
## Internals of etcd
|
||||
|
||||
### Writing to etcd
|
||||
|
||||
Writes to an etcd peer are always redirected to the leader of the cluster and distributed to all of the peers immediately. A write is only considered successful when a majority of the peers acknowledge the write.
|
||||
|
||||
For example, in a cluster with 5 peers, a write operation is only as fast as the 3rd fastest machine. This is the main reason for keeping the number of active peers below 9. In practice, you only need to worry about write performance in high latency environments such as a cluster spanning multiple data centers.
|
||||
|
||||
### Leader Election
|
||||
|
||||
The leader election process is similar to writing a key — a majority of the active peers must acknowledge the new leader before cluster operations can continue. The longer each peer takes to elect a new leader means you have to wait longer before you can write to the cluster again. In low latency environments this process takes milliseconds.
|
||||
|
||||
### Odd Active Cluster Size
|
||||
|
||||
The other important cluster optimization is to always have an odd active cluster size (i.e. `activeSize`). Adding an odd node to the number of peers doesn't change the size of the majority and therefore doesn't increase the total latency of the majority as described above. But, you gain a higher tolerance for peer failure by adding the extra machine. You can see this in practice when comparing two even and odd sized clusters:
|
||||
|
||||
| Active Peers | Majority | Failure Tolerance |
|
||||
|--------------|------------|-------------------|
|
||||
| 1 peers | 1 peers | None |
|
||||
| 3 peers | 2 peers | 1 peer |
|
||||
| 4 peers | 3 peers | 1 peer |
|
||||
| 5 peers | 3 peers | **2 peers** |
|
||||
| 6 peers | 4 peers | 2 peers |
|
||||
| 7 peers | 4 peers | **3 peers** |
|
||||
| 8 peers | 5 peers | 3 peers |
|
||||
| 9 peers | 5 peers | **4 peers** |
|
||||
|
||||
As you can see, adding another peer to bring the number of active peers up to an odd size is always worth it. During a network partition, an odd number of active peers also guarantees that there will almost always be a majority of the cluster that can continue to operate and be the source of truth when the partition ends.
|
||||
|
@ -1,4 +1,4 @@
|
||||
ectd is being used successfully by many companies in production. It is,
|
||||
etcd is being used successfully by many companies in production. It is,
|
||||
however, under active development and systems like etcd are difficult to get
|
||||
correct. If you are comfortable with bleeding-edge software please use etcd and
|
||||
provide us with the feedback and testing young software needs.
|
||||
|
5
NOTICE
Normal file
5
NOTICE
Normal file
@ -0,0 +1,5 @@
|
||||
CoreOS Project
|
||||
Copyright 2014 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
@ -1,6 +1,6 @@
|
||||
# etcd
|
||||
|
||||
README version 0.4.0
|
||||
README version 0.4.7
|
||||
|
||||
A highly-available key value store for shared configuration and service discovery.
|
||||
etcd is inspired by [Apache ZooKeeper][zookeeper] and [doozer][doozer], with a focus on being:
|
||||
@ -95,7 +95,7 @@ You have successfully started an etcd on a single machine and written a key to t
|
||||
## Contact
|
||||
|
||||
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
|
||||
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) oon freenode.org
|
||||
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) on freenode.org
|
||||
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/issues/milestones)
|
||||
- Bugs: [issues](https://github.com/coreos/etcd/issues)
|
||||
|
||||
|
@ -36,7 +36,7 @@ var newFlagNameLookup = map[string]string{
|
||||
"d": "data-dir",
|
||||
"m": "max-result-buffer",
|
||||
"r": "max-retry-attempts",
|
||||
"maxsize": "max-cluster-size",
|
||||
"maxsize": "cluster-active-size",
|
||||
"clientCAFile": "ca-file",
|
||||
"clientCert": "cert-file",
|
||||
"clientKey": "key-file",
|
||||
@ -45,6 +45,7 @@ var newFlagNameLookup = map[string]string{
|
||||
"serverKey": "peer-key-file",
|
||||
"snapshotCount": "snapshot-count",
|
||||
"peer-heartbeat-timeout": "peer-heartbeat-interval",
|
||||
"max-cluster-size": "cluster-active-size",
|
||||
}
|
||||
|
||||
// Config represents the server configuration.
|
||||
@ -61,6 +62,8 @@ type Config struct {
|
||||
Discovery string `toml:"discovery" env:"ETCD_DISCOVERY"`
|
||||
Force bool
|
||||
KeyFile string `toml:"key_file" env:"ETCD_KEY_FILE"`
|
||||
HTTPReadTimeout float64 `toml:"http_read_timeout" env:"ETCD_HTTP_READ_TIMEOUT"`
|
||||
HTTPWriteTimeout float64 `toml:"http_write_timeout" env:"ETCD_HTTP_WRITE_TIMEOUT"`
|
||||
Peers []string `toml:"peers" env:"ETCD_PEERS"`
|
||||
PeersFile string `toml:"peers_file" env:"ETCD_PEERS_FILE"`
|
||||
MaxResultBuffer int `toml:"max_result_buffer" env:"ETCD_MAX_RESULT_BUFFER"`
|
||||
@ -97,6 +100,8 @@ func New() *Config {
|
||||
c := new(Config)
|
||||
c.SystemPath = DefaultSystemConfigPath
|
||||
c.Addr = "127.0.0.1:4001"
|
||||
c.HTTPReadTimeout = server.DefaultReadTimeout
|
||||
c.HTTPWriteTimeout = server.DefaultWriteTimeout
|
||||
c.MaxResultBuffer = 1024
|
||||
c.MaxRetryAttempts = 3
|
||||
c.RetryInterval = 10.0
|
||||
@ -254,6 +259,9 @@ func (c *Config) LoadFlags(arguments []string) error {
|
||||
f.StringVar(&c.Peer.CertFile, "peer-cert-file", c.Peer.CertFile, "")
|
||||
f.StringVar(&c.Peer.KeyFile, "peer-key-file", c.Peer.KeyFile, "")
|
||||
|
||||
f.Float64Var(&c.HTTPReadTimeout, "http-read-timeout", c.HTTPReadTimeout, "")
|
||||
f.Float64Var(&c.HTTPWriteTimeout, "http-write-timeout", c.HTTPReadTimeout, "")
|
||||
|
||||
f.StringVar(&c.DataDir, "data-dir", c.DataDir, "")
|
||||
f.IntVar(&c.MaxResultBuffer, "max-result-buffer", c.MaxResultBuffer, "")
|
||||
f.IntVar(&c.MaxRetryAttempts, "max-retry-attempts", c.MaxRetryAttempts, "")
|
||||
@ -297,6 +305,8 @@ func (c *Config) LoadFlags(arguments []string) error {
|
||||
f.IntVar(&c.MaxRetryAttempts, "r", c.MaxRetryAttempts, "(deprecated)")
|
||||
f.IntVar(&c.SnapshotCount, "snapshotCount", c.SnapshotCount, "(deprecated)")
|
||||
f.IntVar(&c.Peer.HeartbeatInterval, "peer-heartbeat-timeout", c.Peer.HeartbeatInterval, "(deprecated)")
|
||||
f.IntVar(&c.Cluster.ActiveSize, "max-cluster-size", c.Cluster.ActiveSize, "(deprecated)")
|
||||
f.IntVar(&c.Cluster.ActiveSize, "maxsize", c.Cluster.ActiveSize, "(deprecated)")
|
||||
// END DEPRECATED FLAGS
|
||||
|
||||
if err := f.Parse(arguments); err != nil {
|
||||
|
@ -27,9 +27,11 @@ func TestConfigTOML(t *testing.T) {
|
||||
max_result_buffer = 512
|
||||
max_retry_attempts = 5
|
||||
name = "test-name"
|
||||
http_read_timeout = 2.34
|
||||
snapshot = true
|
||||
verbose = true
|
||||
very_verbose = true
|
||||
http_write_timeout = 1.23
|
||||
|
||||
[peer]
|
||||
addr = "127.0.0.1:7002"
|
||||
@ -52,6 +54,8 @@ func TestConfigTOML(t *testing.T) {
|
||||
assert.Equal(t, c.CorsOrigins, []string{"*"}, "")
|
||||
assert.Equal(t, c.DataDir, "/tmp/data", "")
|
||||
assert.Equal(t, c.Discovery, "http://example.com/foobar", "")
|
||||
assert.Equal(t, c.HTTPReadTimeout, 2.34, "")
|
||||
assert.Equal(t, c.HTTPWriteTimeout, 1.23, "")
|
||||
assert.Equal(t, c.KeyFile, "/tmp/file.key", "")
|
||||
assert.Equal(t, c.BindAddr, "127.0.0.1:4003", "")
|
||||
assert.Equal(t, c.Peers, []string{"coreos.com:4001", "coreos.com:4002"}, "")
|
||||
@ -80,6 +84,8 @@ func TestConfigEnv(t *testing.T) {
|
||||
os.Setenv("ETCD_CORS", "localhost:4001,localhost:4002")
|
||||
os.Setenv("ETCD_DATA_DIR", "/tmp/data")
|
||||
os.Setenv("ETCD_DISCOVERY", "http://example.com/foobar")
|
||||
os.Setenv("ETCD_HTTP_READ_TIMEOUT", "2.34")
|
||||
os.Setenv("ETCD_HTTP_WRITE_TIMEOUT", "1.23")
|
||||
os.Setenv("ETCD_KEY_FILE", "/tmp/file.key")
|
||||
os.Setenv("ETCD_BIND_ADDR", "127.0.0.1:4003")
|
||||
os.Setenv("ETCD_PEERS", "coreos.com:4001,coreos.com:4002")
|
||||
@ -107,6 +113,8 @@ func TestConfigEnv(t *testing.T) {
|
||||
assert.Equal(t, c.CorsOrigins, []string{"localhost:4001", "localhost:4002"}, "")
|
||||
assert.Equal(t, c.DataDir, "/tmp/data", "")
|
||||
assert.Equal(t, c.Discovery, "http://example.com/foobar", "")
|
||||
assert.Equal(t, c.HTTPReadTimeout, 2.34, "")
|
||||
assert.Equal(t, c.HTTPWriteTimeout, 1.23, "")
|
||||
assert.Equal(t, c.KeyFile, "/tmp/file.key", "")
|
||||
assert.Equal(t, c.BindAddr, "127.0.0.1:4003", "")
|
||||
assert.Equal(t, c.Peers, []string{"coreos.com:4001", "coreos.com:4002"}, "")
|
||||
@ -553,19 +561,12 @@ func TestConfigClusterRemoveDelayFlag(t *testing.T) {
|
||||
assert.Equal(t, c.Cluster.RemoveDelay, 100.0, "")
|
||||
}
|
||||
|
||||
// Ensures that the cluster sync interval can be parsed from the environment.
|
||||
func TestConfigClusterSyncIntervalEnv(t *testing.T) {
|
||||
withEnv("ETCD_CLUSTER_SYNC_INTERVAL", "10", func(c *Config) {
|
||||
assert.Nil(t, c.LoadEnv(), "")
|
||||
assert.Equal(t, c.Cluster.SyncInterval, 10.0, "")
|
||||
})
|
||||
}
|
||||
|
||||
// Ensures that the cluster sync interval flag can be parsed.
|
||||
func TestConfigClusterSyncIntervalFlag(t *testing.T) {
|
||||
c := New()
|
||||
assert.Nil(t, c.LoadFlags([]string{"-cluster-sync-interval", "10"}), "")
|
||||
assert.Equal(t, c.Cluster.SyncInterval, 10.0, "")
|
||||
assert.Nil(t, c.LoadFlags([]string{"-http-read-timeout", "2.34"}), "")
|
||||
assert.Equal(t, c.HTTPReadTimeout, 2.34, "")
|
||||
assert.Nil(t, c.LoadFlags([]string{"-http-write-timeout", "1.23"}), "")
|
||||
assert.Equal(t, c.HTTPWriteTimeout, 1.23, "")
|
||||
}
|
||||
|
||||
// Ensures that a system config field is overridden by a custom config field.
|
||||
|
@ -143,5 +143,6 @@ func (e Error) Write(w http.ResponseWriter) {
|
||||
status = http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
http.Error(w, e.toJsonString(), status)
|
||||
w.WriteHeader(status)
|
||||
fmt.Fprintln(w, e.toJsonString())
|
||||
}
|
||||
|
16
etcd/etcd.go
16
etcd/etcd.go
@ -232,6 +232,7 @@ func (e *Etcd) Run() {
|
||||
DataDir: e.Config.DataDir,
|
||||
}
|
||||
e.StandbyServer = server.NewStandbyServer(ssConfig, client)
|
||||
e.StandbyServer.SetRaftServer(raftServer)
|
||||
|
||||
// Generating config could be slow.
|
||||
// Put it here to make listen happen immediately after peer-server starting.
|
||||
@ -259,10 +260,19 @@ func (e *Etcd) Run() {
|
||||
|
||||
log.Infof("etcd server [name %s, listen on %s, advertised url %s]", e.Server.Name, e.Config.BindAddr, e.Server.URL())
|
||||
listener := server.NewListener(e.Config.EtcdTLSInfo().Scheme(), e.Config.BindAddr, etcdTLSConfig)
|
||||
e.server = &http.Server{Handler: &ModeHandler{e, serverHTTPHandler, standbyServerHTTPHandler}}
|
||||
|
||||
e.server = &http.Server{Handler: &ModeHandler{e, serverHTTPHandler, standbyServerHTTPHandler},
|
||||
ReadTimeout: time.Duration(e.Config.HTTPReadTimeout) * time.Second,
|
||||
WriteTimeout: time.Duration(e.Config.HTTPWriteTimeout) * time.Second,
|
||||
}
|
||||
|
||||
log.Infof("peer server [name %s, listen on %s, advertised url %s]", e.PeerServer.Config.Name, e.Config.Peer.BindAddr, e.PeerServer.Config.URL)
|
||||
peerListener := server.NewListener(e.Config.PeerTLSInfo().Scheme(), e.Config.Peer.BindAddr, peerTLSConfig)
|
||||
e.peerServer = &http.Server{Handler: &ModeHandler{e, peerServerHTTPHandler, http.NotFoundHandler()}}
|
||||
|
||||
e.peerServer = &http.Server{Handler: &ModeHandler{e, peerServerHTTPHandler, http.NotFoundHandler()},
|
||||
ReadTimeout: time.Duration(server.DefaultReadTimeout) * time.Second,
|
||||
WriteTimeout: time.Duration(server.DefaultWriteTimeout) * time.Second,
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
@ -299,6 +309,7 @@ func (e *Etcd) runServer() {
|
||||
for {
|
||||
if e.mode == PeerMode {
|
||||
log.Infof("%v starting in peer mode", e.Config.Name)
|
||||
go registerAvailableInternalVersions(e.Config.Name, e.Config.Addr, e.Config.EtcdTLSInfo())
|
||||
// Starting peer server should be followed close by listening on its port
|
||||
// If not, it may leave many requests unaccepted, or cannot receive heartbeat from the cluster.
|
||||
// One severe problem caused if failing receiving heartbeats is when the second node joins one-node cluster,
|
||||
@ -347,6 +358,7 @@ func (e *Etcd) runServer() {
|
||||
raftServer.SetElectionTimeout(electionTimeout)
|
||||
raftServer.SetHeartbeatInterval(heartbeatInterval)
|
||||
e.PeerServer.SetRaftServer(raftServer, e.Config.Snapshot)
|
||||
e.StandbyServer.SetRaftServer(raftServer)
|
||||
|
||||
e.PeerServer.SetJoinIndex(e.StandbyServer.JoinIndex())
|
||||
e.setMode(PeerMode)
|
||||
|
59
etcd/upgrade.go
Normal file
59
etcd/upgrade.go
Normal file
@ -0,0 +1,59 @@
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/log"
|
||||
"github.com/coreos/etcd/server"
|
||||
"github.com/coreos/etcd/third_party/github.com/coreos/go-etcd/etcd"
|
||||
)
|
||||
|
||||
var defaultEtcdBinaryDir = "/usr/libexec/etcd/internal_versions/"
|
||||
|
||||
func registerAvailableInternalVersions(name string, addr string, tls *server.TLSInfo) {
|
||||
var c *etcd.Client
|
||||
if tls.Scheme() == "http" {
|
||||
c = etcd.NewClient([]string{addr})
|
||||
} else {
|
||||
var err error
|
||||
c, err = etcd.NewTLSClient([]string{addr}, tls.CertFile, tls.KeyFile, tls.CAFile)
|
||||
if err != nil {
|
||||
log.Fatalf("client TLS error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
vers, err := getInternalVersions()
|
||||
if err != nil {
|
||||
log.Infof("failed to get local etcd versions: %v", err)
|
||||
return
|
||||
}
|
||||
for _, v := range vers {
|
||||
for {
|
||||
_, err := c.Set("/_etcd/available-internal-versions/"+v+"/"+name, "ok", 0)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
log.Infof("%s: available_internal_versions %s is registered into key space successfully.", name, vers)
|
||||
}
|
||||
|
||||
func getInternalVersions() ([]string, error) {
|
||||
if runtime.GOOS != "linux" {
|
||||
return nil, fmt.Errorf("unmatched os version %v", runtime.GOOS)
|
||||
}
|
||||
etcdBinaryDir := os.Getenv("ETCD_BINARY_DIR")
|
||||
if etcdBinaryDir == "" {
|
||||
etcdBinaryDir = defaultEtcdBinaryDir
|
||||
}
|
||||
dir, err := os.Open(etcdBinaryDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer dir.Close()
|
||||
return dir.Readdirnames(-1)
|
||||
}
|
@ -54,6 +54,7 @@ type CORSHandler struct {
|
||||
func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) {
|
||||
w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
|
||||
w.Header().Add("Access-Control-Allow-Origin", origin)
|
||||
w.Header().Add("Access-Control-Allow-Headers", "accept, content-type")
|
||||
}
|
||||
|
||||
// ServeHTTP adds the correct CORS headers based on the origin and returns immediately
|
||||
|
@ -1,85 +0,0 @@
|
||||
package leader
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/server"
|
||||
"github.com/coreos/etcd/tests"
|
||||
"github.com/coreos/etcd/third_party/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Ensure that a leader can be set and read.
|
||||
func TestModLeaderSet(t *testing.T) {
|
||||
tests.RunServer(func(s *server.Server) {
|
||||
// Set leader.
|
||||
body, status, err := testSetLeader(s, "foo", "xxx", 10)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "2")
|
||||
|
||||
// Check that the leader is set.
|
||||
body, status, err = testGetLeader(s, "foo")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "xxx")
|
||||
|
||||
// Delete leader.
|
||||
body, status, err = testDeleteLeader(s, "foo", "xxx")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "")
|
||||
|
||||
// Check that the leader is removed.
|
||||
body, status, err = testGetLeader(s, "foo")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "")
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a leader can be renewed.
|
||||
func TestModLeaderRenew(t *testing.T) {
|
||||
tests.RunServer(func(s *server.Server) {
|
||||
// Set leader.
|
||||
body, status, err := testSetLeader(s, "foo", "xxx", 2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "2")
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Renew leader.
|
||||
body, status, err = testSetLeader(s, "foo", "xxx", 3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "2")
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Check that the leader is set.
|
||||
body, status, err = testGetLeader(s, "foo")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, status, 200)
|
||||
assert.Equal(t, body, "xxx")
|
||||
})
|
||||
}
|
||||
|
||||
func testSetLeader(s *server.Server, key string, name string, ttl int) (string, int, error) {
|
||||
resp, err := tests.PutForm(fmt.Sprintf("%s/mod/v2/leader/%s?name=%s&ttl=%d", s.URL(), key, name, ttl), nil)
|
||||
ret := tests.ReadBody(resp)
|
||||
return string(ret), resp.StatusCode, err
|
||||
}
|
||||
|
||||
func testGetLeader(s *server.Server, key string) (string, int, error) {
|
||||
resp, err := tests.Get(fmt.Sprintf("%s/mod/v2/leader/%s", s.URL(), key))
|
||||
ret := tests.ReadBody(resp)
|
||||
return string(ret), resp.StatusCode, err
|
||||
}
|
||||
|
||||
func testDeleteLeader(s *server.Server, key string, name string) (string, int, error) {
|
||||
resp, err := tests.DeleteForm(fmt.Sprintf("%s/mod/v2/leader/%s?name=%s", s.URL(), key, name), nil)
|
||||
ret := tests.ReadBody(resp)
|
||||
return string(ret), resp.StatusCode, err
|
||||
}
|
Binary file not shown.
@ -12,7 +12,7 @@ const (
|
||||
MinActiveSize = 3
|
||||
|
||||
// DefaultRemoveDelay is the default elapsed time before removal.
|
||||
DefaultRemoveDelay = float64((5 * time.Second) / time.Second)
|
||||
DefaultRemoveDelay = float64((30 * time.Minute) / time.Second)
|
||||
|
||||
// MinRemoveDelay is the minimum remove delay allowed.
|
||||
MinRemoveDelay = float64((2 * time.Second) / time.Second)
|
||||
@ -25,7 +25,6 @@ const (
|
||||
)
|
||||
|
||||
// ClusterConfig represents cluster-wide configuration settings.
|
||||
// These settings can only be changed through Raft.
|
||||
type ClusterConfig struct {
|
||||
// ActiveSize is the maximum number of node that can join as Raft followers.
|
||||
// Nodes that join the cluster after the limit is reached are standbys.
|
||||
|
@ -46,6 +46,7 @@ func (c *JoinCommand) NodeName() string {
|
||||
// applyJoin attempts to join a machine to the cluster.
|
||||
func applyJoin(c *JoinCommand, context raft.Context) (uint64, error) {
|
||||
ps, _ := context.Server().Context().(*PeerServer)
|
||||
ps.raftServer.FlushCommitIndex()
|
||||
commitIndex := context.CommitIndex()
|
||||
|
||||
// Make sure we're not getting a cached value from the registry.
|
||||
|
@ -3,10 +3,16 @@ package server
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/log"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultReadTimeout = float64((5 * time.Minute) / time.Second)
|
||||
DefaultWriteTimeout = float64((5 * time.Minute) / time.Second)
|
||||
)
|
||||
|
||||
// TLSServerConfig generates tls configuration based on TLSInfo
|
||||
// If any error happens, this function will call log.Fatal
|
||||
func TLSServerConfig(info *TLSInfo) *tls.Config {
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -23,6 +24,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxHeartbeatTimeoutBackoff is the maximum number of seconds before we warn
|
||||
// the user again about a peer not accepting heartbeats.
|
||||
MaxHeartbeatTimeoutBackoff = 15 * time.Second
|
||||
|
||||
// ThresholdMonitorTimeout is the time between log notifications that the
|
||||
// Raft heartbeat is too close to the election timeout.
|
||||
ThresholdMonitorTimeout = 5 * time.Second
|
||||
@ -70,10 +75,18 @@ type PeerServer struct {
|
||||
routineGroup sync.WaitGroup
|
||||
timeoutThresholdChan chan interface{}
|
||||
|
||||
logBackoffs map[string]*logBackoff
|
||||
|
||||
metrics *metrics.Bucket
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type logBackoff struct {
|
||||
next time.Time
|
||||
backoff time.Duration
|
||||
count int
|
||||
}
|
||||
|
||||
// TODO: find a good policy to do snapshot
|
||||
type snapshotConf struct {
|
||||
// Etcd will check if snapshot is need every checkingInterval
|
||||
@ -97,6 +110,7 @@ func NewPeerServer(psConfig PeerServerConfig, client *Client, registry *Registry
|
||||
serverStats: serverStats,
|
||||
|
||||
timeoutThresholdChan: make(chan interface{}, 1),
|
||||
logBackoffs: make(map[string]*logBackoff),
|
||||
|
||||
metrics: mb,
|
||||
}
|
||||
@ -214,6 +228,7 @@ func (s *PeerServer) FindCluster(discoverURL string, peers []string) (toStart bo
|
||||
// TODO(yichengq): Think about the action that should be done
|
||||
// if it cannot connect any of the previous known node.
|
||||
log.Debugf("%s is restarting the cluster %v", name, possiblePeers)
|
||||
s.SetJoinIndex(s.raftServer.CommitIndex())
|
||||
toStart = true
|
||||
return
|
||||
}
|
||||
@ -278,6 +293,7 @@ func (s *PeerServer) Start(snapshot bool, clusterConfig *ClusterConfig) error {
|
||||
s.startRoutine(s.monitorTimeoutThreshold)
|
||||
s.startRoutine(s.monitorActiveSize)
|
||||
s.startRoutine(s.monitorPeerActivity)
|
||||
s.startRoutine(s.monitorVersion)
|
||||
|
||||
// open the snapshot
|
||||
if snapshot {
|
||||
@ -356,6 +372,7 @@ func (s *PeerServer) HTTPHandler() http.Handler {
|
||||
router.HandleFunc("/v2/admin/machines", s.getMachinesHttpHandler).Methods("GET")
|
||||
router.HandleFunc("/v2/admin/machines/{name}", s.getMachineHttpHandler).Methods("GET")
|
||||
router.HandleFunc("/v2/admin/machines/{name}", s.RemoveHttpHandler).Methods("DELETE")
|
||||
router.HandleFunc("/v2/admin/next-internal-version", s.NextInternalVersionHandler).Methods("GET")
|
||||
|
||||
return router
|
||||
}
|
||||
@ -626,7 +643,7 @@ func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string)
|
||||
}
|
||||
|
||||
func (s *PeerServer) Stats() []byte {
|
||||
s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()
|
||||
s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.StartTime).String()
|
||||
|
||||
// TODO: register state listener to raft to change this field
|
||||
// rather than compare the state each time Stats() is called.
|
||||
@ -686,11 +703,12 @@ func (s *PeerServer) raftEventLogger(event raft.Event) {
|
||||
case raft.RemovePeerEventType:
|
||||
log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
|
||||
case raft.HeartbeatIntervalEventType:
|
||||
var name = "<unknown>"
|
||||
if peer, ok := value.(*raft.Peer); ok {
|
||||
name = peer.Name
|
||||
peer, ok := value.(*raft.Peer)
|
||||
if !ok {
|
||||
log.Warnf("%s: heatbeat timeout from unknown peer", s.Config.Name)
|
||||
return
|
||||
}
|
||||
log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
|
||||
s.logHeartbeatTimeout(peer)
|
||||
case raft.ElectionTimeoutThresholdEventType:
|
||||
select {
|
||||
case s.timeoutThresholdChan <- value:
|
||||
@ -700,6 +718,35 @@ func (s *PeerServer) raftEventLogger(event raft.Event) {
|
||||
}
|
||||
}
|
||||
|
||||
// logHeartbeatTimeout logs about the edge triggered heartbeat timeout event
|
||||
// only if we haven't warned within a reasonable interval.
|
||||
func (s *PeerServer) logHeartbeatTimeout(peer *raft.Peer) {
|
||||
b, ok := s.logBackoffs[peer.Name]
|
||||
if !ok {
|
||||
b = &logBackoff{time.Time{}, time.Second, 1}
|
||||
s.logBackoffs[peer.Name] = b
|
||||
}
|
||||
|
||||
if peer.LastActivity().After(b.next) {
|
||||
b.next = time.Time{}
|
||||
b.backoff = time.Second
|
||||
b.count = 1
|
||||
}
|
||||
|
||||
if b.next.After(time.Now()) {
|
||||
b.count++
|
||||
return
|
||||
}
|
||||
|
||||
b.backoff = 2 * b.backoff
|
||||
if b.backoff > MaxHeartbeatTimeoutBackoff {
|
||||
b.backoff = MaxHeartbeatTimeoutBackoff
|
||||
}
|
||||
b.next = time.Now().Add(b.backoff)
|
||||
|
||||
log.Infof("%s: warning: heartbeat time out peer=%q missed=%d backoff=%q", s.Config.Name, peer.Name, b.count, b.backoff)
|
||||
}
|
||||
|
||||
func (s *PeerServer) recordMetricEvent(event raft.Event) {
|
||||
name := fmt.Sprintf("raft.event.%s", event.Type())
|
||||
value := event.Value().(time.Duration)
|
||||
@ -728,9 +775,9 @@ func (s *PeerServer) startRoutine(f func()) {
|
||||
func (s *PeerServer) monitorSnapshot() {
|
||||
for {
|
||||
timer := time.NewTimer(s.snapConf.checkingInterval)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
timer.Stop()
|
||||
return
|
||||
case <-timer.C:
|
||||
}
|
||||
@ -763,6 +810,8 @@ func (s *PeerServer) monitorSync() {
|
||||
// monitorTimeoutThreshold groups timeout threshold events together and prints
|
||||
// them as a single log line.
|
||||
func (s *PeerServer) monitorTimeoutThreshold() {
|
||||
ticker := time.NewTicker(ThresholdMonitorTimeout)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
@ -771,12 +820,10 @@ func (s *PeerServer) monitorTimeoutThreshold() {
|
||||
log.Infof("%s: warning: heartbeat near election timeout: %v", s.Config.Name, value)
|
||||
}
|
||||
|
||||
timer := time.NewTimer(ThresholdMonitorTimeout)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
return
|
||||
case <-timer.C:
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -784,13 +831,13 @@ func (s *PeerServer) monitorTimeoutThreshold() {
|
||||
// monitorActiveSize has the leader periodically check the status of cluster
|
||||
// nodes and swaps them out for standbys as needed.
|
||||
func (s *PeerServer) monitorActiveSize() {
|
||||
ticker := time.NewTicker(ActiveMonitorTimeout)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
timer := time.NewTimer(ActiveMonitorTimeout)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
return
|
||||
case <-timer.C:
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
// Ignore while this peer is not a leader.
|
||||
@ -809,7 +856,7 @@ func (s *PeerServer) monitorActiveSize() {
|
||||
// If we have more active nodes than we should then remove.
|
||||
if peerCount > activeSize {
|
||||
peer := peers[rand.Intn(len(peers))]
|
||||
log.Infof("%s: removing: %v", s.Config.Name, peer)
|
||||
log.Infof("%s: removing node: %v; peer number %d > expected size %d", s.Config.Name, peer, peerCount, activeSize)
|
||||
if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
|
||||
log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
|
||||
}
|
||||
@ -820,13 +867,13 @@ func (s *PeerServer) monitorActiveSize() {
|
||||
|
||||
// monitorPeerActivity has the leader periodically for dead nodes and demotes them.
|
||||
func (s *PeerServer) monitorPeerActivity() {
|
||||
ticker := time.NewTicker(PeerActivityMonitorTimeout)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
timer := time.NewTimer(PeerActivityMonitorTimeout)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
return
|
||||
case <-timer.C:
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
// Ignore while this peer is not a leader.
|
||||
@ -851,3 +898,30 @@ func (s *PeerServer) monitorPeerActivity() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *PeerServer) monitorVersion() {
|
||||
for {
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
return
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
|
||||
resp, err := s.store.Get("/_etcd/next-internal-version", false, false)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// only support upgrading to etcd2
|
||||
if *resp.Node.Value == "2" {
|
||||
log.Infof("%s: detected next internal version 2, exit after 10 seconds.", s.Config.Name)
|
||||
} else {
|
||||
log.Infof("%s: detected invaild next internal version %s", s.Config.Name, *resp.Node.Value)
|
||||
continue
|
||||
}
|
||||
time.Sleep(10 * time.Second)
|
||||
// be nice to raft. try not to corrupt log file.
|
||||
go s.raftServer.Stop()
|
||||
time.Sleep(time.Second)
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package server
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@ -188,6 +189,7 @@ func (ps *PeerServer) RemoveHttpHandler(w http.ResponseWriter, req *http.Request
|
||||
|
||||
// Returns a JSON-encoded cluster configuration.
|
||||
func (ps *PeerServer) getClusterConfigHttpHandler(w http.ResponseWriter, req *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(ps.ClusterConfig())
|
||||
}
|
||||
|
||||
@ -217,6 +219,7 @@ func (ps *PeerServer) setClusterConfigHttpHandler(w http.ResponseWriter, req *ht
|
||||
log.Debugf("[recv] Update Cluster Config Request")
|
||||
ps.server.Dispatch(c, w, req)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(ps.ClusterConfig())
|
||||
}
|
||||
|
||||
@ -230,6 +233,7 @@ func (ps *PeerServer) getMachinesHttpHandler(w http.ResponseWriter, req *http.Re
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(&machines)
|
||||
}
|
||||
|
||||
@ -237,6 +241,7 @@ func (ps *PeerServer) getMachinesHttpHandler(w http.ResponseWriter, req *http.Re
|
||||
func (ps *PeerServer) getMachineHttpHandler(w http.ResponseWriter, req *http.Request) {
|
||||
vars := mux.Vars(req)
|
||||
m := ps.getMachineMessage(vars["name"], ps.raftServer.Leader())
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(m)
|
||||
}
|
||||
|
||||
@ -305,6 +310,48 @@ func (ps *PeerServer) UpgradeHttpHandler(w http.ResponseWriter, req *http.Reques
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (ps *PeerServer) NextInternalVersionHandler(w http.ResponseWriter, req *http.Request) {
|
||||
for i := 0; i < 50; i++ {
|
||||
if ps.raftServer.State() != raft.Leader {
|
||||
l := ps.raftServer.Leader()
|
||||
if l == "" {
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
url, _ := ps.registry.PeerURL(l)
|
||||
uhttp.Redirect(url, w, req)
|
||||
return
|
||||
}
|
||||
resp, err := ps.store.Get("/_etcd/available-internal-versions/2", true, true)
|
||||
if err != nil {
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
available := make(map[string]bool)
|
||||
for _, n := range resp.Node.Nodes {
|
||||
available[path.Base(n.Key)] = true
|
||||
}
|
||||
|
||||
notfound := false
|
||||
for _, n := range ps.registry.Names() {
|
||||
if !available[n] {
|
||||
notfound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if notfound {
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
c := ps.store.CommandFactory().CreateSetCommand("/_etcd/next-internal-version", false, "2", store.Permanent)
|
||||
_, err = ps.raftServer.Do(c)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
}
|
||||
|
||||
// machineMessage represents information about a peer or standby in the registry.
|
||||
type machineMessage struct {
|
||||
Name string `json:"name"`
|
||||
|
@ -13,9 +13,9 @@ type raftServerStats struct {
|
||||
StartTime time.Time `json:"startTime"`
|
||||
|
||||
LeaderInfo struct {
|
||||
Name string `json:"leader"`
|
||||
Uptime string `json:"uptime"`
|
||||
startTime time.Time
|
||||
Name string `json:"leader"`
|
||||
Uptime string `json:"uptime"`
|
||||
StartTime time.Time `json:"startTime"`
|
||||
} `json:"leaderInfo"`
|
||||
|
||||
RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"`
|
||||
@ -43,7 +43,7 @@ func NewRaftServerStats(name string) *raftServerStats {
|
||||
back: -1,
|
||||
},
|
||||
}
|
||||
stats.LeaderInfo.startTime = time.Now()
|
||||
stats.LeaderInfo.StartTime = time.Now()
|
||||
return stats
|
||||
}
|
||||
|
||||
@ -54,7 +54,7 @@ func (ss *raftServerStats) RecvAppendReq(leaderName string, pkgSize int) {
|
||||
ss.State = raft.Follower
|
||||
if leaderName != ss.LeaderInfo.Name {
|
||||
ss.LeaderInfo.Name = leaderName
|
||||
ss.LeaderInfo.startTime = time.Now()
|
||||
ss.LeaderInfo.StartTime = time.Now()
|
||||
}
|
||||
|
||||
ss.recvRateQueue.Insert(NewPackageStats(time.Now(), pkgSize))
|
||||
@ -70,7 +70,7 @@ func (ss *raftServerStats) SendAppendReq(pkgSize int) {
|
||||
if ss.State != raft.Leader {
|
||||
ss.State = raft.Leader
|
||||
ss.LeaderInfo.Name = ss.Name
|
||||
ss.LeaderInfo.startTime = now
|
||||
ss.LeaderInfo.StartTime = now
|
||||
}
|
||||
|
||||
ss.sendRateQueue.Insert(NewPackageStats(now, pkgSize))
|
||||
|
@ -1,3 +0,0 @@
|
||||
package server
|
||||
|
||||
const ReleaseVersion = "0.4.1"
|
@ -285,7 +285,7 @@ func (s *Server) Dispatch(c raft.Command, w http.ResponseWriter, req *http.Reque
|
||||
// Handler to return the current version of etcd.
|
||||
func (s *Server) GetVersionHandler(w http.ResponseWriter, req *http.Request) error {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "etcd %s", ReleaseVersion)
|
||||
fmt.Fprintf(w, `{"releaseVersion":"%s","internalVersion":"%s"}`, ReleaseVersion, InternalVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -311,6 +311,7 @@ func (s *Server) GetPeersHandler(w http.ResponseWriter, req *http.Request) error
|
||||
|
||||
// Retrieves stats on the Raft server.
|
||||
func (s *Server) GetStatsHandler(w http.ResponseWriter, req *http.Request) error {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(s.peerServer.Stats())
|
||||
return nil
|
||||
}
|
||||
@ -318,21 +319,19 @@ func (s *Server) GetStatsHandler(w http.ResponseWriter, req *http.Request) error
|
||||
// Retrieves stats on the leader.
|
||||
func (s *Server) GetLeaderStatsHandler(w http.ResponseWriter, req *http.Request) error {
|
||||
if s.peerServer.RaftServer().State() == raft.Leader {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(s.peerServer.PeerStats())
|
||||
return nil
|
||||
}
|
||||
|
||||
leader := s.peerServer.RaftServer().Leader()
|
||||
if leader == "" {
|
||||
return etcdErr.NewError(300, "", s.Store().Index())
|
||||
}
|
||||
hostname, _ := s.registry.ClientURL(leader)
|
||||
uhttp.Redirect(hostname, w, req)
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
w.Write([]byte("not current leader"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieves stats on the leader.
|
||||
func (s *Server) GetStoreStatsHandler(w http.ResponseWriter, req *http.Request) error {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(s.store.JsonStats())
|
||||
return nil
|
||||
}
|
||||
|
@ -30,14 +30,16 @@ type StandbyServerConfig struct {
|
||||
}
|
||||
|
||||
type standbyInfo struct {
|
||||
// stay running in standby mode
|
||||
Running bool
|
||||
Cluster []*machineMessage
|
||||
SyncInterval float64
|
||||
}
|
||||
|
||||
type StandbyServer struct {
|
||||
Config StandbyServerConfig
|
||||
client *Client
|
||||
Config StandbyServerConfig
|
||||
client *Client
|
||||
raftServer raft.Server
|
||||
|
||||
standbyInfo
|
||||
joinIndex uint64
|
||||
@ -62,6 +64,10 @@ func NewStandbyServer(config StandbyServerConfig, client *Client) *StandbyServer
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *StandbyServer) SetRaftServer(raftServer raft.Server) {
|
||||
s.raftServer = raftServer
|
||||
}
|
||||
|
||||
func (s *StandbyServer) Start() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
@ -73,12 +79,16 @@ func (s *StandbyServer) Start() {
|
||||
s.removeNotify = make(chan bool)
|
||||
s.closeChan = make(chan bool)
|
||||
|
||||
s.Running = true
|
||||
if err := s.saveInfo(); err != nil {
|
||||
log.Warnf("error saving cluster info for standby")
|
||||
}
|
||||
|
||||
s.routineGroup.Add(1)
|
||||
go func() {
|
||||
defer s.routineGroup.Done()
|
||||
s.monitorCluster()
|
||||
}()
|
||||
s.Running = true
|
||||
}
|
||||
|
||||
// Stop stops the server gracefully.
|
||||
@ -92,11 +102,6 @@ func (s *StandbyServer) Stop() {
|
||||
|
||||
close(s.closeChan)
|
||||
s.routineGroup.Wait()
|
||||
|
||||
if err := s.saveInfo(); err != nil {
|
||||
log.Warnf("error saving cluster info for standby")
|
||||
}
|
||||
s.Running = false
|
||||
}
|
||||
|
||||
// RemoveNotify notifies the server is removed from standby mode and ready
|
||||
@ -173,13 +178,21 @@ func (s *StandbyServer) redirectRequests(w http.ResponseWriter, r *http.Request)
|
||||
// monitorCluster assumes that the machine has tried to join the cluster and
|
||||
// failed, so it waits for the interval at the beginning.
|
||||
func (s *StandbyServer) monitorCluster() {
|
||||
ticker := time.NewTicker(time.Duration(int64(s.SyncInterval * float64(time.Second))))
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
timer := time.NewTimer(time.Duration(int64(s.SyncInterval * float64(time.Second))))
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-s.closeChan:
|
||||
return
|
||||
case <-timer.C:
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
ok, err := s.checkMemberInternalVersionIsV2()
|
||||
if err != nil {
|
||||
log.Warnf("fail checking internal version(%v): %v", s.ClusterURLs(), err)
|
||||
} else if ok {
|
||||
log.Infof("Detect the cluster has been upgraded to v2. Exit now.")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if err := s.syncCluster(nil); err != nil {
|
||||
@ -199,6 +212,10 @@ func (s *StandbyServer) monitorCluster() {
|
||||
}
|
||||
|
||||
log.Infof("join through leader %v", leader.PeerURL)
|
||||
s.Running = false
|
||||
if err := s.saveInfo(); err != nil {
|
||||
log.Warnf("error saving cluster info for standby")
|
||||
}
|
||||
go func() {
|
||||
s.Stop()
|
||||
close(s.removeNotify)
|
||||
@ -207,6 +224,39 @@ func (s *StandbyServer) monitorCluster() {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StandbyServer) checkMemberInternalVersionIsV2() (bool, error) {
|
||||
c := &http.Client{Transport: s.client.Client.Transport}
|
||||
for _, memb := range s.Cluster {
|
||||
url := memb.ClientURL
|
||||
resp, err := c.Get(url + "/version")
|
||||
if err != nil {
|
||||
log.Debugf("failed to get /version from %s", url)
|
||||
continue
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Debugf("failed to read body from %s", url)
|
||||
continue
|
||||
}
|
||||
|
||||
var m map[string]string
|
||||
err = json.Unmarshal(b, &m)
|
||||
if err != nil {
|
||||
log.Debugf("failed to unmarshal body %s from %s", b, url)
|
||||
continue
|
||||
}
|
||||
switch m["internalVersion"] {
|
||||
case "1":
|
||||
return false, nil
|
||||
case "2":
|
||||
return true, nil
|
||||
default:
|
||||
log.Warnf("unrecognized internal version %s from %s", m["internalVersion"], url)
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("failed to get version")
|
||||
}
|
||||
|
||||
func (s *StandbyServer) syncCluster(peerURLs []string) error {
|
||||
peerURLs = append(s.ClusterURLs(), peerURLs...)
|
||||
|
||||
@ -235,6 +285,13 @@ func (s *StandbyServer) syncCluster(peerURLs []string) error {
|
||||
}
|
||||
|
||||
func (s *StandbyServer) join(peer string) error {
|
||||
for _, url := range s.ClusterURLs() {
|
||||
if s.Config.PeerURL == url {
|
||||
s.joinIndex = s.raftServer.CommitIndex()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Our version must match the leaders version
|
||||
version, err := s.client.GetVersion(peer)
|
||||
if err != nil {
|
||||
|
@ -53,9 +53,11 @@ Other Options:
|
||||
-max-result-buffer Max size of the result buffer.
|
||||
-max-retry-attempts Number of times a node will try to join a cluster.
|
||||
-retry-interval Seconds to wait between cluster join retry attempts.
|
||||
-max-cluster-size Maximum number of nodes in the cluster.
|
||||
-snapshot=false Disable log snapshots
|
||||
-snapshot-count Number of transactions before issuing a snapshot.
|
||||
-cluster-active-size Number of active nodes in the cluster.
|
||||
-cluster-remove-delay Seconds before one node is removed.
|
||||
-cluster-sync-interval Seconds between synchronizations for standby mode.
|
||||
`
|
||||
|
||||
// Usage returns the usage message for etcd.
|
||||
|
@ -17,6 +17,14 @@ func GetHandler(w http.ResponseWriter, req *http.Request, s Server) error {
|
||||
vars := mux.Vars(req)
|
||||
key := "/" + vars["key"]
|
||||
|
||||
recursive := (req.FormValue("recursive") == "true")
|
||||
sort := (req.FormValue("sorted") == "true")
|
||||
|
||||
if req.FormValue("quorum") == "true" {
|
||||
c := s.Store().CommandFactory().CreateGetCommand(key, recursive, sort)
|
||||
return s.Dispatch(c, w, req)
|
||||
}
|
||||
|
||||
// Help client to redirect the request to the current leader
|
||||
if req.FormValue("consistent") == "true" && s.State() != raft.Leader {
|
||||
leader := s.Leader()
|
||||
@ -35,8 +43,6 @@ func GetHandler(w http.ResponseWriter, req *http.Request, s Server) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
recursive := (req.FormValue("recursive") == "true")
|
||||
sort := (req.FormValue("sorted") == "true")
|
||||
waitIndex := req.FormValue("waitIndex")
|
||||
stream := (req.FormValue("stream") == "true")
|
||||
|
||||
@ -68,6 +74,7 @@ func handleWatch(key string, recursive, stream bool, waitIndex string, w http.Re
|
||||
closeChan := cn.CloseNotify()
|
||||
|
||||
writeHeaders(w, s)
|
||||
w.(http.Flusher).Flush()
|
||||
|
||||
if stream {
|
||||
// watcher hub will not help to remove stream watcher
|
||||
|
@ -24,12 +24,15 @@ func TestV2GetKey(t *testing.T) {
|
||||
v.Set("value", "XXX")
|
||||
fullURL := fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar")
|
||||
resp, _ := tests.Get(fullURL)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
assert.Equal(t, resp.StatusCode, http.StatusNotFound)
|
||||
|
||||
resp, _ = tests.PutForm(fullURL, v)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
tests.ReadBody(resp)
|
||||
|
||||
resp, _ = tests.Get(fullURL)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
assert.Equal(t, resp.StatusCode, http.StatusOK)
|
||||
body := tests.ReadBodyJSON(resp)
|
||||
assert.Equal(t, body["action"], "get", "")
|
||||
|
@ -1,3 +1,5 @@
|
||||
package server
|
||||
|
||||
const ReleaseVersion = "0.4.7"
|
||||
const InternalVersion = "1"
|
||||
const Version = "v2"
|
||||
|
@ -24,6 +24,7 @@ type CommandFactory interface {
|
||||
prevIndex uint64, expireTime time.Time) raft.Command
|
||||
CreateCompareAndDeleteCommand(key string, prevValue string, prevIndex uint64) raft.Command
|
||||
CreateSyncCommand(now time.Time) raft.Command
|
||||
CreateGetCommand(key string, recursive, sorted bool) raft.Command
|
||||
}
|
||||
|
||||
// RegisterCommandFactory adds a command factory to the global registry.
|
||||
|
@ -89,3 +89,11 @@ func (f *CommandFactory) CreateSyncCommand(now time.Time) raft.Command {
|
||||
Time: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (f *CommandFactory) CreateGetCommand(key string, recursive, sorted bool) raft.Command {
|
||||
return &GetCommand{
|
||||
Key: key,
|
||||
Recursive: recursive,
|
||||
Sorted: sorted,
|
||||
}
|
||||
}
|
||||
|
35
store/v2/get_command.go
Normal file
35
store/v2/get_command.go
Normal file
@ -0,0 +1,35 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/log"
|
||||
"github.com/coreos/etcd/store"
|
||||
"github.com/coreos/etcd/third_party/github.com/goraft/raft"
|
||||
)
|
||||
|
||||
func init() {
|
||||
raft.RegisterCommand(&GetCommand{})
|
||||
}
|
||||
|
||||
// The GetCommand gets a key from the Store.
|
||||
type GetCommand struct {
|
||||
Key string `json:"key"`
|
||||
Recursive bool `json:"recursive"`
|
||||
Sorted bool `json:sorted`
|
||||
}
|
||||
|
||||
// The name of the get command in the log
|
||||
func (c *GetCommand) CommandName() string {
|
||||
return "etcd:get"
|
||||
}
|
||||
|
||||
// Get the key
|
||||
func (c *GetCommand) Apply(context raft.Context) (interface{}, error) {
|
||||
s, _ := context.Server().StateMachine().(store.Store)
|
||||
e, err := s.Get(c.Key, c.Recursive, c.Sorted)
|
||||
|
||||
if err != nil {
|
||||
log.Debug(err)
|
||||
return nil, err
|
||||
}
|
||||
return e, nil
|
||||
}
|
@ -25,6 +25,7 @@ func TestClusterConfigSet(t *testing.T) {
|
||||
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
|
||||
body := tests.ReadBodyJSON(resp)
|
||||
assert.Equal(t, resp.StatusCode, 200)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
assert.Equal(t, body["activeSize"], 3)
|
||||
assert.Equal(t, body["removeDelay"], 60)
|
||||
}
|
||||
@ -44,6 +45,7 @@ func TestClusterConfigReload(t *testing.T) {
|
||||
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
|
||||
body := tests.ReadBodyJSON(resp)
|
||||
assert.Equal(t, resp.StatusCode, 200)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
assert.Equal(t, body["activeSize"], 3)
|
||||
assert.Equal(t, body["removeDelay"], 60)
|
||||
|
||||
@ -59,6 +61,7 @@ func TestClusterConfigReload(t *testing.T) {
|
||||
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
|
||||
body = tests.ReadBodyJSON(resp)
|
||||
assert.Equal(t, resp.StatusCode, 200)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
assert.Equal(t, body["activeSize"], 3)
|
||||
assert.Equal(t, body["removeDelay"], 60)
|
||||
}
|
||||
@ -76,6 +79,7 @@ func TestGetMachines(t *testing.T) {
|
||||
t.FailNow()
|
||||
}
|
||||
assert.Equal(t, resp.StatusCode, 200)
|
||||
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
|
||||
machines := make([]map[string]interface{}, 0)
|
||||
b := tests.ReadBody(resp)
|
||||
json.Unmarshal(b, &machines)
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -100,6 +101,8 @@ func TestTLSMultiNodeKillAllAndRecovery(t *testing.T) {
|
||||
t.Fatal("cannot create cluster")
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
c := etcd.NewClient(nil)
|
||||
|
||||
go Monitor(clusterSize, clusterSize, leaderChan, all, stop)
|
||||
@ -239,3 +242,74 @@ func TestMultiNodeKillAllAndRecoveryWithStandbys(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(result.Node.Nodes), 7)
|
||||
}
|
||||
|
||||
// Create a five nodes
|
||||
// Kill all the nodes and restart, then remove the leader
|
||||
func TestMultiNodeKillAllAndRecoveryAndRemoveLeader(t *testing.T) {
|
||||
procAttr := new(os.ProcAttr)
|
||||
procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}
|
||||
|
||||
stop := make(chan bool)
|
||||
leaderChan := make(chan string, 1)
|
||||
all := make(chan bool, 1)
|
||||
|
||||
clusterSize := 5
|
||||
argGroup, etcds, err := CreateCluster(clusterSize, procAttr, false)
|
||||
defer DestroyCluster(etcds)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal("cannot create cluster")
|
||||
}
|
||||
|
||||
c := etcd.NewClient(nil)
|
||||
|
||||
go Monitor(clusterSize, clusterSize, leaderChan, all, stop)
|
||||
<-all
|
||||
<-leaderChan
|
||||
stop <- true
|
||||
|
||||
// It needs some time to sync current commits and write it to disk.
|
||||
// Or some instance may be restarted as a new peer, and we don't support
|
||||
// to connect back the old cluster that doesn't have majority alive
|
||||
// without log now.
|
||||
time.Sleep(time.Second)
|
||||
|
||||
c.SyncCluster()
|
||||
|
||||
// kill all
|
||||
DestroyCluster(etcds)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
stop = make(chan bool)
|
||||
leaderChan = make(chan string, 1)
|
||||
all = make(chan bool, 1)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
for i := 0; i < clusterSize; i++ {
|
||||
etcds[i], err = os.StartProcess(EtcdBinPath, argGroup[i], procAttr)
|
||||
}
|
||||
|
||||
go Monitor(clusterSize, 1, leaderChan, all, stop)
|
||||
|
||||
<-all
|
||||
leader := <-leaderChan
|
||||
|
||||
_, err = c.Set("foo", "bar", 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Recovery error: %s", err)
|
||||
}
|
||||
|
||||
port, _ := strconv.Atoi(strings.Split(leader, ":")[2])
|
||||
num := port - 7000
|
||||
resp, _ := tests.Delete(leader+"/v2/admin/machines/node"+strconv.Itoa(num), "application/json", nil)
|
||||
if !assert.Equal(t, resp.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// check the old leader is in standby mode now
|
||||
time.Sleep(time.Second)
|
||||
resp, _ = tests.Get(leader + "/name")
|
||||
assert.Equal(t, resp.StatusCode, 404)
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ func TestRemoveNode(t *testing.T) {
|
||||
|
||||
c.SyncCluster()
|
||||
|
||||
resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":4, "syncInterval":1}`))
|
||||
resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":4, "syncInterval":5}`))
|
||||
if !assert.Equal(t, resp.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
@ -41,11 +41,6 @@ func TestRemoveNode(t *testing.T) {
|
||||
client := &http.Client{}
|
||||
for i := 0; i < 2; i++ {
|
||||
for i := 0; i < 2; i++ {
|
||||
r, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":3}`))
|
||||
if !assert.Equal(t, r.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
client.Do(rmReq)
|
||||
|
||||
fmt.Println("send remove to node3 and wait for its exiting")
|
||||
@ -76,12 +71,7 @@ func TestRemoveNode(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":4}`))
|
||||
if !assert.Equal(t, r.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
time.Sleep(time.Second + time.Second)
|
||||
time.Sleep(time.Second + 5*time.Second)
|
||||
|
||||
resp, err = c.Get("_etcd/machines", false, false)
|
||||
|
||||
@ -96,11 +86,6 @@ func TestRemoveNode(t *testing.T) {
|
||||
|
||||
// first kill the node, then remove it, then add it back
|
||||
for i := 0; i < 2; i++ {
|
||||
r, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":3}`))
|
||||
if !assert.Equal(t, r.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
etcds[2].Kill()
|
||||
fmt.Println("kill node3 and wait for its exiting")
|
||||
etcds[2].Wait()
|
||||
@ -131,11 +116,6 @@ func TestRemoveNode(t *testing.T) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":4}`))
|
||||
if !assert.Equal(t, r.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
time.Sleep(time.Second + time.Second)
|
||||
|
||||
resp, err = c.Get("_etcd/machines", false, false)
|
||||
@ -169,7 +149,8 @@ func TestRemovePausedNode(t *testing.T) {
|
||||
if !assert.Equal(t, r.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
// Wait for standby instances to update its cluster config
|
||||
time.Sleep(6 * time.Second)
|
||||
|
||||
resp, err := c.Get("_etcd/machines", false, false)
|
||||
if err != nil {
|
||||
|
@ -89,7 +89,7 @@ func TestSnapshot(t *testing.T) {
|
||||
|
||||
index, _ = strconv.Atoi(snapshots[0].Name()[2:6])
|
||||
|
||||
if index < 1010 || index > 1025 {
|
||||
if index < 1010 || index > 1029 {
|
||||
t.Fatal("wrong name of snapshot :", snapshots[0].Name())
|
||||
}
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/server"
|
||||
"github.com/coreos/etcd/store"
|
||||
"github.com/coreos/etcd/tests"
|
||||
"github.com/coreos/etcd/third_party/github.com/coreos/go-etcd/etcd"
|
||||
"github.com/coreos/etcd/third_party/github.com/stretchr/testify/assert"
|
||||
@ -279,3 +280,61 @@ func TestStandbyDramaticChange(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandbyJoinMiss(t *testing.T) {
|
||||
clusterSize := 2
|
||||
_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
|
||||
if err != nil {
|
||||
t.Fatal("cannot create cluster")
|
||||
}
|
||||
defer DestroyCluster(etcds)
|
||||
|
||||
c := etcd.NewClient(nil)
|
||||
c.SyncCluster()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Verify that we have two machines.
|
||||
result, err := c.Get("_etcd/machines", false, true)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(result.Node.Nodes), clusterSize)
|
||||
|
||||
resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"removeDelay":4, "syncInterval":4}`))
|
||||
if !assert.Equal(t, resp.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
resp, _ = tests.Delete("http://localhost:7001/v2/admin/machines/node2", "application/json", nil)
|
||||
if !assert.Equal(t, resp.StatusCode, 200) {
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Wait for a monitor cycle before checking for removal.
|
||||
time.Sleep(server.ActiveMonitorTimeout + (1 * time.Second))
|
||||
|
||||
// Verify that we now have one peer.
|
||||
result, err = c.Get("_etcd/machines", false, true)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(result.Node.Nodes), 1)
|
||||
|
||||
// Simulate the join failure
|
||||
_, err = server.NewClient(nil).AddMachine("http://localhost:7001",
|
||||
&server.JoinCommand{
|
||||
MinVersion: store.MinVersion(),
|
||||
MaxVersion: store.MaxVersion(),
|
||||
Name: "node2",
|
||||
RaftURL: "http://127.0.0.1:7002",
|
||||
EtcdURL: "http://127.0.0.1:4002",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
time.Sleep(6 * time.Second)
|
||||
|
||||
go tests.Delete("http://localhost:7001/v2/admin/machines/node2", "application/json", nil)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
result, err = c.Get("_etcd/machines", false, true)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(result.Node.Nodes), 1)
|
||||
}
|
||||
|
@ -507,61 +507,61 @@ func TestReset(t *testing.T) {
|
||||
func TestEncodeDecode1(t *testing.T) {
|
||||
pb := initGoTest(false)
|
||||
overify(t, pb,
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 0x20
|
||||
"714000000000000000"+ // field 14, encoding 1, value 0x40
|
||||
"78a019"+ // field 15, encoding 0, value 0xca0 = 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
|
||||
"b304"+ // field 70, encoding 3, start group
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // field 70, encoding 4, end group
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f") // field 103, encoding 0, 0x7f zigzag64
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 0x20
|
||||
"714000000000000000"+ // field 14, encoding 1, value 0x40
|
||||
"78a019"+ // field 15, encoding 0, value 0xca0 = 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
|
||||
"b304"+ // field 70, encoding 3, start group
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // field 70, encoding 4, end group
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f") // field 103, encoding 0, 0x7f zigzag64
|
||||
}
|
||||
|
||||
// All required fields set, defaults provided.
|
||||
func TestEncodeDecode2(t *testing.T) {
|
||||
pb := initGoTest(true)
|
||||
overify(t, pb,
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
|
||||
}
|
||||
|
||||
@ -583,37 +583,37 @@ func TestEncodeDecode3(t *testing.T) {
|
||||
pb.F_Sint64Defaulted = Int64(-64)
|
||||
|
||||
overify(t, pb,
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
|
||||
}
|
||||
|
||||
@ -639,56 +639,56 @@ func TestEncodeDecode4(t *testing.T) {
|
||||
pb.Optionalgroup = initGoTest_OptionalGroup()
|
||||
|
||||
overify(t, pb,
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
|
||||
"1807"+ // field 3, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"f00101"+ // field 30, encoding 0, value 1
|
||||
"f80120"+ // field 31, encoding 0, value 32
|
||||
"800240"+ // field 32, encoding 0, value 64
|
||||
"8d02a00c0000"+ // field 33, encoding 5, value 3232
|
||||
"91024019000000000000"+ // field 34, encoding 1, value 6464
|
||||
"9802a0dd13"+ // field 35, encoding 0, value 323232
|
||||
"a002c0ba27"+ // field 36, encoding 0, value 646464
|
||||
"ad0200000042"+ // field 37, encoding 5, value 32.0
|
||||
"b1020000000000005040"+ // field 38, encoding 1, value 64.0
|
||||
"ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"d305"+ // start group field 90 level 1
|
||||
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
|
||||
"d405"+ // end group field 90 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
|
||||
"f0123f"+ // field 302, encoding 0, value 63
|
||||
"f8127f"+ // field 303, encoding 0, value 127
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
|
||||
"1807"+ // field 3, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"f00101"+ // field 30, encoding 0, value 1
|
||||
"f80120"+ // field 31, encoding 0, value 32
|
||||
"800240"+ // field 32, encoding 0, value 64
|
||||
"8d02a00c0000"+ // field 33, encoding 5, value 3232
|
||||
"91024019000000000000"+ // field 34, encoding 1, value 6464
|
||||
"9802a0dd13"+ // field 35, encoding 0, value 323232
|
||||
"a002c0ba27"+ // field 36, encoding 0, value 646464
|
||||
"ad0200000042"+ // field 37, encoding 5, value 32.0
|
||||
"b1020000000000005040"+ // field 38, encoding 1, value 64.0
|
||||
"ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"d305"+ // start group field 90 level 1
|
||||
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
|
||||
"d405"+ // end group field 90 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
|
||||
"f0123f"+ // field 302, encoding 0, value 63
|
||||
"f8127f"+ // field 303, encoding 0, value 127
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
|
||||
}
|
||||
|
||||
@ -712,71 +712,71 @@ func TestEncodeDecode5(t *testing.T) {
|
||||
pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
|
||||
|
||||
overify(t, pb,
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
|
||||
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"a00100"+ // field 20, encoding 0, value 0
|
||||
"a00101"+ // field 20, encoding 0, value 1
|
||||
"a80120"+ // field 21, encoding 0, value 32
|
||||
"a80121"+ // field 21, encoding 0, value 33
|
||||
"b00140"+ // field 22, encoding 0, value 64
|
||||
"b00141"+ // field 22, encoding 0, value 65
|
||||
"bd01a00c0000"+ // field 23, encoding 5, value 3232
|
||||
"bd01050d0000"+ // field 23, encoding 5, value 3333
|
||||
"c1014019000000000000"+ // field 24, encoding 1, value 6464
|
||||
"c101a519000000000000"+ // field 24, encoding 1, value 6565
|
||||
"c801a0dd13"+ // field 25, encoding 0, value 323232
|
||||
"c80195ac14"+ // field 25, encoding 0, value 333333
|
||||
"d001c0ba27"+ // field 26, encoding 0, value 646464
|
||||
"d001b58928"+ // field 26, encoding 0, value 656565
|
||||
"dd0100000042"+ // field 27, encoding 5, value 32.0
|
||||
"dd0100000442"+ // field 27, encoding 5, value 33.0
|
||||
"e1010000000000005040"+ // field 28, encoding 1, value 64.0
|
||||
"e1010000000000405040"+ // field 28, encoding 1, value 65.0
|
||||
"ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
|
||||
"ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"8305"+ // start group field 80 level 1
|
||||
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
|
||||
"8405"+ // end group field 80 level 1
|
||||
"8305"+ // start group field 80 level 1
|
||||
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
|
||||
"8405"+ // end group field 80 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"ca0c03"+"626967"+ // field 201, encoding 2, string "big"
|
||||
"ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
|
||||
"d00c40"+ // field 202, encoding 0, value 32
|
||||
"d00c3f"+ // field 202, encoding 0, value -32
|
||||
"d80c8001"+ // field 203, encoding 0, value 64
|
||||
"d80c7f"+ // field 203, encoding 0, value -64
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
|
||||
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"a00100"+ // field 20, encoding 0, value 0
|
||||
"a00101"+ // field 20, encoding 0, value 1
|
||||
"a80120"+ // field 21, encoding 0, value 32
|
||||
"a80121"+ // field 21, encoding 0, value 33
|
||||
"b00140"+ // field 22, encoding 0, value 64
|
||||
"b00141"+ // field 22, encoding 0, value 65
|
||||
"bd01a00c0000"+ // field 23, encoding 5, value 3232
|
||||
"bd01050d0000"+ // field 23, encoding 5, value 3333
|
||||
"c1014019000000000000"+ // field 24, encoding 1, value 6464
|
||||
"c101a519000000000000"+ // field 24, encoding 1, value 6565
|
||||
"c801a0dd13"+ // field 25, encoding 0, value 323232
|
||||
"c80195ac14"+ // field 25, encoding 0, value 333333
|
||||
"d001c0ba27"+ // field 26, encoding 0, value 646464
|
||||
"d001b58928"+ // field 26, encoding 0, value 656565
|
||||
"dd0100000042"+ // field 27, encoding 5, value 32.0
|
||||
"dd0100000442"+ // field 27, encoding 5, value 33.0
|
||||
"e1010000000000005040"+ // field 28, encoding 1, value 64.0
|
||||
"e1010000000000405040"+ // field 28, encoding 1, value 65.0
|
||||
"ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
|
||||
"ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
|
||||
"c00201"+ // field 40, encoding 0, value 1
|
||||
"c80220"+ // field 41, encoding 0, value 32
|
||||
"d00240"+ // field 42, encoding 0, value 64
|
||||
"dd0240010000"+ // field 43, encoding 5, value 320
|
||||
"e1028002000000000000"+ // field 44, encoding 1, value 640
|
||||
"e8028019"+ // field 45, encoding 0, value 3200
|
||||
"f0028032"+ // field 46, encoding 0, value 6400
|
||||
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
|
||||
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
|
||||
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"8305"+ // start group field 80 level 1
|
||||
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
|
||||
"8405"+ // end group field 80 level 1
|
||||
"8305"+ // start group field 80 level 1
|
||||
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
|
||||
"8405"+ // end group field 80 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"ca0c03"+"626967"+ // field 201, encoding 2, string "big"
|
||||
"ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
|
||||
"d00c40"+ // field 202, encoding 0, value 32
|
||||
"d00c3f"+ // field 202, encoding 0, value -32
|
||||
"d80c8001"+ // field 203, encoding 0, value 64
|
||||
"d80c7f"+ // field 203, encoding 0, value -64
|
||||
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
|
||||
"90193f"+ // field 402, encoding 0, value 63
|
||||
"98197f") // field 403, encoding 0, value 127
|
||||
|
||||
}
|
||||
|
||||
@ -796,43 +796,43 @@ func TestEncodeDecode6(t *testing.T) {
|
||||
pb.F_Sint64RepeatedPacked = []int64{64, -64}
|
||||
|
||||
overify(t, pb,
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
|
||||
"9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
|
||||
"a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
|
||||
"aa0308"+ // field 53, encoding 2, 8 bytes
|
||||
"a00c0000050d0000"+ // value 3232, value 3333
|
||||
"b20310"+ // field 54, encoding 2, 16 bytes
|
||||
"4019000000000000a519000000000000"+ // value 6464, value 6565
|
||||
"ba0306"+ // field 55, encoding 2, 6 bytes
|
||||
"a0dd1395ac14"+ // value 323232, value 333333
|
||||
"c20306"+ // field 56, encoding 2, 6 bytes
|
||||
"c0ba27b58928"+ // value 646464, value 656565
|
||||
"ca0308"+ // field 57, encoding 2, 8 bytes
|
||||
"0000004200000442"+ // value 32.0, value 33.0
|
||||
"d20310"+ // field 58, encoding 2, 16 bytes
|
||||
"00000000000050400000000000405040"+ // value 64.0, value 65.0
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"b21f02"+ // field 502, encoding 2, 2 bytes
|
||||
"403f"+ // value 32, value -32
|
||||
"ba1f03"+ // field 503, encoding 2, 3 bytes
|
||||
"80017f") // value 64, value -64
|
||||
"0807"+ // field 1, encoding 0, value 7
|
||||
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
|
||||
"5001"+ // field 10, encoding 0, value 1
|
||||
"5803"+ // field 11, encoding 0, value 3
|
||||
"6006"+ // field 12, encoding 0, value 6
|
||||
"6d20000000"+ // field 13, encoding 5, value 32
|
||||
"714000000000000000"+ // field 14, encoding 1, value 64
|
||||
"78a019"+ // field 15, encoding 0, value 3232
|
||||
"8001c032"+ // field 16, encoding 0, value 6464
|
||||
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
|
||||
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
|
||||
"9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
|
||||
"9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
|
||||
"a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
|
||||
"aa0308"+ // field 53, encoding 2, 8 bytes
|
||||
"a00c0000050d0000"+ // value 3232, value 3333
|
||||
"b20310"+ // field 54, encoding 2, 16 bytes
|
||||
"4019000000000000a519000000000000"+ // value 6464, value 6565
|
||||
"ba0306"+ // field 55, encoding 2, 6 bytes
|
||||
"a0dd1395ac14"+ // value 323232, value 333333
|
||||
"c20306"+ // field 56, encoding 2, 6 bytes
|
||||
"c0ba27b58928"+ // value 646464, value 656565
|
||||
"ca0308"+ // field 57, encoding 2, 8 bytes
|
||||
"0000004200000442"+ // value 32.0, value 33.0
|
||||
"d20310"+ // field 58, encoding 2, 16 bytes
|
||||
"00000000000050400000000000405040"+ // value 64.0, value 65.0
|
||||
"b304"+ // start group field 70 level 1
|
||||
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
|
||||
"b404"+ // end group field 70 level 1
|
||||
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
|
||||
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
|
||||
"b21f02"+ // field 502, encoding 2, 2 bytes
|
||||
"403f"+ // value 32, value -32
|
||||
"ba1f03"+ // field 503, encoding 2, 3 bytes
|
||||
"80017f") // value 64, value -64
|
||||
}
|
||||
|
||||
// Test that we can encode empty bytes fields.
|
||||
@ -898,13 +898,13 @@ func TestSkippingUnrecognizedFields(t *testing.T) {
|
||||
|
||||
// Now new a GoSkipTest record.
|
||||
skip := &GoSkipTest{
|
||||
SkipInt32: Int32(32),
|
||||
SkipFixed32: Uint32(3232),
|
||||
SkipFixed64: Uint64(6464),
|
||||
SkipString: String("skipper"),
|
||||
SkipInt32: Int32(32),
|
||||
SkipFixed32: Uint32(3232),
|
||||
SkipFixed64: Uint64(6464),
|
||||
SkipString: String("skipper"),
|
||||
Skipgroup: &GoSkipTest_SkipGroup{
|
||||
GroupInt32: Int32(75),
|
||||
GroupString: String("wxyz"),
|
||||
GroupInt32: Int32(75),
|
||||
GroupString: String("wxyz"),
|
||||
},
|
||||
}
|
||||
|
||||
@ -944,8 +944,8 @@ func TestSkippingUnrecognizedFields(t *testing.T) {
|
||||
func TestSubmessageUnrecognizedFields(t *testing.T) {
|
||||
nm := &NewMessage{
|
||||
Nested: &NewMessage_Nested{
|
||||
Name: String("Nigel"),
|
||||
FoodGroup: String("carbs"),
|
||||
Name: String("Nigel"),
|
||||
FoodGroup: String("carbs"),
|
||||
},
|
||||
}
|
||||
b, err := Marshal(nm)
|
||||
@ -960,9 +960,9 @@ func TestSubmessageUnrecognizedFields(t *testing.T) {
|
||||
}
|
||||
exp := &OldMessage{
|
||||
Nested: &OldMessage_Nested{
|
||||
Name: String("Nigel"),
|
||||
Name: String("Nigel"),
|
||||
// normal protocol buffer users should not do this
|
||||
XXX_unrecognized: []byte("\x12\x05carbs"),
|
||||
XXX_unrecognized: []byte("\x12\x05carbs"),
|
||||
},
|
||||
}
|
||||
if !Equal(om, exp) {
|
||||
@ -999,7 +999,7 @@ func TestBigRepeated(t *testing.T) {
|
||||
pb := initGoTest(true)
|
||||
|
||||
// Create the arrays
|
||||
const N = 50 // Internally the library starts much smaller.
|
||||
const N = 50 // Internally the library starts much smaller.
|
||||
pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
|
||||
pb.F_Sint64Repeated = make([]int64, N)
|
||||
pb.F_Sint32Repeated = make([]int32, N)
|
||||
@ -1047,7 +1047,7 @@ func TestBigRepeated(t *testing.T) {
|
||||
|
||||
// Check the checkable values
|
||||
for i := uint64(0); i < N; i++ {
|
||||
if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
|
||||
if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
|
||||
t.Error("pbd.Repeatedgroup bad")
|
||||
}
|
||||
var x uint64
|
||||
@ -1099,7 +1099,7 @@ func TestBigRepeated(t *testing.T) {
|
||||
if pbd.F_BoolRepeated[i] != (i%2 == 0) {
|
||||
t.Error("pbd.F_BoolRepeated bad", x, i)
|
||||
}
|
||||
if pbd.RepeatedField[i] == nil { // TODO: more checking?
|
||||
if pbd.RepeatedField[i] == nil { // TODO: more checking?
|
||||
t.Error("pbd.RepeatedField bad")
|
||||
}
|
||||
}
|
||||
@ -1159,8 +1159,8 @@ func TestProto1RepeatedGroup(t *testing.T) {
|
||||
pb := &MessageList{
|
||||
Message: []*MessageList_Message{
|
||||
{
|
||||
Name: String("blah"),
|
||||
Count: Int32(7),
|
||||
Name: String("blah"),
|
||||
Count: Int32(7),
|
||||
},
|
||||
// NOTE: pb.Message[1] is a nil
|
||||
nil,
|
||||
@ -1240,9 +1240,9 @@ type NNIMessage struct {
|
||||
nni nonNillableInt
|
||||
}
|
||||
|
||||
func (*NNIMessage) Reset() {}
|
||||
func (*NNIMessage) String() string { return "" }
|
||||
func (*NNIMessage) ProtoMessage() {}
|
||||
func (*NNIMessage) Reset() {}
|
||||
func (*NNIMessage) String() string { return "" }
|
||||
func (*NNIMessage) ProtoMessage() {}
|
||||
|
||||
// A type that implements the Marshaler interface and is nillable.
|
||||
type nillableMessage struct {
|
||||
@ -1257,9 +1257,9 @@ type NMMessage struct {
|
||||
nm *nillableMessage
|
||||
}
|
||||
|
||||
func (*NMMessage) Reset() {}
|
||||
func (*NMMessage) String() string { return "" }
|
||||
func (*NMMessage) ProtoMessage() {}
|
||||
func (*NMMessage) Reset() {}
|
||||
func (*NMMessage) String() string { return "" }
|
||||
func (*NMMessage) ProtoMessage() {}
|
||||
|
||||
// Verify a type that uses the Marshaler interface, but has a nil pointer.
|
||||
func TestNilMarshaler(t *testing.T) {
|
||||
@ -1273,7 +1273,7 @@ func TestNilMarshaler(t *testing.T) {
|
||||
// Try a struct with a Marshaler field that is not nillable.
|
||||
nnim := new(NNIMessage)
|
||||
nnim.nni = 7
|
||||
var _ Marshaler = nnim.nni // verify it is truly a Marshaler
|
||||
var _ Marshaler = nnim.nni // verify it is truly a Marshaler
|
||||
if _, err := Marshal(nnim); err != nil {
|
||||
t.Error("unexpected error marshaling nnim: ", err)
|
||||
}
|
||||
@ -1286,23 +1286,23 @@ func TestAllSetDefaults(t *testing.T) {
|
||||
F_Nan: Float32(1.7),
|
||||
}
|
||||
expected := &Defaults{
|
||||
F_Bool: Bool(true),
|
||||
F_Int32: Int32(32),
|
||||
F_Int64: Int64(64),
|
||||
F_Fixed32: Uint32(320),
|
||||
F_Fixed64: Uint64(640),
|
||||
F_Uint32: Uint32(3200),
|
||||
F_Uint64: Uint64(6400),
|
||||
F_Float: Float32(314159),
|
||||
F_Double: Float64(271828),
|
||||
F_String: String(`hello, "world!"` + "\n"),
|
||||
F_Bytes: []byte("Bignose"),
|
||||
F_Sint32: Int32(-32),
|
||||
F_Sint64: Int64(-64),
|
||||
F_Enum: Defaults_GREEN.Enum(),
|
||||
F_Pinf: Float32(float32(math.Inf(1))),
|
||||
F_Ninf: Float32(float32(math.Inf(-1))),
|
||||
F_Nan: Float32(1.7),
|
||||
F_Bool: Bool(true),
|
||||
F_Int32: Int32(32),
|
||||
F_Int64: Int64(64),
|
||||
F_Fixed32: Uint32(320),
|
||||
F_Fixed64: Uint64(640),
|
||||
F_Uint32: Uint32(3200),
|
||||
F_Uint64: Uint64(6400),
|
||||
F_Float: Float32(314159),
|
||||
F_Double: Float64(271828),
|
||||
F_String: String(`hello, "world!"` + "\n"),
|
||||
F_Bytes: []byte("Bignose"),
|
||||
F_Sint32: Int32(-32),
|
||||
F_Sint64: Int64(-64),
|
||||
F_Enum: Defaults_GREEN.Enum(),
|
||||
F_Pinf: Float32(float32(math.Inf(1))),
|
||||
F_Ninf: Float32(float32(math.Inf(-1))),
|
||||
F_Nan: Float32(1.7),
|
||||
}
|
||||
SetDefaults(m)
|
||||
if !Equal(m, expected) {
|
||||
@ -1323,16 +1323,16 @@ func TestSetDefaultsWithSetField(t *testing.T) {
|
||||
|
||||
func TestSetDefaultsWithSubMessage(t *testing.T) {
|
||||
m := &OtherMessage{
|
||||
Key: Int64(123),
|
||||
Key: Int64(123),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("gopher"),
|
||||
},
|
||||
}
|
||||
expected := &OtherMessage{
|
||||
Key: Int64(123),
|
||||
Key: Int64(123),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("gopher"),
|
||||
Port: Int32(4000),
|
||||
Host: String("gopher"),
|
||||
Port: Int32(4000),
|
||||
},
|
||||
}
|
||||
SetDefaults(m)
|
||||
@ -1375,12 +1375,12 @@ func TestMaximumTagNumber(t *testing.T) {
|
||||
|
||||
func TestJSON(t *testing.T) {
|
||||
m := &MyMessage{
|
||||
Count: Int32(4),
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Count: Int32(4),
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Inner: &InnerMessage{
|
||||
Host: String("cauchy"),
|
||||
},
|
||||
Bikeshed: MyMessage_GREEN.Enum(),
|
||||
Bikeshed: MyMessage_GREEN.Enum(),
|
||||
}
|
||||
const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
|
||||
|
||||
@ -1413,7 +1413,7 @@ func TestJSON(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBadWireType(t *testing.T) {
|
||||
b := []byte{7<<3 | 6} // field 7, wire type 6
|
||||
b := []byte{7<<3 | 6} // field 7, wire type 6
|
||||
pb := new(OtherMessage)
|
||||
if err := Unmarshal(b, pb); err == nil {
|
||||
t.Errorf("Unmarshal did not fail")
|
||||
@ -1610,10 +1610,10 @@ func TestUnmarshalMergesMessages(t *testing.T) {
|
||||
// If a nested message occurs twice in the input,
|
||||
// the fields should be merged when decoding.
|
||||
a := &OtherMessage{
|
||||
Key: Int64(123),
|
||||
Key: Int64(123),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("polhode"),
|
||||
Port: Int32(1234),
|
||||
Host: String("polhode"),
|
||||
Port: Int32(1234),
|
||||
},
|
||||
}
|
||||
aData, err := Marshal(a)
|
||||
@ -1621,10 +1621,10 @@ func TestUnmarshalMergesMessages(t *testing.T) {
|
||||
t.Fatalf("Marshal(a): %v", err)
|
||||
}
|
||||
b := &OtherMessage{
|
||||
Weight: Float32(1.2),
|
||||
Weight: Float32(1.2),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("herpolhode"),
|
||||
Connected: Bool(true),
|
||||
Host: String("herpolhode"),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
}
|
||||
bData, err := Marshal(b)
|
||||
@ -1632,12 +1632,12 @@ func TestUnmarshalMergesMessages(t *testing.T) {
|
||||
t.Fatalf("Marshal(b): %v", err)
|
||||
}
|
||||
want := &OtherMessage{
|
||||
Key: Int64(123),
|
||||
Weight: Float32(1.2),
|
||||
Key: Int64(123),
|
||||
Weight: Float32(1.2),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("herpolhode"),
|
||||
Port: Int32(1234),
|
||||
Connected: Bool(true),
|
||||
Host: String("herpolhode"),
|
||||
Port: Int32(1234),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
}
|
||||
got := new(OtherMessage)
|
||||
@ -1651,8 +1651,8 @@ func TestUnmarshalMergesMessages(t *testing.T) {
|
||||
|
||||
func TestEncodingSizes(t *testing.T) {
|
||||
tests := []struct {
|
||||
m Message
|
||||
n int
|
||||
m Message
|
||||
n int
|
||||
}{
|
||||
{&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
|
||||
{&Defaults{F_Int32: Int32(math.MinInt32)}, 6},
|
||||
@ -1676,22 +1676,22 @@ func TestRequiredNotSetError(t *testing.T) {
|
||||
pb.F_Int32Required = nil
|
||||
pb.F_Int64Required = nil
|
||||
|
||||
expected := "0807" + // field 1, encoding 0, value 7
|
||||
"2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
|
||||
"5001" + // field 10, encoding 0, value 1
|
||||
"6d20000000" + // field 13, encoding 5, value 0x20
|
||||
"714000000000000000" + // field 14, encoding 1, value 0x40
|
||||
"78a019" + // field 15, encoding 0, value 0xca0 = 3232
|
||||
"8001c032" + // field 16, encoding 0, value 0x1940 = 6464
|
||||
"8d0100004a45" + // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940" + // field 18, encoding 1, value 6464.0
|
||||
"9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
|
||||
"b304" + // field 70, encoding 3, start group
|
||||
"ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
|
||||
"b404" + // field 70, encoding 4, end group
|
||||
"aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
|
||||
"b0063f" + // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f" // field 103, encoding 0, 0x7f zigzag64
|
||||
expected := "0807" + // field 1, encoding 0, value 7
|
||||
"2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
|
||||
"5001" + // field 10, encoding 0, value 1
|
||||
"6d20000000" + // field 13, encoding 5, value 0x20
|
||||
"714000000000000000" + // field 14, encoding 1, value 0x40
|
||||
"78a019" + // field 15, encoding 0, value 0xca0 = 3232
|
||||
"8001c032" + // field 16, encoding 0, value 0x1940 = 6464
|
||||
"8d0100004a45" + // field 17, encoding 5, value 3232.0
|
||||
"9101000000000040b940" + // field 18, encoding 1, value 6464.0
|
||||
"9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
|
||||
"b304" + // field 70, encoding 3, start group
|
||||
"ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
|
||||
"b404" + // field 70, encoding 4, end group
|
||||
"aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
|
||||
"b0063f" + // field 102, encoding 0, 0x3f zigzag32
|
||||
"b8067f" // field 103, encoding 0, 0x7f zigzag64
|
||||
|
||||
o := old()
|
||||
bytes, err := Marshal(pb)
|
||||
@ -1751,7 +1751,7 @@ func fuzzUnmarshal(t *testing.T, data []byte) {
|
||||
|
||||
func testMsg() *GoTest {
|
||||
pb := initGoTest(true)
|
||||
const N = 1000 // Internally the library starts much smaller.
|
||||
const N = 1000 // Internally the library starts much smaller.
|
||||
pb.F_Int32Repeated = make([]int32, N)
|
||||
pb.F_DoubleRepeated = make([]float64, N)
|
||||
for i := 0; i < N; i++ {
|
||||
@ -1869,13 +1869,13 @@ func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
|
||||
b.StopTimer()
|
||||
pb := initGoTestField()
|
||||
skip := &GoSkipTest{
|
||||
SkipInt32: Int32(32),
|
||||
SkipFixed32: Uint32(3232),
|
||||
SkipFixed64: Uint64(6464),
|
||||
SkipString: String("skipper"),
|
||||
SkipInt32: Int32(32),
|
||||
SkipFixed32: Uint32(3232),
|
||||
SkipFixed64: Uint64(6464),
|
||||
SkipString: String("skipper"),
|
||||
Skipgroup: &GoSkipTest_SkipGroup{
|
||||
GroupInt32: Int32(75),
|
||||
GroupString: String("wxyz"),
|
||||
GroupInt32: Int32(75),
|
||||
GroupString: String("wxyz"),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -83,9 +83,14 @@ func mergeStruct(out, in reflect.Value) {
|
||||
mergeAny(out.Field(i), in.Field(i))
|
||||
}
|
||||
|
||||
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
|
||||
emOut := out.Addr().Interface().(extendableProto)
|
||||
if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
|
||||
emOut := out.Addr().Interface().(extensionsMap)
|
||||
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
|
||||
} else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
|
||||
emOut := out.Addr().Interface().(extensionsBytes)
|
||||
bIn := emIn.GetExtensions()
|
||||
bOut := emOut.GetExtensions()
|
||||
*bOut = append(*bOut, *bIn...)
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
|
@ -40,13 +40,13 @@ import (
|
||||
)
|
||||
|
||||
var cloneTestMessage = &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
Connected: proto.Bool(true),
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
@ -56,7 +56,7 @@ var cloneTestMessage = &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
}
|
||||
|
||||
func init() {
|
||||
@ -99,17 +99,17 @@ var mergeTests = []struct {
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
},
|
||||
},
|
||||
{
|
||||
src: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Pet: []string{"horsey"},
|
||||
Pet: []string{"horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Value: []byte("some bytes"),
|
||||
@ -118,10 +118,10 @@ var mergeTests = []struct {
|
||||
},
|
||||
dst: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
Host: proto.String("niles"),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Pet: []string{"bunny", "kitty"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
@ -134,11 +134,11 @@ var mergeTests = []struct {
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
Port: proto.Int32(9099),
|
||||
Host: proto.String("hey"),
|
||||
Connected: proto.Bool(true),
|
||||
Port: proto.Int32(9099),
|
||||
},
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(31415926535),
|
||||
@ -158,13 +158,13 @@ var mergeTests = []struct {
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham")},
|
||||
RepBytes: [][]byte{[]byte("sham")},
|
||||
},
|
||||
want: &pb.MyMessage{
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(6),
|
||||
},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -235,12 +235,6 @@ func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer,
|
||||
|
||||
ptr := structPointer_Bytes(base, unrecField)
|
||||
|
||||
if *ptr == nil {
|
||||
// This is the first skipped element,
|
||||
// allocate a new buffer.
|
||||
*ptr = o.bufalloc()
|
||||
}
|
||||
|
||||
// Add the skipped field to struct field
|
||||
obuf := o.buf
|
||||
|
||||
@ -381,9 +375,14 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
|
||||
if prop.extendable {
|
||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
ext := e.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
e.ExtensionMap()[int32(tag)] = ext
|
||||
if ee, ok := e.(extensionsMap); ok {
|
||||
ext := ee.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
ee.ExtensionMap()[int32(tag)] = ext
|
||||
} else if ee, ok := e.(extensionsBytes); ok {
|
||||
ext := ee.GetExtensions()
|
||||
*ext = append(*ext, o.buf[oi:o.index]...)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -221,6 +221,10 @@ func Marshal(pb Message) ([]byte, error) {
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
return nil, err
|
||||
}
|
||||
if p.buf == nil && err == nil {
|
||||
// Return a non-nil slice on success.
|
||||
return []byte{}, nil
|
||||
}
|
||||
return p.buf, err
|
||||
}
|
||||
|
||||
@ -400,23 +404,8 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// need the length before we can write out the message itself,
|
||||
// so marshal into a separate byte buffer first.
|
||||
obuf := o.buf
|
||||
o.buf = o.bufalloc()
|
||||
|
||||
err := o.enc_struct(p.stype, p.sprop, structp)
|
||||
|
||||
nbuf := o.buf
|
||||
o.buf = obuf
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
o.buffree(nbuf)
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(nbuf)
|
||||
o.buffree(nbuf)
|
||||
return state.err
|
||||
return o.enc_len_struct(p.stype, p.sprop, structp, &state)
|
||||
}
|
||||
|
||||
func size_struct_message(p *Properties, base structPointer) int {
|
||||
@ -748,24 +737,14 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
|
||||
continue
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
o.buf = o.bufalloc()
|
||||
|
||||
err := o.enc_struct(p.stype, p.sprop, structp)
|
||||
|
||||
nbuf := o.buf
|
||||
o.buf = obuf
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
err := o.enc_len_struct(p.stype, p.sprop, structp, &state)
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
o.buffree(nbuf)
|
||||
if err == ErrNil {
|
||||
return ErrRepeatedHasNil
|
||||
}
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(nbuf)
|
||||
|
||||
o.buffree(nbuf)
|
||||
}
|
||||
return state.err
|
||||
}
|
||||
@ -923,6 +902,36 @@ func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n
|
||||
return
|
||||
}
|
||||
|
||||
var zeroes [20]byte // longer than any conceivable sizeVarint
|
||||
|
||||
// Encode a struct, preceded by its encoded length (as a varint).
|
||||
func (o *Buffer) enc_len_struct(t reflect.Type, prop *StructProperties, base structPointer, state *errorState) error {
|
||||
iLen := len(o.buf)
|
||||
o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
|
||||
iMsg := len(o.buf)
|
||||
err := o.enc_struct(t, prop, base)
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
return err
|
||||
}
|
||||
lMsg := len(o.buf) - iMsg
|
||||
lLen := sizeVarint(uint64(lMsg))
|
||||
switch x := lLen - (iMsg - iLen); {
|
||||
case x > 0: // actual length is x bytes larger than the space we reserved
|
||||
// Move msg x bytes right.
|
||||
o.buf = append(o.buf, zeroes[:x]...)
|
||||
copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
|
||||
case x < 0: // actual length is x bytes smaller than the space we reserved
|
||||
// Move msg x bytes left.
|
||||
copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
|
||||
o.buf = o.buf[:len(o.buf)+x] // x is negative
|
||||
}
|
||||
// Encode the length in the reserved space.
|
||||
o.buf = o.buf[:iLen]
|
||||
o.EncodeVarint(uint64(lMsg))
|
||||
o.buf = o.buf[:len(o.buf)+lMsg]
|
||||
return state.err
|
||||
}
|
||||
|
||||
// errorState maintains the first error that occurs and updates that error
|
||||
// with additional context.
|
||||
type errorState struct {
|
||||
|
@ -44,6 +44,24 @@ type Sizer interface {
|
||||
Size() int
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
|
||||
s := *structPointer_Bytes(base, p.field)
|
||||
if s == nil {
|
||||
return ErrNil
|
||||
}
|
||||
o.buf = append(o.buf, s...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
|
||||
s := *structPointer_Bytes(base, p.field)
|
||||
if s == nil {
|
||||
return 0
|
||||
}
|
||||
n += len(s)
|
||||
return
|
||||
}
|
||||
|
||||
// Encode a reference to bool pointer.
|
||||
func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
|
||||
v := structPointer_RefBool(base, p.field)
|
||||
@ -156,23 +174,8 @@ func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// need the length before we can write out the message itself,
|
||||
// so marshal into a separate byte buffer first.
|
||||
obuf := o.buf
|
||||
o.buf = o.bufalloc()
|
||||
|
||||
err := o.enc_struct(p.stype, p.sprop, structp)
|
||||
|
||||
nbuf := o.buf
|
||||
o.buf = obuf
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
o.buffree(nbuf)
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(nbuf)
|
||||
o.buffree(nbuf)
|
||||
return nil
|
||||
return o.enc_len_struct(p.stype, p.sprop, structp, &state)
|
||||
}
|
||||
|
||||
//TODO this is only copied, please fix this
|
||||
@ -222,26 +225,17 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer)
|
||||
continue
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
o.buf = o.bufalloc()
|
||||
|
||||
err := o.enc_struct(p.stype, p.sprop, structp)
|
||||
|
||||
nbuf := o.buf
|
||||
o.buf = obuf
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
err := o.enc_len_struct(p.stype, p.sprop, structp, &state)
|
||||
if err != nil && !state.shouldContinue(err, nil) {
|
||||
o.buffree(nbuf)
|
||||
if err == ErrNil {
|
||||
return ErrRepeatedHasNil
|
||||
}
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(nbuf)
|
||||
|
||||
o.buffree(nbuf)
|
||||
}
|
||||
return nil
|
||||
return state.err
|
||||
}
|
||||
|
||||
//TODO this is only copied, please fix this
|
||||
|
@ -85,9 +85,9 @@ func init() {
|
||||
}
|
||||
|
||||
var EqualTests = []struct {
|
||||
desc string
|
||||
a, b Message
|
||||
exp bool
|
||||
desc string
|
||||
a, b Message
|
||||
exp bool
|
||||
}{
|
||||
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
|
||||
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
|
||||
@ -142,13 +142,13 @@ var EqualTests = []struct {
|
||||
{
|
||||
"message with group",
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
},
|
||||
&pb.MyMessage{
|
||||
Count: Int32(1),
|
||||
Count: Int32(1),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: Int32(5),
|
||||
},
|
||||
|
@ -55,9 +55,18 @@ type ExtensionRange struct {
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
}
|
||||
|
||||
type extensionsMap interface {
|
||||
extendableProto
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
type extensionsBytes interface {
|
||||
extendableProto
|
||||
GetExtensions() *[]byte
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
@ -92,7 +101,15 @@ type Extension struct {
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||
base.ExtensionMap()[id] = Extension{enc: b}
|
||||
if ebase, ok := base.(extensionsMap); ok {
|
||||
ebase.ExtensionMap()[id] = Extension{enc: b}
|
||||
} else if ebase, ok := base.(extensionsBytes); ok {
|
||||
clearExtension(base, id)
|
||||
ext := ebase.GetExtensions()
|
||||
*ext = append(*ext, b...)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
@ -210,51 +227,127 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
_, ok := pb.ExtensionMap()[extension.Field]
|
||||
return ok
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
_, ok := epb.ExtensionMap()[extension.Field]
|
||||
return ok
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
ext := epb.GetExtensions()
|
||||
buf := *ext
|
||||
o := 0
|
||||
for o < len(buf) {
|
||||
tag, n := DecodeVarint(buf[o:])
|
||||
fieldNum := int32(tag >> 3)
|
||||
if int32(fieldNum) == extension.Field {
|
||||
return true
|
||||
}
|
||||
wireType := int(tag & 0x7)
|
||||
o += n
|
||||
l, err := size(buf[o:], wireType)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
o += l
|
||||
}
|
||||
return false
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
|
||||
ext := pb.GetExtensions()
|
||||
for offset < len(*ext) {
|
||||
tag, n1 := DecodeVarint((*ext)[offset:])
|
||||
fieldNum := int32(tag >> 3)
|
||||
wireType := int(tag & 0x7)
|
||||
n2, err := size((*ext)[offset+n1:], wireType)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
newOffset := offset + n1 + n2
|
||||
if fieldNum == theFieldNum {
|
||||
*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
|
||||
return offset
|
||||
}
|
||||
offset = newOffset
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func clearExtension(pb extendableProto, fieldNum int32) {
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
delete(epb.ExtensionMap(), fieldNum)
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
offset := 0
|
||||
for offset != -1 {
|
||||
offset = deleteExtension(epb, fieldNum, offset)
|
||||
}
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
delete(pb.ExtensionMap(), extension.Field)
|
||||
clearExtension(pb, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present it returns ErrMissingExtension.
|
||||
// If the returned extension is modified, SetExtension must be called
|
||||
// for the modifications to be reflected in pb.
|
||||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
e, ok := pb.ExtensionMap()[extension.Field]
|
||||
if !ok {
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
e, ok := epb.ExtensionMap()[extension.Field]
|
||||
if !ok {
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
return e.value, nil
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
ext := epb.GetExtensions()
|
||||
o := 0
|
||||
for o < len(*ext) {
|
||||
tag, n := DecodeVarint((*ext)[o:])
|
||||
fieldNum := int32(tag >> 3)
|
||||
wireType := int(tag & 0x7)
|
||||
l, err := size((*ext)[o+n:], wireType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if int32(fieldNum) == extension.Field {
|
||||
v, err := decodeExtension((*ext)[o:o+n+l], extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
o += n + l
|
||||
}
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
return e.value, nil
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
@ -319,7 +412,21 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
|
||||
return errors.New("proto: bad extension value type")
|
||||
}
|
||||
|
||||
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
if epb, doki := pb.(extensionsMap); doki {
|
||||
epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||
ClearExtension(pb, extension)
|
||||
ext := epb.GetExtensions()
|
||||
et := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
p := NewBuffer(nil)
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
*ext = append(*ext, p.buf...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -31,6 +31,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
|
||||
@ -58,6 +59,48 @@ func SizeOfExtensionMap(m map[int32]Extension) (n int) {
|
||||
return sizeExtensionMap(m)
|
||||
}
|
||||
|
||||
type sortableMapElem struct {
|
||||
field int32
|
||||
ext Extension
|
||||
}
|
||||
|
||||
func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
|
||||
s := make(sortableExtensions, 0, len(m))
|
||||
for k, v := range m {
|
||||
s = append(s, &sortableMapElem{field: k, ext: v})
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type sortableExtensions []*sortableMapElem
|
||||
|
||||
func (this sortableExtensions) Len() int { return len(this) }
|
||||
|
||||
func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
|
||||
|
||||
func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
|
||||
|
||||
func (this sortableExtensions) String() string {
|
||||
sort.Sort(this)
|
||||
ss := make([]string, len(this))
|
||||
for i := range this {
|
||||
ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
|
||||
}
|
||||
return "map[" + strings.Join(ss, ",") + "]"
|
||||
}
|
||||
|
||||
func StringFromExtensionsMap(m map[int32]Extension) string {
|
||||
return newSortableExtensionsFromMap(m).String()
|
||||
}
|
||||
|
||||
func StringFromExtensionsBytes(ext []byte) string {
|
||||
m, err := BytesToExtensionsMap(ext)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return StringFromExtensionsMap(m)
|
||||
}
|
||||
|
||||
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
return 0, err
|
||||
@ -83,6 +126,58 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
|
||||
return m[id].enc, nil
|
||||
}
|
||||
|
||||
func size(buf []byte, wire int) (int, error) {
|
||||
switch wire {
|
||||
case WireVarint:
|
||||
_, n := DecodeVarint(buf)
|
||||
return n, nil
|
||||
case WireFixed64:
|
||||
return 8, nil
|
||||
case WireBytes:
|
||||
v, n := DecodeVarint(buf)
|
||||
return int(v) + n, nil
|
||||
case WireFixed32:
|
||||
return 4, nil
|
||||
case WireStartGroup:
|
||||
offset := 0
|
||||
for {
|
||||
u, n := DecodeVarint(buf[offset:])
|
||||
fwire := int(u & 0x7)
|
||||
offset += n
|
||||
if fwire == WireEndGroup {
|
||||
return offset, nil
|
||||
}
|
||||
s, err := size(buf[offset:], wire)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
offset += s
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
|
||||
}
|
||||
|
||||
func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
|
||||
m := make(map[int32]Extension)
|
||||
i := 0
|
||||
for i < len(buf) {
|
||||
tag, n := DecodeVarint(buf[i:])
|
||||
if n <= 0 {
|
||||
return nil, fmt.Errorf("unable to decode varint")
|
||||
}
|
||||
fieldNum := int32(tag >> 3)
|
||||
wireType := int(tag & 0x7)
|
||||
l, err := size(buf[i+n:], wireType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
end := i + int(l) + n
|
||||
m[int32(fieldNum)] = Extension{enc: buf[i:end]}
|
||||
i = end
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func NewExtension(e []byte) Extension {
|
||||
ee := Extension{enc: make([]byte, len(e))}
|
||||
copy(ee.enc, e)
|
||||
|
@ -240,10 +240,8 @@ func GetStats() Stats { return stats }
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // write point
|
||||
freelist [10][]byte // list of available buffers
|
||||
nfreelist int // number of free buffers
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // write point
|
||||
|
||||
// pools of basic types to amortize allocation.
|
||||
bools []bool
|
||||
@ -260,20 +258,11 @@ type Buffer struct {
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
p := new(Buffer)
|
||||
if e == nil {
|
||||
e = p.bufalloc()
|
||||
}
|
||||
p.buf = e
|
||||
p.index = 0
|
||||
return p
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
if p.buf == nil {
|
||||
p.buf = p.bufalloc()
|
||||
}
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
@ -288,44 +277,6 @@ func (p *Buffer) SetBuf(s []byte) {
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// Allocate a buffer for the Buffer.
|
||||
func (p *Buffer) bufalloc() []byte {
|
||||
if p.nfreelist > 0 {
|
||||
// reuse an old one
|
||||
p.nfreelist--
|
||||
s := p.freelist[p.nfreelist]
|
||||
return s[0:0]
|
||||
}
|
||||
// make a new one
|
||||
s := make([]byte, 0, 16)
|
||||
return s
|
||||
}
|
||||
|
||||
// Free (and remember in freelist) a byte buffer for the Buffer.
|
||||
func (p *Buffer) buffree(s []byte) {
|
||||
if p.nfreelist < len(p.freelist) {
|
||||
// Take next slot.
|
||||
p.freelist[p.nfreelist] = s
|
||||
p.nfreelist++
|
||||
return
|
||||
}
|
||||
|
||||
// Find the smallest.
|
||||
besti := -1
|
||||
bestl := len(s)
|
||||
for i, b := range p.freelist {
|
||||
if len(b) < bestl {
|
||||
besti = i
|
||||
bestl = len(b)
|
||||
}
|
||||
}
|
||||
|
||||
// Overwrite the smallest.
|
||||
if besti >= 0 {
|
||||
p.freelist[besti] = s
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
@ -51,10 +51,17 @@ func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interf
|
||||
}
|
||||
|
||||
func copyUintPtr(oldptr, newptr uintptr, size int) {
|
||||
for j := 0; j < size; j++ {
|
||||
oldb := (*byte)(unsafe.Pointer(oldptr + uintptr(j)))
|
||||
*(*byte)(unsafe.Pointer(newptr + uintptr(j))) = *oldb
|
||||
}
|
||||
oldbytes := make([]byte, 0)
|
||||
oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
|
||||
oldslice.Data = oldptr
|
||||
oldslice.Len = size
|
||||
oldslice.Cap = size
|
||||
newbytes := make([]byte, 0)
|
||||
newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
|
||||
newslice.Data = newptr
|
||||
newslice.Len = size
|
||||
newslice.Cap = size
|
||||
copy(newbytes, oldbytes)
|
||||
}
|
||||
|
||||
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
|
||||
|
@ -575,9 +575,15 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
if f.Name == "XXX_extensions" { // special case
|
||||
p.enc = (*Buffer).enc_map
|
||||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
if len(f.Tag.Get("protobuf")) > 0 {
|
||||
p.enc = (*Buffer).enc_ext_slice_byte
|
||||
p.dec = nil // not needed
|
||||
p.size = size_ext_slice_byte
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_map
|
||||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
}
|
||||
}
|
||||
if f.Name == "XXX_unrecognized" { // special case
|
||||
prop.unrecField = toField(&f)
|
||||
|
@ -58,8 +58,8 @@ func init() {
|
||||
}
|
||||
|
||||
var SizeTests = []struct {
|
||||
desc string
|
||||
pb Message
|
||||
desc string
|
||||
pb Message
|
||||
}{
|
||||
{"empty", &pb.OtherMessage{}},
|
||||
// Basic types.
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -79,6 +79,13 @@ type textWriter struct {
|
||||
w writer
|
||||
}
|
||||
|
||||
// textMarshaler is implemented by Messages that can marshal themsleves.
|
||||
// It is identical to encoding.TextMarshaler, introduced in go 1.2,
|
||||
// which will eventually replace it.
|
||||
type textMarshaler interface {
|
||||
MarshalText() (text []byte, err error)
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
@ -366,7 +373,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if err := writeStruct(w, v); err != nil {
|
||||
if tm, ok := v.Interface().(textMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
@ -555,7 +570,18 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m := ep.ExtensionMap()
|
||||
var m map[int32]Extension
|
||||
if em, ok := ep.(extensionsMap); ok {
|
||||
m = em.ExtensionMap()
|
||||
} else if em, ok := ep.(extensionsBytes); ok {
|
||||
eb := em.GetExtensions()
|
||||
var err error
|
||||
m, err = BytesToExtensionsMap(*eb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
@ -653,6 +679,19 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
|
||||
compact: compact,
|
||||
}
|
||||
|
||||
if tm, ok := pb.(textMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := writeStruct(aw, v); err != nil {
|
||||
@ -666,7 +705,9 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error { return marshalText(w, pb, false) }
|
||||
func MarshalText(w io.Writer, pb Message) error {
|
||||
return marshalText(w, pb, false)
|
||||
}
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string {
|
||||
|
@ -48,6 +48,13 @@ import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// textUnmarshaler is implemented by Messages that can unmarshal themsleves.
|
||||
// It is identical to encoding.TextUnmarshaler, introduced in go 1.2,
|
||||
// which will eventually replace it.
|
||||
type textUnmarshaler interface {
|
||||
UnmarshalText(text []byte) error
|
||||
}
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
@ -686,6 +693,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError {
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement textUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
@ -704,6 +712,10 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError {
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(textUnmarshaler); ok {
|
||||
err := um.UnmarshalText([]byte(s))
|
||||
return err
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
||||
|
@ -41,9 +41,9 @@ import (
|
||||
)
|
||||
|
||||
type UnmarshalTextTest struct {
|
||||
in string
|
||||
err string // if "", no error expected
|
||||
out *MyMessage
|
||||
in string
|
||||
err string // if "", no error expected
|
||||
out *MyMessage
|
||||
}
|
||||
|
||||
func buildExtStructTest(text string) UnmarshalTextTest {
|
||||
@ -78,97 +78,97 @@ func buildExtRepStringTest(text string) UnmarshalTextTest {
|
||||
var unMarshalTextTests = []UnmarshalTextTest{
|
||||
// Basic
|
||||
{
|
||||
in: " count:42\n name:\"Dave\" ",
|
||||
in: " count:42\n name:\"Dave\" ",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
},
|
||||
},
|
||||
|
||||
// Empty quoted string
|
||||
{
|
||||
in: `count:42 name:""`,
|
||||
in: `count:42 name:""`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(""),
|
||||
Count: Int32(42),
|
||||
Name: String(""),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string concatenation
|
||||
{
|
||||
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
|
||||
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
Count: Int32(42),
|
||||
Name: String("My name is elsewhere"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with escaped apostrophe
|
||||
{
|
||||
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
|
||||
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("HOLIDAY - New Year's Day"),
|
||||
Count: Int32(42),
|
||||
Name: String("HOLIDAY - New Year's Day"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with single quote
|
||||
{
|
||||
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
|
||||
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`Roger "The Ramster" Ramjet`),
|
||||
Count: Int32(42),
|
||||
Name: String(`Roger "The Ramster" Ramjet`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with all the accepted special characters from the C++ test
|
||||
{
|
||||
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
|
||||
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
|
||||
Count: Int32(42),
|
||||
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with quoted backslash
|
||||
{
|
||||
in: `count:42 name: "\\'xyz"`,
|
||||
in: `count:42 name: "\\'xyz"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String(`\'xyz`),
|
||||
Count: Int32(42),
|
||||
Name: String(`\'xyz`),
|
||||
},
|
||||
},
|
||||
|
||||
// Quoted string with UTF-8 bytes.
|
||||
{
|
||||
in: "count:42 name: '\303\277\302\201\xAB'",
|
||||
in: "count:42 name: '\303\277\302\201\xAB'",
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("\303\277\302\201\xAB"),
|
||||
Count: Int32(42),
|
||||
Name: String("\303\277\302\201\xAB"),
|
||||
},
|
||||
},
|
||||
|
||||
// Bad quoted string
|
||||
{
|
||||
in: `inner: < host: "\0" >` + "\n",
|
||||
err: `line 1.15: invalid quoted string "\0"`,
|
||||
in: `inner: < host: "\0" >` + "\n",
|
||||
err: `line 1.15: invalid quoted string "\0"`,
|
||||
},
|
||||
|
||||
// Number too large for int64
|
||||
{
|
||||
in: "count: 123456789012345678901",
|
||||
err: "line 1.7: invalid int32: 123456789012345678901",
|
||||
in: "count: 123456789012345678901",
|
||||
err: "line 1.7: invalid int32: 123456789012345678901",
|
||||
},
|
||||
|
||||
// Number too large for int32
|
||||
{
|
||||
in: "count: 1234567890123",
|
||||
err: "line 1.7: invalid int32: 1234567890123",
|
||||
in: "count: 1234567890123",
|
||||
err: "line 1.7: invalid int32: 1234567890123",
|
||||
},
|
||||
|
||||
// Number in hexadecimal
|
||||
{
|
||||
in: "count: 0x2beef",
|
||||
in: "count: 0x2beef",
|
||||
out: &MyMessage{
|
||||
Count: Int32(0x2beef),
|
||||
},
|
||||
@ -176,7 +176,7 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
|
||||
// Number in octal
|
||||
{
|
||||
in: "count: 024601",
|
||||
in: "count: 024601",
|
||||
out: &MyMessage{
|
||||
Count: Int32(024601),
|
||||
},
|
||||
@ -184,9 +184,9 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
|
||||
// Floating point number with "f" suffix
|
||||
{
|
||||
in: "count: 4 others:< weight: 17.0f >",
|
||||
in: "count: 4 others:< weight: 17.0f >",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Count: Int32(4),
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Weight: Float32(17),
|
||||
@ -197,69 +197,69 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
|
||||
// Floating point positive infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: inf",
|
||||
in: "count: 4 bigfloat: inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(1)),
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Floating point negative infinity
|
||||
{
|
||||
in: "count: 4 bigfloat: -inf",
|
||||
in: "count: 4 bigfloat: -inf",
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(-1)),
|
||||
Count: Int32(4),
|
||||
Bigfloat: Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
|
||||
// Number too large for float32
|
||||
{
|
||||
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
|
||||
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
|
||||
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
|
||||
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
|
||||
},
|
||||
|
||||
// Number posing as a quoted string
|
||||
{
|
||||
in: `inner: < host: 12 >` + "\n",
|
||||
err: `line 1.15: invalid string: 12`,
|
||||
in: `inner: < host: 12 >` + "\n",
|
||||
err: `line 1.15: invalid string: 12`,
|
||||
},
|
||||
|
||||
// Quoted string posing as int32
|
||||
{
|
||||
in: `count: "12"`,
|
||||
err: `line 1.7: invalid int32: "12"`,
|
||||
in: `count: "12"`,
|
||||
err: `line 1.7: invalid int32: "12"`,
|
||||
},
|
||||
|
||||
// Quoted string posing a float32
|
||||
{
|
||||
in: `others:< weight: "17.4" >`,
|
||||
err: `line 1.17: invalid float32: "17.4"`,
|
||||
in: `others:< weight: "17.4" >`,
|
||||
err: `line 1.17: invalid float32: "17.4"`,
|
||||
},
|
||||
|
||||
// Enum
|
||||
{
|
||||
in: `count:42 bikeshed: BLUE`,
|
||||
in: `count:42 bikeshed: BLUE`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Bikeshed: MyMessage_BLUE.Enum(),
|
||||
Count: Int32(42),
|
||||
Bikeshed: MyMessage_BLUE.Enum(),
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated field
|
||||
{
|
||||
in: `count:42 pet: "horsey" pet:"bunny"`,
|
||||
in: `count:42 pet: "horsey" pet:"bunny"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Pet: []string{"horsey", "bunny"},
|
||||
Count: Int32(42),
|
||||
Pet: []string{"horsey", "bunny"},
|
||||
},
|
||||
},
|
||||
|
||||
// Repeated message with/without colon and <>/{}
|
||||
{
|
||||
in: `count:42 others:{} others{} others:<> others:{}`,
|
||||
in: `count:42 others:{} others{} others:<> others:{}`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Count: Int32(42),
|
||||
Others: []*OtherMessage{
|
||||
{},
|
||||
{},
|
||||
@ -271,9 +271,9 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
|
||||
// Missing colon for inner message
|
||||
{
|
||||
in: `count:42 inner < host: "cauchy.syd" >`,
|
||||
in: `count:42 inner < host: "cauchy.syd" >`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Count: Int32(42),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("cauchy.syd"),
|
||||
},
|
||||
@ -282,33 +282,33 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
|
||||
// Missing colon for string field
|
||||
{
|
||||
in: `name "Dave"`,
|
||||
err: `line 1.5: expected ':', found "\"Dave\""`,
|
||||
in: `name "Dave"`,
|
||||
err: `line 1.5: expected ':', found "\"Dave\""`,
|
||||
},
|
||||
|
||||
// Missing colon for int32 field
|
||||
{
|
||||
in: `count 42`,
|
||||
err: `line 1.6: expected ':', found "42"`,
|
||||
in: `count 42`,
|
||||
err: `line 1.6: expected ':', found "42"`,
|
||||
},
|
||||
|
||||
// Missing required field
|
||||
{
|
||||
in: ``,
|
||||
err: `line 1.0: message testdata.MyMessage missing required field "count"`,
|
||||
in: ``,
|
||||
err: `line 1.0: message testdata.MyMessage missing required field "count"`,
|
||||
},
|
||||
|
||||
// Repeated non-repeated field
|
||||
{
|
||||
in: `name: "Rob" name: "Russ"`,
|
||||
err: `line 1.12: non-repeated field "name" was repeated`,
|
||||
in: `name: "Rob" name: "Russ"`,
|
||||
err: `line 1.12: non-repeated field "name" was repeated`,
|
||||
},
|
||||
|
||||
// Group
|
||||
{
|
||||
in: `count: 17 SomeGroup { group_field: 12 }`,
|
||||
in: `count: 17 SomeGroup { group_field: 12 }`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(17),
|
||||
Count: Int32(17),
|
||||
Somegroup: &MyMessage_SomeGroup{
|
||||
GroupField: Int32(12),
|
||||
},
|
||||
@ -317,18 +317,18 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
|
||||
// Semicolon between fields
|
||||
{
|
||||
in: `count:3;name:"Calvin"`,
|
||||
in: `count:3;name:"Calvin"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(3),
|
||||
Name: String("Calvin"),
|
||||
Count: Int32(3),
|
||||
Name: String("Calvin"),
|
||||
},
|
||||
},
|
||||
// Comma between fields
|
||||
{
|
||||
in: `count:4,name:"Ezekiel"`,
|
||||
in: `count:4,name:"Ezekiel"`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(4),
|
||||
Name: String("Ezekiel"),
|
||||
Count: Int32(4),
|
||||
Name: String("Ezekiel"),
|
||||
},
|
||||
},
|
||||
|
||||
@ -363,25 +363,25 @@ var unMarshalTextTests = []UnmarshalTextTest{
|
||||
` >` +
|
||||
`>`,
|
||||
out: &MyMessage{
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
Quote: String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Count: Int32(42),
|
||||
Name: String("Dave"),
|
||||
Quote: String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &InnerMessage{
|
||||
Host: String("footrest.syd"),
|
||||
Port: Int32(7001),
|
||||
Connected: Bool(true),
|
||||
Host: String("footrest.syd"),
|
||||
Port: Int32(7001),
|
||||
Connected: Bool(true),
|
||||
},
|
||||
Others: []*OtherMessage{
|
||||
{
|
||||
Key: Int64(3735928559),
|
||||
Value: []byte{0x1, 'A', '\a', '\f'},
|
||||
Key: Int64(3735928559),
|
||||
Value: []byte{0x1, 'A', '\a', '\f'},
|
||||
},
|
||||
{
|
||||
Weight: Float32(58.9),
|
||||
Weight: Float32(58.9),
|
||||
Inner: &InnerMessage{
|
||||
Host: String("lesha.mtv"),
|
||||
Port: Int32(8002),
|
||||
Host: String("lesha.mtv"),
|
||||
Port: Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -413,6 +413,16 @@ func TestUnmarshalText(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalTextCustomMessage(t *testing.T) {
|
||||
msg := &textMessage{}
|
||||
if err := UnmarshalText("custom", msg); err != nil {
|
||||
t.Errorf("Unexpected error from custom unmarshal: %v", err)
|
||||
}
|
||||
if UnmarshalText("not custom", msg) == nil {
|
||||
t.Errorf("Didn't get expected error from custom unmarshal")
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test; this caused a panic.
|
||||
func TestRepeatedEnum(t *testing.T) {
|
||||
pb := new(RepeatedEnum)
|
||||
|
@ -44,37 +44,57 @@ import (
|
||||
pb "./testdata"
|
||||
)
|
||||
|
||||
// textMessage implements the methods that allow it to marshal and unmarshal
|
||||
// itself as text.
|
||||
type textMessage struct {
|
||||
}
|
||||
|
||||
func (*textMessage) MarshalText() ([]byte, error) {
|
||||
return []byte("custom"), nil
|
||||
}
|
||||
|
||||
func (*textMessage) UnmarshalText(bytes []byte) error {
|
||||
if string(bytes) != "custom" {
|
||||
return errors.New("expected 'custom'")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*textMessage) Reset() {}
|
||||
func (*textMessage) String() string { return "" }
|
||||
func (*textMessage) ProtoMessage() {}
|
||||
|
||||
func newTestMessage() *pb.MyMessage {
|
||||
msg := &pb.MyMessage{
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Quote: proto.String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Count: proto.Int32(42),
|
||||
Name: proto.String("Dave"),
|
||||
Quote: proto.String(`"I didn't want to go."`),
|
||||
Pet: []string{"bunny", "kitty", "horsey"},
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("footrest.syd"),
|
||||
Port: proto.Int32(7001),
|
||||
Connected: proto.Bool(true),
|
||||
Host: proto.String("footrest.syd"),
|
||||
Port: proto.Int32(7001),
|
||||
Connected: proto.Bool(true),
|
||||
},
|
||||
Others: []*pb.OtherMessage{
|
||||
{
|
||||
Key: proto.Int64(0xdeadbeef),
|
||||
Value: []byte{1, 65, 7, 12},
|
||||
Key: proto.Int64(0xdeadbeef),
|
||||
Value: []byte{1, 65, 7, 12},
|
||||
},
|
||||
{
|
||||
Weight: proto.Float32(6.022),
|
||||
Weight: proto.Float32(6.022),
|
||||
Inner: &pb.InnerMessage{
|
||||
Host: proto.String("lesha.mtv"),
|
||||
Port: proto.Int32(8002),
|
||||
Host: proto.String("lesha.mtv"),
|
||||
Port: proto.Int32(8002),
|
||||
},
|
||||
},
|
||||
},
|
||||
Bikeshed: pb.MyMessage_BLUE.Enum(),
|
||||
Bikeshed: pb.MyMessage_BLUE.Enum(),
|
||||
Somegroup: &pb.MyMessage_SomeGroup{
|
||||
GroupField: proto.Int32(8),
|
||||
},
|
||||
// One normally wouldn't do this.
|
||||
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
|
||||
XXX_unrecognized: []byte{13<<3 | 0, 4},
|
||||
XXX_unrecognized: []byte{13<<3 | 0, 4},
|
||||
}
|
||||
ext := &pb.Ext{
|
||||
Data: proto.String("Big gobs for big rats"),
|
||||
@ -153,6 +173,16 @@ func TestMarshalText(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalTextCustomMessage(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
|
||||
t.Fatalf("proto.MarshalText: %v", err)
|
||||
}
|
||||
s := buf.String()
|
||||
if s != "custom" {
|
||||
t.Errorf("Got %q, expected %q", s, "custom")
|
||||
}
|
||||
}
|
||||
func TestMarshalTextNil(t *testing.T) {
|
||||
want := "<nil>"
|
||||
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
|
||||
@ -250,8 +280,8 @@ func TestCompactText(t *testing.T) {
|
||||
|
||||
func TestStringEscaping(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in *pb.Strings
|
||||
out string
|
||||
in *pb.Strings
|
||||
out string
|
||||
}{
|
||||
{
|
||||
// Test data from C++ test (TextFormatTest.StringEscape).
|
||||
@ -299,8 +329,8 @@ func TestStringEscaping(t *testing.T) {
|
||||
// This is a proxy for something like a nearly-full or imminently-failing disk,
|
||||
// or a network connection that is about to die.
|
||||
type limitedWriter struct {
|
||||
b bytes.Buffer
|
||||
limit int
|
||||
b bytes.Buffer
|
||||
limit int
|
||||
}
|
||||
|
||||
var outOfSpace = errors.New("proto: insufficient space")
|
||||
@ -337,8 +367,8 @@ func TestMarshalTextFailing(t *testing.T) {
|
||||
|
||||
func TestFloats(t *testing.T) {
|
||||
tests := []struct {
|
||||
f float64
|
||||
want string
|
||||
f float64
|
||||
want string
|
||||
}{
|
||||
{0, "0"},
|
||||
{4.7, "4.7"},
|
||||
|
3
third_party/github.com/goraft/raft/log.go
vendored
3
third_party/github.com/goraft/raft/log.go
vendored
@ -168,9 +168,10 @@ func (l *Log) open(path string) error {
|
||||
if err == io.EOF {
|
||||
debugln("open.log.append: finish ")
|
||||
} else {
|
||||
if err = os.Truncate(path, readBytes); err != nil {
|
||||
if err = l.file.Truncate(readBytes); err != nil {
|
||||
return fmt.Errorf("raft.Log: Unable to recover: %v", err)
|
||||
}
|
||||
l.file.Seek(readBytes, os.SEEK_SET)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
@ -2,6 +2,23 @@
|
||||
// source: append_entries_request.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package protobuf is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
append_entries_request.proto
|
||||
append_entries_responses.proto
|
||||
log_entry.proto
|
||||
request_vote_request.proto
|
||||
request_vote_responses.proto
|
||||
snapshot_recovery_request.proto
|
||||
snapshot_recovery_response.proto
|
||||
snapshot_request.proto
|
||||
snapshot_response.proto
|
||||
|
||||
It has these top-level messages:
|
||||
AppendEntriesRequest
|
||||
*/
|
||||
package protobuf
|
||||
|
||||
import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"
|
||||
@ -110,7 +127,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -127,7 +144,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
m.Term = &v
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -144,7 +161,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
m.PrevLogIndex = &v
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -161,7 +178,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
m.PrevLogTerm = &v
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -178,7 +195,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
m.CommitIndex = &v
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -201,7 +218,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
index = postIndex
|
||||
case 6:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -236,6 +253,9 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io1.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -309,7 +329,6 @@ func sovAppendEntriesRequest(x uint64) (n int) {
|
||||
}
|
||||
func sozAppendEntriesRequest(x uint64) (n int) {
|
||||
return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedAppendEntriesRequest(r randyAppendEntriesRequest, easy bool) *AppendEntriesRequest {
|
||||
this := &AppendEntriesRequest{}
|
||||
|
@ -94,7 +94,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -111,7 +111,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
|
||||
m.Term = &v
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -128,7 +128,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
|
||||
m.Index = &v
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -145,7 +145,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
|
||||
m.CommitIndex = &v
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -175,6 +175,9 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io2.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -236,7 +239,6 @@ func sovAppendEntriesResponses(x uint64) (n int) {
|
||||
}
|
||||
func sozAppendEntriesResponses(x uint64) (n int) {
|
||||
return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedAppendEntriesResponse(r randyAppendEntriesResponses, easy bool) *AppendEntriesResponse {
|
||||
this := &AppendEntriesResponse{}
|
||||
|
@ -94,7 +94,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -111,7 +111,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
|
||||
m.Index = &v
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -128,7 +128,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
|
||||
m.Term = &v
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -151,7 +151,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
|
||||
index = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -185,6 +185,9 @@ func (m *LogEntry) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -248,7 +251,6 @@ func sovLogEntry(x uint64) (n int) {
|
||||
}
|
||||
func sozLogEntry(x uint64) (n int) {
|
||||
return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedLogEntry(r randyLogEntry, easy bool) *LogEntry {
|
||||
this := &LogEntry{}
|
||||
|
@ -94,7 +94,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -111,7 +111,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
|
||||
m.Term = &v
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -128,7 +128,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
|
||||
m.LastLogIndex = &v
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -145,7 +145,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
|
||||
m.LastLogTerm = &v
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -180,6 +180,9 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io3.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -242,7 +245,6 @@ func sovRequestVoteRequest(x uint64) (n int) {
|
||||
}
|
||||
func sozRequestVoteRequest(x uint64) (n int) {
|
||||
return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedRequestVoteRequest(r randyRequestVoteRequest, easy bool) *RequestVoteRequest {
|
||||
this := &RequestVoteRequest{}
|
||||
|
@ -78,7 +78,7 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto8.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -95,7 +95,7 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
|
||||
m.Term = &v
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto8.ErrWrongType
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -125,6 +125,9 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io4.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -178,7 +181,6 @@ func sovRequestVoteResponses(x uint64) (n int) {
|
||||
}
|
||||
func sozRequestVoteResponses(x uint64) (n int) {
|
||||
return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedRequestVoteResponse(r randyRequestVoteResponses, easy bool) *RequestVoteResponse {
|
||||
this := &RequestVoteResponse{}
|
||||
|
@ -125,7 +125,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -148,7 +148,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
|
||||
index = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -165,7 +165,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
|
||||
m.LastIndex = &v
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -182,7 +182,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
|
||||
m.LastTerm = &v
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -205,7 +205,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
|
||||
index = postIndex
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -239,6 +239,9 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io5.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -266,7 +269,7 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -289,7 +292,7 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
|
||||
index = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -324,6 +327,9 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io5.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -422,7 +428,6 @@ func sovSnapshotRecoveryRequest(x uint64) (n int) {
|
||||
}
|
||||
func sozSnapshotRecoveryRequest(x uint64) (n int) {
|
||||
return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedSnapshotRecoveryRequest(r randySnapshotRecoveryRequest, easy bool) *SnapshotRecoveryRequest {
|
||||
this := &SnapshotRecoveryRequest{}
|
||||
|
@ -86,7 +86,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -103,7 +103,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
|
||||
m.Term = &v
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -121,7 +121,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
|
||||
m.Success = &b
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -150,6 +150,9 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io6.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -207,7 +210,6 @@ func sovSnapshotRecoveryResponse(x uint64) (n int) {
|
||||
}
|
||||
func sozSnapshotRecoveryResponse(x uint64) (n int) {
|
||||
return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedSnapshotRecoveryResponse(r randySnapshotRecoveryResponse, easy bool) *SnapshotRecoveryResponse {
|
||||
this := &SnapshotRecoveryResponse{}
|
||||
|
@ -86,7 +86,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -109,7 +109,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
|
||||
index = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -126,7 +126,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
|
||||
m.LastIndex = &v
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -155,6 +155,9 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io7.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -213,7 +216,6 @@ func sovSnapshotRequest(x uint64) (n int) {
|
||||
}
|
||||
func sozSnapshotRequest(x uint64) (n int) {
|
||||
return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedSnapshotRequest(r randySnapshotRequest, easy bool) *SnapshotRequest {
|
||||
this := &SnapshotRequest{}
|
||||
|
@ -70,7 +70,7 @@ func (m *SnapshotResponse) Unmarshal(data []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return proto.ErrWrongType
|
||||
return code_google_com_p_gogoprotobuf_proto16.ErrWrongType
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
@ -100,6 +100,9 @@ func (m *SnapshotResponse) Unmarshal(data []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (index + skippy) > l {
|
||||
return io8.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
|
||||
index += skippy
|
||||
}
|
||||
@ -149,7 +152,6 @@ func sovSnapshotResponse(x uint64) (n int) {
|
||||
}
|
||||
func sozSnapshotResponse(x uint64) (n int) {
|
||||
return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func NewPopulatedSnapshotResponse(r randySnapshotResponse, easy bool) *SnapshotResponse {
|
||||
this := &SnapshotResponse{}
|
||||
|
Reference in New Issue
Block a user