Compare commits


44 Commits

SHA1 Message Date
4fb6087f4a CHANGELOG: release 0.4.4 2014-06-24 10:56:53 -07:00
5524131a9e Merge pull request #865 from robstrong/hotfix/contentType
fix(peer_server) set content type to application/json in admin
2014-06-23 16:31:49 -07:00
3efb4d837b Merge pull request #844 from unihorn/102
chore(peer_server): improve log for auto removal
2014-06-23 14:19:48 -07:00
494d2c67aa fix(peer_server) set content type to application/json in admin 2014-06-21 13:13:10 -04:00
fb32a999a6 doc: add note about removal of leader mod 2014-06-19 17:10:34 -07:00
d2f5934aa1 mod: remove defunct leader test 2014-06-19 17:10:34 -07:00
4f3fb5a702 Merge pull request #861 from andybons/patch-2
Update to Go v1.3
2014-06-19 13:54:34 -07:00
9f5ec7732e Update to Go v1.3
Now I’m just being OCD about it.
2014-06-19 10:25:52 -04:00
eb00f200d3 Merge pull request #856 from robn/patch-1
Add p5-etcd (Perl client lib) to clients-matrix
2014-06-18 10:45:11 -07:00
38d16775ab Merge pull request #858 from mikeumus/patch-1
docs(readme.md): spelling fix "oon" to "on"
2014-06-17 23:20:23 -07:00
690fd12b07 docs(readme.md): spelling fix "oon" to "on" 2014-06-18 02:11:32 -04:00
b31483b2be Merge pull request #850 from robszumski/update-config
feat(docs): add cluster config section
2014-06-17 17:45:50 -07:00
e9a21dda4b Merge pull request #851 from unihorn/103
docs(configuration): add cluster configuration
2014-06-17 17:44:51 -07:00
2134036942 Merge pull request #857 from tarnfeld/tools-discodns
Added discodns to the list of tools using etcd
2014-06-17 16:10:57 -07:00
6bd2ee4c49 Added discodns to the list of tools using etcd 2014-06-18 00:08:21 +01:00
fcd429467e Add p5-etcd (Perl client lib) to clients-matrix 2014-06-18 08:15:03 +10:00
e5e759b962 docs(config): refine cluster configuration 2014-06-17 09:31:08 -07:00
d8a08f53e3 feat(docs): add cluster config section 2014-06-16 22:31:13 -07:00
3e95bf0fa3 Merge pull request #854 from brianredbeard/moarthings
docs(libraries-and-tools.md) Add vulcan proxy and kubernetes
2014-06-16 20:05:43 -07:00
0d2512cb99 docs(libraries-and-tools.md) Add vulcan proxy and kubernetes
Both vulcan proxy (vulcand) and Google kubernetes utilize etcd as
as storage engine.
2014-06-16 20:03:47 -07:00
a29f6fb799 docs(configuration): add cluster configuration 2014-06-16 13:58:00 -07:00
fc2afe1ed2 Merge pull request #847 from pwaller/patch-1
docs(production-ready.md): Tiny typo fix
2014-06-13 08:58:33 -07:00
24a442383b docs(production-ready.md): Tiny typo fix 2014-06-13 14:41:23 +01:00
f387bf8464 chore(peer_server): improve log for auto removal 2014-06-12 10:02:56 -07:00
83b06c0715 Merge pull request #841 from andybons/patch-1
Update to Go v1.2.2
2014-06-11 09:43:25 -07:00
75dc10c39d Update to Go v1.2.2 2014-06-09 16:03:01 -04:00
66acf8a4e9 Merge pull request #839 from jonboulle/jonboulle-master
docs(cluster-discovery): fix bad link and confusing port references
2014-06-09 09:47:58 -07:00
1359d29fa4 docs(cluster-discovery): fix bad link and confusing port references 2014-06-08 23:58:14 -07:00
dc1f4adcd0 chore(server): bump to 0.4.3+git 2014-06-07 18:17:54 -07:00
9970141f76 chore(server): bump to 0.4.3 2014-06-07 18:17:05 -07:00
16c2bcf951 chore(server): go fmt
blame me for not running test first.
2014-06-07 18:03:22 -07:00
868b7f7902 Merge pull request #836 from philips/reduce-heartbeat-logs
fix(server): reduce the screaming heartbeat logs
2014-06-07 17:48:22 -07:00
1c958f8fc3 fix(server): reduce the screaming heartbeat logs
Currently the only way we know that a peer isn't getting a heartbeat is
an edge triggered event from go raft on every missed heartbeat. This
means that we need to do some book keeping in order to do exponential
backoff.

The upside is that instead of screaming thousands of log lines before a
machine hits the default removal of 30 minutes it is only ~100.
2014-06-07 17:47:10 -07:00
dfeecd2537 Merge pull request #835 from unihorn/101
chore(server): set DefaultRemoveDelay to 30mins
2014-06-06 17:56:01 -07:00
ed58193ebe chore(server): set DefaultRemoveDelay to 30mins
Its value was 5s before, which could remove the node insanely fast.
2014-06-06 16:57:35 -07:00
79c650d900 Merge pull request #834 from unihorn/100
fix(raft/protobuf): avoid panic on unexcepted data
2014-06-06 15:08:13 -07:00
a451cf2333 fix(raft/protobuf): avoid panic on unexcepted data 2014-06-06 14:34:32 -07:00
3455431da3 Merge pull request #833 from unihorn/99
bump(code.google.com/p/gogoprotobuf): 7fd1620f09
2014-06-06 13:48:47 -07:00
9424a10f49 bump(code.google.com/p/gogoprotobuf): 7fd1620f09 2014-06-06 13:35:59 -07:00
fbcfe8e1c4 Merge pull request #807 from Shopify/raft-server-stats-struct-field-tag-fix
style(server): changed a LeaderInfo struct field from "startTime" to "StartTime"
2014-06-05 12:45:34 -07:00
757bb3af13 Merge pull request #830 from unihorn/98
fix(raft/log): truncate file and reset offset correctly
2014-06-05 12:40:23 -07:00
2cd367e9d9 fix(raft/log): truncate file and reset offset correctly 2014-06-05 12:09:25 -07:00
a974bbfe4f chore(server): bump to 0.4.2+git 2014-06-02 15:26:06 -07:00
673d90728e style(server): changed a LeaderInfo struct field from "startTime" to "StartTime"
Changed the LeaderInfo struct "start time" field from "startTime" to "StartTime" so that it is an exported identifier. This required adding the `json:"startTime"` structure field tag so that the encoding/json package correctly performs JSON encoding (i.e. the correct property name --> startTime).
2014-05-21 11:19:56 -04:00
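Editorial note on the commit above (a minimal standalone Go sketch, not part of the changeset): encoding/json silently skips unexported struct fields, so exporting the field and tagging it `json:"startTime"` is what keeps the stat in the output under its original property name.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type leaderInfoBefore struct {
	Name      string `json:"leader"`
	startTime time.Time // unexported: encoding/json drops it entirely
}

type leaderInfoAfter struct {
	Name      string    `json:"leader"`
	StartTime time.Time `json:"startTime"` // exported + tag: serialized as "startTime"
}

func main() {
	t := time.Date(2014, 6, 24, 0, 0, 0, 0, time.UTC)
	before, _ := json.Marshal(leaderInfoBefore{Name: "node1", startTime: t})
	after, _ := json.Marshal(leaderInfoAfter{Name: "node1", StartTime: t})
	fmt.Println(string(before)) // {"leader":"node1"}
	fmt.Println(string(after))  // {"leader":"node1","startTime":"2014-06-24T00:00:00Z"}
}
```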
47 changed files with 1739 additions and 1398 deletions

View File

@ -1,3 +1,14 @@
v0.4.4
* Fix `--no-sync` flag in etcdctl (#83)
* Improved logging for machine removal (#844)
* Various documentation improvements (#858, #851, #847)
v0.4.3
* Avoid panic() on truncated or unexpected log data (#834, #833)
* Fix missing stats field (#807)
* Lengthen default peer removal delay to 30mins (#835)
* Reduce logging on heartbeat timeouts (#836)
v0.4.2
* Improvements to the clustering documents
* Set content-type properly on errors (#469)

View File

@ -2,7 +2,7 @@ FROM ubuntu:12.04
# Let's install go just like Docker (from source).
RUN apt-get update -q
RUN DEBIAN_FRONTEND=noninteractive apt-get install -qy build-essential curl git
- RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz
+ RUN curl -s https://storage.googleapis.com/golang/go1.3.src.tar.gz | tar -v -C /usr/local -xz
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
ENV PATH /usr/local/go/bin:$PATH
ADD . /opt/etcd

View File

@ -843,13 +843,13 @@ The client is told the write was successful and the keyspace is updated.
Meanwhile F2 has partitioned from the network and will have an out-of-date version of the keyspace until the partition resolves.
Since F2 missed the most recent write, a client reading from F2 will have an out-of-date version of the keyspace.
- ## Lock Module (*Deprecated*)
+ ## Lock Module (*Deprecated and Removed*)
The lock module is used to serialize access to resources used by clients.
Multiple clients can attempt to acquire a lock but only one can have it at a time.
Once the lock is released, the next client waiting for the lock will receive it.
- **Warning:** This module is deprecated at v0.4. See [Modules][modules] for more details.
+ **Warning:** This module is deprecated and removed at v0.4. See [Modules][modules] for more details.
### Acquiring a Lock
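For readers landing here from the changelog, a hedged sketch of what this deprecated module's HTTP surface looked like (only the DELETE form appears verbatim later in this comparison; the acquire parameters are an assumption):

```
# Acquire the lock, holding it for ttl seconds (parameter placement is assumed)
curl -X POST "http://127.0.0.1:4001/mod/v2/lock/customer1?ttl=60" -d value=bar

# Release it; this form matches the DELETE example shown in the modules diff below
curl -X DELETE "http://127.0.0.1:4001/mod/v2/lock/customer1?value=bar"
```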

View File

@ -29,16 +29,16 @@ The v2 API has a lot of features, we will categorize them in a few categories:
### Supported features matrix
- | Client| [go-etcd](https://github.com/coreos/go-etcd) | [jetcd](https://github.com/diwakergupta/jetcd) | [python-etcd](https://github.com/jplana/python-etcd) | [python-etcd-client](https://github.com/dsoprea/PythonEtcdClient) | [node-etcd](https://github.com/stianeikeland/node-etcd) | [nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) | [etcd-ruby](https://github.com/ranjib/etcd-ruby) | [etcd-api](https://github.com/jdarcy/etcd-api) | [cetcd](https://github.com/dwwoelfel/cetcd) | [clj-etcd](https://github.com/rthomas/clj-etcd) | [etcetera](https://github.com/drusellers/etcetera)| [Etcd.jl](https://github.com/forio/Etcd.jl) |
+ | Client| [go-etcd](https://github.com/coreos/go-etcd) | [jetcd](https://github.com/diwakergupta/jetcd) | [python-etcd](https://github.com/jplana/python-etcd) | [python-etcd-client](https://github.com/dsoprea/PythonEtcdClient) | [node-etcd](https://github.com/stianeikeland/node-etcd) | [nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) | [etcd-ruby](https://github.com/ranjib/etcd-ruby) | [etcd-api](https://github.com/jdarcy/etcd-api) | [cetcd](https://github.com/dwwoelfel/cetcd) | [clj-etcd](https://github.com/rthomas/clj-etcd) | [etcetera](https://github.com/drusellers/etcetera)| [Etcd.jl](https://github.com/forio/Etcd.jl) | [p5-etcd](https://metacpan.org/release/Etcd)
- | --- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+ | --- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
- | **HTTPS Auth** | Y | Y | Y | Y | Y | Y | - | - | - | - | - | - |
+ | **HTTPS Auth** | Y | Y | Y | Y | Y | Y | - | - | - | - | - | - | - |
- | **Reconnect** | Y | - | Y | Y | - | - | - | Y | - | - | - | - |
+ | **Reconnect** | Y | - | Y | Y | - | - | - | Y | - | - | - | - | - |
- | **Mod/Lock** | - | - | Y | Y | - | - | - | - | - | - | - | Y |
+ | **Mod/Lock** | - | - | Y | Y | - | - | - | - | - | - | - | Y | - |
- | **Mod/Leader** | - | - | - | Y | - | - | - | - | - | - | - | Y |
+ | **Mod/Leader** | - | - | - | Y | - | - | - | - | - | - | - | Y | - |
- | **GET Features** | F | B | F | F | F | F | F | B | F | G | F | F |
+ | **GET Features** | F | B | F | F | F | F | F | B | F | G | F | F | F |
- | **PUT Features** | F | B | F | F | F | F | F | G | F | G | F | F |
+ | **PUT Features** | F | B | F | F | F | F | F | G | F | G | F | F | F |
- | **POST Features** | F | - | F | F | - | F | F | - | - | - | F | F |
+ | **POST Features** | F | - | F | F | - | F | F | - | - | - | F | F | F |
- | **DEL Features** | F | B | F | F | F | F | F | B | G | B | F | F |
+ | **DEL Features** | F | B | F | F | F | F | F | B | G | B | F | F | F |
**Legend**

View File

@ -8,6 +8,7 @@ For more information on how etcd can locate the cluster, see the [finding the cl
Please note - at least 3 nodes are required for [cluster availability][optimal-cluster-size].
[cluster-finding]: https://github.com/coreos/etcd/blob/master/Documentation/design/cluster-finding.md
[optimal-cluster-size]: https://github.com/coreos/etcd/blob/master/Documentation/optimal-cluster-size.md
## Using discovery.etcd.io
@ -27,8 +28,8 @@ Here's a full example:
```
TOKEN=$(curl https://discovery.etcd.io/new)
./etcd -name instance1 -peer-addr 10.1.2.3:7001 -addr 10.1.2.3:4001 -discovery $TOKEN
- ./etcd -name instance2 -peer-addr 10.1.2.4:7002 -addr 10.1.2.4:4002 -discovery $TOKEN
+ ./etcd -name instance2 -peer-addr 10.1.2.4:7001 -addr 10.1.2.4:4001 -discovery $TOKEN
- ./etcd -name instance3 -peer-addr 10.1.2.5:7002 -addr 10.1.2.5:4002 -discovery $TOKEN
+ ./etcd -name instance3 -peer-addr 10.1.2.5:7001 -addr 10.1.2.5:4001 -discovery $TOKEN
```
## Running Your Own Discovery Endpoint
@ -38,8 +39,8 @@ The discovery API communicates with a separate etcd cluster to store and retriev
```
TOKEN="testcluster"
./etcd -name instance1 -peer-addr 10.1.2.3:7001 -addr 10.1.2.3:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
- ./etcd -name instance2 -peer-addr 10.1.2.4:7002 -addr 10.1.2.4:4002 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
+ ./etcd -name instance2 -peer-addr 10.1.2.4:7001 -addr 10.1.2.4:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
- ./etcd -name instance3 -peer-addr 10.1.2.5:7002 -addr 10.1.2.5:4002 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
+ ./etcd -name instance3 -peer-addr 10.1.2.5:7001 -addr 10.1.2.5:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
```
If you're interested in how to discovery API works behind the scenes, read about the [Discovery Protocol](https://github.com/coreos/etcd/blob/master/Documentation/discovery-protocol.md).
@ -52,8 +53,6 @@ The Discovery API submits the `-peer-addr` of each etcd instance to the configur
The discovery API will automatically clean up the address of a stale peer that is no longer part of the cluster. The TTL for this process is a week, which should be long enough to handle any extremely long outage you may encounter. There is no harm in having stale peers in the list until they are cleaned up, since an etcd instance only needs to connect to one valid peer in the cluster to join.
[discovery-design]: https://github.com/coreos/etcd/blob/master/Documentation/design/cluster-finding.md
## Lifetime of a Discovery URL
A discovery URL identifies a single etcd cluster. Do not re-use discovery URLs for new clusters.

View File

@ -1,6 +1,8 @@
# Etcd Configuration
- Configuration options can be set in three places:
+ ## Node Configuration
Individual node configuration options can be set in three places:
1. Command line flags
2. Environment variables
@ -10,6 +12,16 @@ Options set on the command line take precedence over all other sources.
Options set in environment variables take precedence over options set in
configuration files.
## Cluster Configuration
Cluster-wide settings are configured via the `/config` admin endpoint and additionally in the configuration file. Values contained in the configuration file will seed the cluster setting with the provided value. After the cluster is running, only the admin endpoint is used.
The full documentation is contained in the [API docs](https://github.com/coreos/etcd/blob/master/Documentation/api.md#cluster-config).
* `activeSize` - the maximum number of peers that can participate in the consensus protocol. Other peers will join as standbys.
* `removeDelay` - the minimum time in seconds that a machine has been observed to be unresponsive before it is removed from the cluster.
* `syncInterval` - the amount of time in seconds between cluster sync when it runs in standby mode.
## Command Line Flags
### Required
@ -41,6 +53,9 @@ configuration files.
* `-peer-election-timeout` - The number of milliseconds to wait before the leader is declared unhealthy.
* `-peer-heartbeat-interval` - The number of milliseconds in between heartbeat requests
* `-snapshot=false` - Disable log snapshots. Defaults to `true`.
* `-cluster-active-size` - The expected number of instances participating in the consensus protocol. Only applied if the etcd instance is the first peer in the cluster.
* `-cluster-remove-delay` - The delay before one node is removed from the cluster since it cannot be connected at all. Only applied if the etcd instance is the first peer in the cluster.
* `-cluster-sync-interval` - The interval between synchronization for standby-mode instance with the cluster. Only applied if the etcd instance is the first peer in the cluster.
* `-v` - Enable verbose logging. Defaults to `false`.
* `-vv` - Enable very verbose logging. Defaults to `false`.
* `-version` - Print the version and exit.
@ -76,6 +91,11 @@ bind_addr = "127.0.0.1:7001"
ca_file = ""
cert_file = ""
key_file = ""
[cluster]
active_size = 9
remove_delay = 1800.0
sync_interval = 5.0
```
## Environment Variables
@ -105,3 +125,6 @@ key_file = ""
* `ETCD_PEER_CERT_FILE`
* `ETCD_PEER_KEY_FILE`
* `ETCD_PEER_ELECTION_TIMEOUT`
* `ETCD_CLUSTER_ACTIVE_SIZE`
* `ETCD_CLUSTER_REMOVE_DELAY`
* `ETCD_CLUSTER_SYNC_INTERVAL`
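To make the new cluster configuration section above concrete, a hedged sketch of the admin endpoint (the GET path, peer port and JSON keys follow the tests further down in this comparison; the PUT body shape is an assumption):

```
# Read the current cluster-wide settings from a peer's admin port
curl http://127.0.0.1:7001/v2/admin/config
# {"activeSize":9,"removeDelay":1800,"syncInterval":5}

# Update them at runtime (assumed request shape)
curl -X PUT http://127.0.0.1:7001/v2/admin/config -d '{"activeSize":5,"removeDelay":600}'
```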

View File

@ -18,8 +18,8 @@ If there are not enough peers to meet the active size, standbys will send join r
If there are more peers than the target active size then peers are removed by the leader and will become standbys.
The remove delay specifies how long the cluster should wait before removing a dead peer.
- By default this is 5 seconds.
+ By default this is 30 minutes.
- If a peer is inactive for 5 seconds then the peer is removed.
+ If a peer is inactive for 30 minutes then the peer is removed.
The standby sync interval specifies the synchronization interval of standbys with the cluster.
By default this is 5 seconds.

View File

@ -90,3 +90,6 @@ A detailed recap of client functionalities can be found in the [clients compatib
- [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
- [fleet](https://github.com/coreos/fleet) - Distributed init system
- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) - Container cluster manager.
- [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
- [duedil-ltd/discodns](https://github.com/duedil-ltd/discodns) - Simple DNS nameserver using etcd as a database for names and records.

View File

@ -1,7 +1,7 @@
## Modules
etcd has a number of modules that are built on top of the core etcd API.
- These modules provide things like dashboards, locks and leader election.
+ These modules provide things like dashboards, locks and leader election (removed).
**Warning**: Modules are deprecated from v0.4 until we have a solid base we can apply them back onto.
For now, we are choosing to focus on raft algorithm and core etcd to make sure that it works correctly and fast.
@ -81,7 +81,7 @@ curl -X DELETE http://127.0.0.1:4001/mod/v2/lock/customer1?value=bar
```
- ### Leader Election
+ ### Leader Election (Deprecated and Removed in 0.4)
The Leader Election module wraps the Lock module to allow clients to come to consensus on a single value.
This is useful when you want one server to process at a time but allow other servers to fail over.
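For reference, the removed leader-election endpoints can be reconstructed from the mod/v2/leader test file that is deleted later in this comparison (illustrative key and names):

```
# Claim leadership of a key for ttl seconds
curl -X PUT "http://127.0.0.1:4001/mod/v2/leader/order-processing?name=worker-1&ttl=10"

# Read the current leader
curl "http://127.0.0.1:4001/mod/v2/leader/order-processing"

# Step down
curl -X DELETE "http://127.0.0.1:4001/mod/v2/leader/order-processing?name=worker-1"
```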

View File

@ -1,4 +1,4 @@
- ectd is being used successfully by many companies in production. It is,
+ etcd is being used successfully by many companies in production. It is,
however, under active development and systems like etcd are difficult to get
correct. If you are comfortable with bleeding-edge software please use etcd and
provide us with the feedback and testing young software needs.

View File

@ -1,6 +1,6 @@
# etcd
- README version 0.4.2
+ README version 0.4.4
A highly-available key value store for shared configuration and service discovery.
etcd is inspired by [Apache ZooKeeper][zookeeper] and [doozer][doozer], with a focus on being:
@ -95,7 +95,7 @@ You have successfully started an etcd on a single machine and written a key to t
## Contact
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
- - IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) oon freenode.org
+ - IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) on freenode.org
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/issues/milestones)
- Bugs: [issues](https://github.com/coreos/etcd/issues)

View File

@ -1,85 +0,0 @@
package leader
import (
"fmt"
"testing"
"time"
"github.com/coreos/etcd/server"
"github.com/coreos/etcd/tests"
"github.com/coreos/etcd/third_party/github.com/stretchr/testify/assert"
)
// Ensure that a leader can be set and read.
func TestModLeaderSet(t *testing.T) {
tests.RunServer(func(s *server.Server) {
// Set leader.
body, status, err := testSetLeader(s, "foo", "xxx", 10)
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "2")
// Check that the leader is set.
body, status, err = testGetLeader(s, "foo")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "xxx")
// Delete leader.
body, status, err = testDeleteLeader(s, "foo", "xxx")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "")
// Check that the leader is removed.
body, status, err = testGetLeader(s, "foo")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "")
})
}
// Ensure that a leader can be renewed.
func TestModLeaderRenew(t *testing.T) {
tests.RunServer(func(s *server.Server) {
// Set leader.
body, status, err := testSetLeader(s, "foo", "xxx", 2)
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "2")
time.Sleep(1 * time.Second)
// Renew leader.
body, status, err = testSetLeader(s, "foo", "xxx", 3)
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "2")
time.Sleep(2 * time.Second)
// Check that the leader is set.
body, status, err = testGetLeader(s, "foo")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "xxx")
})
}
func testSetLeader(s *server.Server, key string, name string, ttl int) (string, int, error) {
resp, err := tests.PutForm(fmt.Sprintf("%s/mod/v2/leader/%s?name=%s&ttl=%d", s.URL(), key, name, ttl), nil)
ret := tests.ReadBody(resp)
return string(ret), resp.StatusCode, err
}
func testGetLeader(s *server.Server, key string) (string, int, error) {
resp, err := tests.Get(fmt.Sprintf("%s/mod/v2/leader/%s", s.URL(), key))
ret := tests.ReadBody(resp)
return string(ret), resp.StatusCode, err
}
func testDeleteLeader(s *server.Server, key string, name string) (string, int, error) {
resp, err := tests.DeleteForm(fmt.Sprintf("%s/mod/v2/leader/%s?name=%s", s.URL(), key, name), nil)
ret := tests.ReadBody(resp)
return string(ret), resp.StatusCode, err
}

View File

@ -12,7 +12,7 @@ const (
MinActiveSize = 3
// DefaultRemoveDelay is the default elapsed time before removal.
- DefaultRemoveDelay = float64((5 * time.Second) / time.Second)
+ DefaultRemoveDelay = float64((30 * time.Minute) / time.Second)
// MinRemoveDelay is the minimum remove delay allowed.
MinRemoveDelay = float64((2 * time.Second) / time.Second)
@ -25,7 +25,6 @@ const (
)
// ClusterConfig represents cluster-wide configuration settings.
// These settings can only be changed through Raft.
type ClusterConfig struct {
// ActiveSize is the maximum number of node that can join as Raft followers.
// Nodes that join the cluster after the limit is reached are standbys.
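A quick standalone check of the constant above (not part of the diff): dividing the Duration by time.Second before converting keeps the delay in whole seconds, so the new default works out to 1800.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same expression as DefaultRemoveDelay in the hunk above.
	fmt.Println(float64((30 * time.Minute) / time.Second)) // 1800
}
```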

View File

@ -23,6 +23,10 @@ import (
)
const (
// MaxHeartbeatTimeoutBackoff is the maximum number of seconds before we warn
// the user again about a peer not accepting heartbeats.
MaxHeartbeatTimeoutBackoff = 15 * time.Second
// ThresholdMonitorTimeout is the time between log notifications that the
// Raft heartbeat is too close to the election timeout.
ThresholdMonitorTimeout = 5 * time.Second
@ -70,10 +74,18 @@ type PeerServer struct {
routineGroup sync.WaitGroup
timeoutThresholdChan chan interface{}
logBackoffs map[string]*logBackoff
metrics *metrics.Bucket
sync.Mutex
}
type logBackoff struct {
next time.Time
backoff time.Duration
count int
}
// TODO: find a good policy to do snapshot
type snapshotConf struct {
// Etcd will check if snapshot is need every checkingInterval
@ -97,6 +109,7 @@ func NewPeerServer(psConfig PeerServerConfig, client *Client, registry *Registry
serverStats: serverStats,
timeoutThresholdChan: make(chan interface{}, 1),
logBackoffs: make(map[string]*logBackoff),
metrics: mb,
}
@ -627,7 +640,7 @@ func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string)
}
func (s *PeerServer) Stats() []byte {
- s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()
+ s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.StartTime).String()
// TODO: register state listener to raft to change this field
// rather than compare the state each time Stats() is called.
@ -687,11 +700,12 @@ func (s *PeerServer) raftEventLogger(event raft.Event) {
case raft.RemovePeerEventType:
log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
case raft.HeartbeatIntervalEventType:
- var name = "<unknown>"
+ peer, ok := value.(*raft.Peer)
- if peer, ok := value.(*raft.Peer); ok {
+ if !ok {
- name = peer.Name
+ log.Warnf("%s: heatbeat timeout from unknown peer", s.Config.Name)
return
}
- log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
+ s.logHeartbeatTimeout(peer)
case raft.ElectionTimeoutThresholdEventType:
select {
case s.timeoutThresholdChan <- value:
@ -701,6 +715,35 @@ func (s *PeerServer) raftEventLogger(event raft.Event) {
}
}
// logHeartbeatTimeout logs about the edge triggered heartbeat timeout event
// only if we haven't warned within a reasonable interval.
func (s *PeerServer) logHeartbeatTimeout(peer *raft.Peer) {
b, ok := s.logBackoffs[peer.Name]
if !ok {
b = &logBackoff{time.Time{}, time.Second, 1}
s.logBackoffs[peer.Name] = b
}
if peer.LastActivity().After(b.next) {
b.next = time.Time{}
b.backoff = time.Second
b.count = 1
}
if b.next.After(time.Now()) {
b.count++
return
}
b.backoff = 2 * b.backoff
if b.backoff > MaxHeartbeatTimeoutBackoff {
b.backoff = MaxHeartbeatTimeoutBackoff
}
b.next = time.Now().Add(b.backoff)
log.Infof("%s: warning: heartbeat time out peer=%q missed=%d backoff=%q", s.Config.Name, peer.Name, b.count, b.backoff)
}
func (s *PeerServer) recordMetricEvent(event raft.Event) {
name := fmt.Sprintf("raft.event.%s", event.Type())
value := event.Value().(time.Duration)
@ -810,7 +853,7 @@ func (s *PeerServer) monitorActiveSize() {
// If we have more active nodes than we should then remove.
if peerCount > activeSize {
peer := peers[rand.Intn(len(peers))]
- log.Infof("%s: removing: %v", s.Config.Name, peer)
+ log.Infof("%s: removing node: %v; peer number %d > expected size %d", s.Config.Name, peer, peerCount, activeSize)
if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
}
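A rough standalone estimate of the backoff added to peer_server.go above (assumes a peer that never recovers; not part of the diff): doubling from one second and capping at MaxHeartbeatTimeoutBackoff keeps the warning count over the 30-minute removal window on the order of a hundred lines instead of thousands.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		maxBackoff  = 15 * time.Second // MaxHeartbeatTimeoutBackoff in the diff
		removeDelay = 30 * time.Minute // DefaultRemoveDelay in the diff
	)
	backoff, warnings := time.Second, 0
	for elapsed := time.Duration(0); elapsed < removeDelay; elapsed += backoff {
		warnings++
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
	fmt.Println(warnings) // roughly 120 warnings, versus one per missed heartbeat before
}
```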

View File

@ -188,6 +188,7 @@ func (ps *PeerServer) RemoveHttpHandler(w http.ResponseWriter, req *http.Request
// Returns a JSON-encoded cluster configuration.
func (ps *PeerServer) getClusterConfigHttpHandler(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(ps.ClusterConfig())
}
@ -217,6 +218,7 @@ func (ps *PeerServer) setClusterConfigHttpHandler(w http.ResponseWriter, req *ht
log.Debugf("[recv] Update Cluster Config Request")
ps.server.Dispatch(c, w, req)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(ps.ClusterConfig())
}
@ -230,6 +232,7 @@ func (ps *PeerServer) getMachinesHttpHandler(w http.ResponseWriter, req *http.Re
}
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(&machines)
}
@ -237,6 +240,7 @@ func (ps *PeerServer) getMachinesHttpHandler(w http.ResponseWriter, req *http.Re
func (ps *PeerServer) getMachineHttpHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
m := ps.getMachineMessage(vars["name"], ps.raftServer.Leader())
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(m)
}
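A side note on the handler changes above (a minimal net/http sketch, not etcd code): response headers only take effect if they are set before the body is written, which is why the Content-Type line sits before the json.NewEncoder(...).Encode call in each handler.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func configHandler(w http.ResponseWriter, r *http.Request) {
	// Set the header first; once Encode writes the body the headers are flushed.
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{"activeSize": 9, "removeDelay": 1800})
}

func main() {
	http.HandleFunc("/v2/admin/config", configHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:7001", nil))
}
```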

View File

@ -15,7 +15,7 @@ type raftServerStats struct {
LeaderInfo struct {
Name string `json:"leader"`
Uptime string `json:"uptime"`
- startTime time.Time
+ StartTime time.Time `json:"startTime"`
} `json:"leaderInfo"`
RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"`
@ -43,7 +43,7 @@ func NewRaftServerStats(name string) *raftServerStats {
back: -1,
},
}
- stats.LeaderInfo.startTime = time.Now()
+ stats.LeaderInfo.StartTime = time.Now()
return stats
}
@ -54,7 +54,7 @@ func (ss *raftServerStats) RecvAppendReq(leaderName string, pkgSize int) {
ss.State = raft.Follower
if leaderName != ss.LeaderInfo.Name {
ss.LeaderInfo.Name = leaderName
- ss.LeaderInfo.startTime = time.Now()
+ ss.LeaderInfo.StartTime = time.Now()
}
ss.recvRateQueue.Insert(NewPackageStats(time.Now(), pkgSize))
@ -70,7 +70,7 @@ func (ss *raftServerStats) SendAppendReq(pkgSize int) {
if ss.State != raft.Leader {
ss.State = raft.Leader
ss.LeaderInfo.Name = ss.Name
- ss.LeaderInfo.startTime = now
+ ss.LeaderInfo.StartTime = now
}
ss.sendRateQueue.Insert(NewPackageStats(now, pkgSize))

View File

@ -1,3 +1,3 @@
package server
- const ReleaseVersion = "0.4.2"
+ const ReleaseVersion = "0.4.4"

View File

@ -56,6 +56,9 @@ Other Options:
-max-cluster-size Maximum number of nodes in the cluster.
-snapshot=false Disable log snapshots
-snapshot-count Number of transactions before issuing a snapshot.
-cluster-active-size Number of active nodes in the cluster.
-cluster-remove-delay Seconds before one node is removed.
-cluster-sync-interval Seconds between synchronizations for standby mode.
`
// Usage returns the usage message for etcd.
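For illustration, the new flags might be combined when bootstrapping the first peer of a cluster (a hedged sketch; addresses and values are placeholders, and per the configuration docs above the cluster-* flags only take effect on the first peer):

```
./etcd -name node1 -peer-addr 10.0.1.10:7001 -addr 10.0.1.10:4001 \
       -cluster-active-size 9 -cluster-remove-delay 1800 -cluster-sync-interval 5
```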

View File

@ -25,6 +25,7 @@ func TestClusterConfigSet(t *testing.T) {
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
body := tests.ReadBodyJSON(resp)
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
assert.Equal(t, body["activeSize"], 3)
assert.Equal(t, body["removeDelay"], 60)
}
@ -44,6 +45,7 @@ func TestClusterConfigReload(t *testing.T) {
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
body := tests.ReadBodyJSON(resp)
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
assert.Equal(t, body["activeSize"], 3)
assert.Equal(t, body["removeDelay"], 60)
@ -59,6 +61,7 @@ func TestClusterConfigReload(t *testing.T) {
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
body = tests.ReadBodyJSON(resp)
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
assert.Equal(t, body["activeSize"], 3)
assert.Equal(t, body["removeDelay"], 60)
}
@ -76,6 +79,7 @@ func TestGetMachines(t *testing.T) {
t.FailNow()
}
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
machines := make([]map[string]interface{}, 0)
b := tests.ReadBody(resp)
json.Unmarshal(b, &machines)

View File

@ -83,9 +83,14 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i))
}
- if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
- emOut := out.Addr().Interface().(extendableProto)
+ emOut := out.Addr().Interface().(extensionsMap)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
} else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
emOut := out.Addr().Interface().(extensionsBytes)
bIn := emIn.GetExtensions()
bOut := emOut.GetExtensions()
*bOut = append(*bOut, *bIn...)
}
uf := in.FieldByName("XXX_unrecognized")

View File

@ -235,12 +235,6 @@ func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer,
ptr := structPointer_Bytes(base, unrecField)
if *ptr == nil {
// This is the first skipped element,
// allocate a new buffer.
*ptr = o.bufalloc()
}
// Add the skipped field to struct field
obuf := o.buf
@ -381,9 +375,14 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
- ext := e.ExtensionMap()[int32(tag)] // may be missing
+ if ee, ok := e.(extensionsMap); ok {
ext := ee.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
- e.ExtensionMap()[int32(tag)] = ext
+ ee.ExtensionMap()[int32(tag)] = ext
} else if ee, ok := e.(extensionsBytes); ok {
ext := ee.GetExtensions()
*ext = append(*ext, o.buf[oi:o.index]...)
}
}
continue
}

View File

@ -221,6 +221,10 @@ func Marshal(pb Message) ([]byte, error) {
if err != nil && !state.shouldContinue(err, nil) {
return nil, err
}
if p.buf == nil && err == nil {
// Return a non-nil slice on success.
return []byte{}, nil
}
return p.buf, err
}
@ -400,23 +404,8 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
return nil
}
// need the length before we can write out the message itself,
// so marshal into a separate byte buffer first.
obuf := o.buf
o.buf = o.bufalloc()
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
return err
}
o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(nbuf)
+ return o.enc_len_struct(p.stype, p.sprop, structp, &state)
o.buffree(nbuf)
return state.err
}
func size_struct_message(p *Properties, base structPointer) int {
@ -748,24 +737,14 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
continue
}
- obuf := o.buf
+ o.buf = append(o.buf, p.tagcode...)
- o.buf = o.bufalloc()
+ err := o.enc_len_struct(p.stype, p.sprop, structp, &state)
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
if err == ErrNil {
return ErrRepeatedHasNil
}
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(nbuf)
o.buffree(nbuf)
}
return state.err
}
@ -923,6 +902,36 @@ func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n
return
}
var zeroes [20]byte // longer than any conceivable sizeVarint
// Encode a struct, preceded by its encoded length (as a varint).
func (o *Buffer) enc_len_struct(t reflect.Type, prop *StructProperties, base structPointer, state *errorState) error {
iLen := len(o.buf)
o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
iMsg := len(o.buf)
err := o.enc_struct(t, prop, base)
if err != nil && !state.shouldContinue(err, nil) {
return err
}
lMsg := len(o.buf) - iMsg
lLen := sizeVarint(uint64(lMsg))
switch x := lLen - (iMsg - iLen); {
case x > 0: // actual length is x bytes larger than the space we reserved
// Move msg x bytes right.
o.buf = append(o.buf, zeroes[:x]...)
copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
case x < 0: // actual length is x bytes smaller than the space we reserved
// Move msg x bytes left.
copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
o.buf = o.buf[:len(o.buf)+x] // x is negative
}
// Encode the length in the reserved space.
o.buf = o.buf[:iLen]
o.EncodeVarint(uint64(lMsg))
o.buf = o.buf[:len(o.buf)+lMsg]
return state.err
}
// errorState maintains the first error that occurs and updates that error
// with additional context.
type errorState struct {
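A side note on the four reserved bytes in the enc_len_struct addition above (a standalone sketch, not library code): a varint needs one byte per 7 bits of length, so most message lengths fit well inside the reservation and the buffer is shifted by the difference.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, n := range []uint64{100, 300, 70000, 1 << 28} {
		fmt.Printf("length %d encodes to %d varint byte(s)\n", n, binary.PutUvarint(buf, n))
	}
	// length 100 encodes to 1 varint byte(s)
	// length 300 encodes to 2 varint byte(s)
	// length 70000 encodes to 3 varint byte(s)
	// length 268435456 encodes to 5 varint byte(s)
}
```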

View File

@ -44,6 +44,24 @@ type Sizer interface {
Size() int
}
func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
s := *structPointer_Bytes(base, p.field)
if s == nil {
return ErrNil
}
o.buf = append(o.buf, s...)
return nil
}
func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
if s == nil {
return 0
}
n += len(s)
return
}
// Encode a reference to bool pointer.
func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
v := structPointer_RefBool(base, p.field)
@ -156,23 +174,8 @@ func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error
return nil
}
// need the length before we can write out the message itself,
// so marshal into a separate byte buffer first.
obuf := o.buf
o.buf = o.bufalloc()
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
return err
}
o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(nbuf)
+ return o.enc_len_struct(p.stype, p.sprop, structp, &state)
o.buffree(nbuf)
return nil
}
//TODO this is only copied, please fix this
@ -222,26 +225,17 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer)
continue
}
- obuf := o.buf
+ o.buf = append(o.buf, p.tagcode...)
- o.buf = o.bufalloc()
+ err := o.enc_len_struct(p.stype, p.sprop, structp, &state)
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
if err == ErrNil {
return ErrRepeatedHasNil
}
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(nbuf)
o.buffree(nbuf)
}
- return nil
+ return state.err
}
//TODO this is only copied, please fix this

View File

@ -55,9 +55,18 @@ type ExtensionRange struct {
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
}
type extensionsMap interface {
extendableProto
ExtensionMap() map[int32]Extension
}
type extensionsBytes interface {
extendableProto
GetExtensions() *[]byte
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
@ -92,7 +101,15 @@ type Extension struct {
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
- base.ExtensionMap()[id] = Extension{enc: b}
+ if ebase, ok := base.(extensionsMap); ok {
ebase.ExtensionMap()[id] = Extension{enc: b}
} else if ebase, ok := base.(extensionsBytes); ok {
clearExtension(base, id)
ext := ebase.GetExtensions()
*ext = append(*ext, b...)
} else {
panic("unreachable")
}
}
// isExtensionField returns true iff the given field number is in an extension range.
@ -210,26 +227,80 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
- _, ok := pb.ExtensionMap()[extension.Field]
+ if epb, doki := pb.(extensionsMap); doki {
_, ok := epb.ExtensionMap()[extension.Field]
return ok
} else if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
buf := *ext
o := 0
for o < len(buf) {
tag, n := DecodeVarint(buf[o:])
fieldNum := int32(tag >> 3)
if int32(fieldNum) == extension.Field {
return true
}
wireType := int(tag & 0x7)
o += n
l, err := size(buf[o:], wireType)
if err != nil {
return false
}
o += l
}
return false
}
panic("unreachable")
}
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
ext := pb.GetExtensions()
for offset < len(*ext) {
tag, n1 := DecodeVarint((*ext)[offset:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
n2, err := size((*ext)[offset+n1:], wireType)
if err != nil {
panic(err)
}
newOffset := offset + n1 + n2
if fieldNum == theFieldNum {
*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
return offset
}
offset = newOffset
}
return -1
}
func clearExtension(pb extendableProto, fieldNum int32) {
if epb, doki := pb.(extensionsMap); doki {
delete(epb.ExtensionMap(), fieldNum)
} else if epb, doki := pb.(extensionsBytes); doki {
offset := 0
for offset != -1 {
offset = deleteExtension(epb, fieldNum, offset)
}
} else {
panic("unreachable")
}
} }
// ClearExtension removes the given extension from pb. // ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) { func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.? // TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field) clearExtension(pb, extension.Field)
} }
// GetExtension parses and returns the given extension of pb. // GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension. // If the extension is not present it returns ErrMissingExtension.
// If the returned extension is modified, SetExtension must be called
// for the modifications to be reflected in pb.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil { if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err return nil, err
} }
e, ok := pb.ExtensionMap()[extension.Field] if epb, doki := pb.(extensionsMap); doki {
e, ok := epb.ExtensionMap()[extension.Field]
if !ok { if !ok {
return nil, ErrMissingExtension return nil, ErrMissingExtension
} }
@ -255,6 +326,28 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
e.desc = extension
e.enc = nil
return e.value, nil
} else if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
o := 0
for o < len(*ext) {
tag, n := DecodeVarint((*ext)[o:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
l, err := size((*ext)[o+n:], wireType)
if err != nil {
return nil, err
}
if int32(fieldNum) == extension.Field {
v, err := decodeExtension((*ext)[o:o+n+l], extension)
if err != nil {
return nil, err
}
return v, nil
}
o += n + l
}
}
panic("unreachable")
}
// decodeExtension decodes an extension encoded in b.
@ -319,7 +412,21 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
return errors.New("proto: bad extension value type")
}
- pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ if epb, doki := pb.(extensionsMap); doki {
epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
} else if epb, doki := pb.(extensionsBytes); doki {
ClearExtension(pb, extension)
ext := epb.GetExtensions()
et := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
p := NewBuffer(nil)
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
*ext = append(*ext, p.buf...)
}
return nil
}
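The byte-scanning branches added above all lean on the protobuf wire format, where each field starts with a varint key whose low three bits are the wire type and whose remaining bits are the field number. A standalone sketch (not part of the library changes):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 0x1a is the key for field 3 with wire type 2 (length-delimited),
	// followed here by a 2-byte payload.
	buf := []byte{0x1a, 0x02, 'h', 'i'}
	key, n := binary.Uvarint(buf)
	fmt.Println("field:", key>>3, "wire type:", key&0x7, "key bytes:", n)
	// field: 3 wire type: 2 key bytes: 1
}
```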

View File

@ -31,6 +31,7 @@ import (
"fmt" "fmt"
"reflect" "reflect"
"sort" "sort"
"strings"
) )
func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool { func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
@ -58,6 +59,48 @@ func SizeOfExtensionMap(m map[int32]Extension) (n int) {
return sizeExtensionMap(m) return sizeExtensionMap(m)
} }
type sortableMapElem struct {
field int32
ext Extension
}
func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
s := make(sortableExtensions, 0, len(m))
for k, v := range m {
s = append(s, &sortableMapElem{field: k, ext: v})
}
return s
}
type sortableExtensions []*sortableMapElem
func (this sortableExtensions) Len() int { return len(this) }
func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
func (this sortableExtensions) String() string {
sort.Sort(this)
ss := make([]string, len(this))
for i := range this {
ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
}
return "map[" + strings.Join(ss, ",") + "]"
}
func StringFromExtensionsMap(m map[int32]Extension) string {
return newSortableExtensionsFromMap(m).String()
}
func StringFromExtensionsBytes(ext []byte) string {
m, err := BytesToExtensionsMap(ext)
if err != nil {
panic(err)
}
return StringFromExtensionsMap(m)
}
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
if err := encodeExtensionMap(m); err != nil {
return 0, err
@ -83,6 +126,58 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
return m[id].enc, nil
}
func size(buf []byte, wire int) (int, error) {
switch wire {
case WireVarint:
_, n := DecodeVarint(buf)
return n, nil
case WireFixed64:
return 8, nil
case WireBytes:
v, n := DecodeVarint(buf)
return int(v) + n, nil
case WireFixed32:
return 4, nil
case WireStartGroup:
offset := 0
for {
u, n := DecodeVarint(buf[offset:])
fwire := int(u & 0x7)
offset += n
if fwire == WireEndGroup {
return offset, nil
}
s, err := size(buf[offset:], wire)
if err != nil {
return 0, err
}
offset += s
}
}
return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
}
func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
m := make(map[int32]Extension)
i := 0
for i < len(buf) {
tag, n := DecodeVarint(buf[i:])
if n <= 0 {
return nil, fmt.Errorf("unable to decode varint")
}
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
l, err := size(buf[i+n:], wireType)
if err != nil {
return nil, err
}
end := i + int(l) + n
m[int32(fieldNum)] = Extension{enc: buf[i:end]}
i = end
}
return m, nil
}
func NewExtension(e []byte) Extension {
ee := Extension{enc: make([]byte, len(e))}
copy(ee.enc, e)

View File

@ -242,8 +242,6 @@ func GetStats() Stats { return stats }
type Buffer struct {
buf []byte // encode/decode byte stream
index int // write point
freelist [10][]byte // list of available buffers
nfreelist int // number of free buffers
// pools of basic types to amortize allocation.
bools []bool
@ -260,20 +258,11 @@ type Buffer struct {
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
- p := new(Buffer)
+ return &Buffer{buf: e}
if e == nil {
e = p.bufalloc()
}
p.buf = e
p.index = 0
return p
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
if p.buf == nil {
p.buf = p.bufalloc()
}
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
@ -288,44 +277,6 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
// Allocate a buffer for the Buffer.
func (p *Buffer) bufalloc() []byte {
if p.nfreelist > 0 {
// reuse an old one
p.nfreelist--
s := p.freelist[p.nfreelist]
return s[0:0]
}
// make a new one
s := make([]byte, 0, 16)
return s
}
// Free (and remember in freelist) a byte buffer for the Buffer.
func (p *Buffer) buffree(s []byte) {
if p.nfreelist < len(p.freelist) {
// Take next slot.
p.freelist[p.nfreelist] = s
p.nfreelist++
return
}
// Find the smallest.
besti := -1
bestl := len(s)
for i, b := range p.freelist {
if len(b) < bestl {
besti = i
bestl = len(b)
}
}
// Overwrite the smallest.
if besti >= 0 {
p.freelist[besti] = s
}
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
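
With the freelist gone, NewBuffer reduces to a plain composite literal and growth is left to append and the runtime allocator; callers that want to amortize allocations can still reuse one Buffer across messages via Reset. A usage sketch under the same assumed vendored import path:

package main

import (
	"fmt"

	proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil) // now just &Buffer{}; no freelist behind it
	b.EncodeVarint(300)       // low-level wire helpers behave as before
	fmt.Printf("% x\n", b.Bytes()) // ac 02

	b.Reset() // reuse the Buffer (and its backing array) for the next message
	fmt.Println(len(b.Bytes())) // 0
}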

View File

@ -51,10 +51,17 @@ func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interf
}
func copyUintPtr(oldptr, newptr uintptr, size int) {
- for j := 0; j < size; j++ {
- oldb := (*byte)(unsafe.Pointer(oldptr + uintptr(j)))
- *(*byte)(unsafe.Pointer(newptr + uintptr(j))) = *oldb
- }
+ oldbytes := make([]byte, 0)
+ oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
+ oldslice.Data = oldptr
+ oldslice.Len = size
+ oldslice.Cap = size
+ newbytes := make([]byte, 0)
+ newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
+ newslice.Data = newptr
+ newslice.Len = size
+ newslice.Cap = size
+ copy(newbytes, oldbytes)
}
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
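
The rewritten copyUintPtr stops copying byte-by-byte and instead aliases both raw regions as []byte through reflect.SliceHeader, letting a single copy (and its underlying memmove) do the work. A standalone sketch of the same aliasing trick; it is illustrative only and relies on the classic pre-Go-1.17 SliceHeader pattern rather than anything exported by this package:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// asBytes aliases size bytes starting at p as a []byte without copying,
// mirroring the pattern used in copyUintPtr above.
func asBytes(p uintptr, size int) []byte {
	var b []byte
	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	h.Data = p
	h.Len = size
	h.Cap = size
	return b
}

func main() {
	src := [4]byte{1, 2, 3, 4}
	var dst [4]byte
	copy(asBytes(uintptr(unsafe.Pointer(&dst[0])), len(dst)),
		asBytes(uintptr(unsafe.Pointer(&src[0])), len(src)))
	fmt.Println(dst) // [1 2 3 4]
}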

View File

@ -575,10 +575,16 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
+ if len(f.Tag.Get("protobuf")) > 0 {
+ p.enc = (*Buffer).enc_ext_slice_byte
+ p.dec = nil // not needed
+ p.size = size_ext_slice_byte
+ } else {
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
+ }
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
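
The special case now distinguishes two generated layouts for XXX_extensions: the classic map[int32]Extension field (no struct tag), which keeps the enc_map/size_map path, and a tagged byte-slice form that is encoded and sized as raw bytes. An illustrative (not generated) pair of struct shapes, assuming the same vendored proto import:

package main

import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"

// classicMsg sketches the map-based layout: no protobuf tag on XXX_extensions,
// so properties.go keeps routing it through enc_map / size_map.
type classicMsg struct {
	XXX_extensions   map[int32]proto.Extension `json:"-"`
	XXX_unrecognized []byte                    `json:"-"`
}

// bytesExtMsg sketches the byte-slice layout: the protobuf tag is what flips
// the field over to enc_ext_slice_byte / size_ext_slice_byte.
type bytesExtMsg struct {
	XXX_extensions   []byte `protobuf:"bytes,0,opt" json:"-"`
	XXX_unrecognized []byte `json:"-"`
}

func main() {}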

View File

@ -2,9 +2,41 @@
// source: test.proto
// DO NOT EDIT!
/*
Package testdata is a generated protocol buffer package.
It is generated from these files:
test.proto
It has these top-level messages:
GoEnum
GoTestField
GoTest
GoSkipTest
NonPackedTest
PackedTest
MaxTag
OldMessage
NewMessage
InnerMessage
OtherMessage
MyMessage
Ext
MyMessageSet
Empty
MessageList
Strings
Defaults
SubDefaults
RepeatedEnum
MoreRepeated
GroupOld
GroupNew
FloatingPoint
*/
package testdata
- import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"
+ import proto "github.com/coreos/etcd/third_party/github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"
import json "encoding/json"
import math "math"

View File

@ -79,6 +79,13 @@ type textWriter struct {
w writer
}
// textMarshaler is implemented by Messages that can marshal themselves.
// It is identical to encoding.TextMarshaler, introduced in Go 1.2,
// which will eventually replace it.
type textMarshaler interface {
MarshalText() (text []byte, err error)
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
@ -366,7 +373,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
}
}
w.indent()
- if err := writeStruct(w, v); err != nil {
+ if tm, ok := v.Interface().(textMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
@ -555,7 +570,18 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
- m := ep.ExtensionMap()
+ var m map[int32]Extension
+ if em, ok := ep.(extensionsMap); ok {
+ m = em.ExtensionMap()
+ } else if em, ok := ep.(extensionsBytes); ok {
+ eb := em.GetExtensions()
+ var err error
+ m, err = BytesToExtensionsMap(*eb)
+ if err != nil {
+ return err
+ }
+ }
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
@ -653,6 +679,19 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
compact: compact,
}
if tm, ok := pb.(textMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil {
@ -666,7 +705,9 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
- func MarshalText(w io.Writer, pb Message) error { return marshalText(w, pb, false) }
+ func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+ }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string {
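
Two small interfaces let writeExtensions accept either extension representation; their shapes, inferred from the calls in this diff (the exact in-package definitions may differ), look roughly like:

package main

import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"

// extensionsMap is the classic accessor: the message hands back its
// map of extensions directly.
type extensionsMap interface {
	ExtensionMap() map[int32]proto.Extension
}

// extensionsBytes is the byte-slice accessor: the raw bytes are converted
// on demand with BytesToExtensionsMap before being written out.
type extensionsBytes interface {
	GetExtensions() *[]byte
}

func main() {}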

View File

@ -48,6 +48,13 @@ import (
"unicode/utf8"
)
// textUnmarshaler is implemented by Messages that can unmarshal themselves.
// It is identical to encoding.TextUnmarshaler, introduced in Go 1.2,
// which will eventually replace it.
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}
type ParseError struct {
Message string
Line int // 1-based line number
@ -686,6 +693,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError {
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement textUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
@ -704,6 +712,10 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError {
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(textUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {

View File

@ -413,6 +413,16 @@ func TestUnmarshalText(t *testing.T) {
}
}
func TestUnmarshalTextCustomMessage(t *testing.T) {
msg := &textMessage{}
if err := UnmarshalText("custom", msg); err != nil {
t.Errorf("Unexpected error from custom unmarshal: %v", err)
}
if UnmarshalText("not custom", msg) == nil {
t.Errorf("Didn't get expected error from custom unmarshal")
}
}
// Regression test; this caused a panic.
func TestRepeatedEnum(t *testing.T) {
pb := new(RepeatedEnum)

View File

@ -44,6 +44,26 @@ import (
pb "./testdata"
)
// textMessage implements the methods that allow it to marshal and unmarshal
// itself as text.
type textMessage struct {
}
func (*textMessage) MarshalText() ([]byte, error) {
return []byte("custom"), nil
}
func (*textMessage) UnmarshalText(bytes []byte) error {
if string(bytes) != "custom" {
return errors.New("expected 'custom'")
}
return nil
}
func (*textMessage) Reset() {}
func (*textMessage) String() string { return "" }
func (*textMessage) ProtoMessage() {}
func newTestMessage() *pb.MyMessage {
msg := &pb.MyMessage{
Count: proto.Int32(42),
@ -153,6 +173,16 @@ func TestMarshalText(t *testing.T) {
}
}
func TestMarshalTextCustomMessage(t *testing.T) {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
t.Fatalf("proto.MarshalText: %v", err)
}
s := buf.String()
if s != "custom" {
t.Errorf("Got %q, expected %q", s, "custom")
}
}
func TestMarshalTextNil(t *testing.T) {
want := "<nil>"
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}

View File

@ -168,9 +168,10 @@ func (l *Log) open(path string) error {
if err == io.EOF {
debugln("open.log.append: finish ")
} else {
- if err = os.Truncate(path, readBytes); err != nil {
+ if err = l.file.Truncate(readBytes); err != nil {
return fmt.Errorf("raft.Log: Unable to recover: %v", err)
}
+ l.file.Seek(readBytes, os.SEEK_SET)
}
break
}
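
Recovery after a partially written entry now truncates through the log's already-open file handle and then seeks back to the truncation point, so the write offset matches the new end of file before appends resume. A minimal sketch of that pattern (hypothetical helper name, abbreviated error handling):

package main

import (
	"fmt"
	"io"
	"os"
)

// truncateAfterGood trims f back to goodOffset (the end of the last entry that
// decoded cleanly) and repositions the write offset.
func truncateAfterGood(f *os.File, goodOffset int64) error {
	if err := f.Truncate(goodOffset); err != nil {
		return fmt.Errorf("raft.Log: Unable to recover: %v", err)
	}
	// Truncate does not move the file offset, so seek explicitly before appending.
	_, err := f.Seek(goodOffset, io.SeekStart)
	return err
}

func main() {
	f, _ := os.CreateTemp("", "log")
	defer os.Remove(f.Name())
	f.WriteString("entry-1|entry-2|garbage")
	if err := truncateAfterGood(f, int64(len("entry-1|entry-2|"))); err != nil {
		panic(err)
	}
	f.WriteString("entry-3|")
	st, _ := f.Stat()
	fmt.Println(st.Size()) // 24: the trailing garbage is gone
}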

View File

@ -2,6 +2,23 @@
// source: append_entries_request.proto // source: append_entries_request.proto
// DO NOT EDIT! // DO NOT EDIT!
/*
Package protobuf is a generated protocol buffer package.
It is generated from these files:
append_entries_request.proto
append_entries_responses.proto
log_entry.proto
request_vote_request.proto
request_vote_responses.proto
snapshot_recovery_request.proto
snapshot_recovery_response.proto
snapshot_request.proto
snapshot_response.proto
It has these top-level messages:
AppendEntriesRequest
*/
package protobuf package protobuf
import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto" import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"
@ -110,7 +127,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -127,7 +144,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.Term = &v m.Term = &v
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -144,7 +161,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.PrevLogIndex = &v m.PrevLogIndex = &v
case 3: case 3:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -161,7 +178,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.PrevLogTerm = &v m.PrevLogTerm = &v
case 4: case 4:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -178,7 +195,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.CommitIndex = &v m.CommitIndex = &v
case 5: case 5:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -201,7 +218,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
index = postIndex index = postIndex
case 6: case 6:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -236,6 +253,9 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io1.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -309,7 +329,6 @@ func sovAppendEntriesRequest(x uint64) (n int) {
} }
func sozAppendEntriesRequest(x uint64) (n int) { func sozAppendEntriesRequest(x uint64) (n int) {
return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedAppendEntriesRequest(r randyAppendEntriesRequest, easy bool) *AppendEntriesRequest { func NewPopulatedAppendEntriesRequest(r randyAppendEntriesRequest, easy bool) *AppendEntriesRequest {
this := &AppendEntriesRequest{} this := &AppendEntriesRequest{}
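
Across all of the regenerated Unmarshal methods in this file and the ones below, the copy of unknown fields into XXX_unrecognized is now preceded by a bounds check, so a message whose length prefix claims more bytes than the buffer holds fails with io.ErrUnexpectedEOF instead of panicking on the slice expression. A sketch of the guard (hypothetical helper, not the generated code):

package main

import (
	"fmt"
	"io"
)

// copyUnknown appends data[index:index+skippy] to dst after the same bounds
// check the regenerated Unmarshal methods now perform.
func copyUnknown(dst, data []byte, index, skippy int) ([]byte, error) {
	if index+skippy > len(data) {
		// A truncated message now surfaces as an error instead of a panic.
		return dst, io.ErrUnexpectedEOF
	}
	return append(dst, data[index:index+skippy]...), nil
}

func main() {
	data := []byte{0x08, 0xac} // claims more bytes than are present
	if _, err := copyUnknown(nil, data, 0, 5); err != nil {
		fmt.Println("unmarshal failed:", err) // unmarshal failed: unexpected EOF
	}
}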

View File

@ -94,7 +94,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -111,7 +111,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
m.Term = &v m.Term = &v
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -128,7 +128,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
m.Index = &v m.Index = &v
case 3: case 3:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -145,7 +145,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
m.CommitIndex = &v m.CommitIndex = &v
case 4: case 4:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -175,6 +175,9 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io2.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -236,7 +239,6 @@ func sovAppendEntriesResponses(x uint64) (n int) {
} }
func sozAppendEntriesResponses(x uint64) (n int) { func sozAppendEntriesResponses(x uint64) (n int) {
return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedAppendEntriesResponse(r randyAppendEntriesResponses, easy bool) *AppendEntriesResponse { func NewPopulatedAppendEntriesResponse(r randyAppendEntriesResponses, easy bool) *AppendEntriesResponse {
this := &AppendEntriesResponse{} this := &AppendEntriesResponse{}

View File

@ -94,7 +94,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -111,7 +111,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
m.Index = &v m.Index = &v
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -128,7 +128,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
m.Term = &v m.Term = &v
case 3: case 3:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -151,7 +151,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
index = postIndex index = postIndex
case 4: case 4:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto.ErrWrongType
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -185,6 +185,9 @@ func (m *LogEntry) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -248,7 +251,6 @@ func sovLogEntry(x uint64) (n int) {
} }
func sozLogEntry(x uint64) (n int) { func sozLogEntry(x uint64) (n int) {
return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedLogEntry(r randyLogEntry, easy bool) *LogEntry { func NewPopulatedLogEntry(r randyLogEntry, easy bool) *LogEntry {
this := &LogEntry{} this := &LogEntry{}

View File

@ -94,7 +94,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -111,7 +111,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
m.Term = &v m.Term = &v
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -128,7 +128,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
m.LastLogIndex = &v m.LastLogIndex = &v
case 3: case 3:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -145,7 +145,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
m.LastLogTerm = &v m.LastLogTerm = &v
case 4: case 4:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -180,6 +180,9 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io3.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -242,7 +245,6 @@ func sovRequestVoteRequest(x uint64) (n int) {
} }
func sozRequestVoteRequest(x uint64) (n int) { func sozRequestVoteRequest(x uint64) (n int) {
return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedRequestVoteRequest(r randyRequestVoteRequest, easy bool) *RequestVoteRequest { func NewPopulatedRequestVoteRequest(r randyRequestVoteRequest, easy bool) *RequestVoteRequest {
this := &RequestVoteRequest{} this := &RequestVoteRequest{}

View File

@ -78,7 +78,7 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto8.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -95,7 +95,7 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
m.Term = &v m.Term = &v
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto8.ErrWrongType
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -125,6 +125,9 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io4.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -178,7 +181,6 @@ func sovRequestVoteResponses(x uint64) (n int) {
} }
func sozRequestVoteResponses(x uint64) (n int) { func sozRequestVoteResponses(x uint64) (n int) {
return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedRequestVoteResponse(r randyRequestVoteResponses, easy bool) *RequestVoteResponse { func NewPopulatedRequestVoteResponse(r randyRequestVoteResponses, easy bool) *RequestVoteResponse {
this := &RequestVoteResponse{} this := &RequestVoteResponse{}

View File

@ -125,7 +125,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -148,7 +148,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
index = postIndex index = postIndex
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -165,7 +165,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
m.LastIndex = &v m.LastIndex = &v
case 3: case 3:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -182,7 +182,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
m.LastTerm = &v m.LastTerm = &v
case 4: case 4:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var msglen int var msglen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -205,7 +205,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
index = postIndex index = postIndex
case 5: case 5:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var byteLen int var byteLen int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -239,6 +239,9 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io5.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -266,7 +269,7 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -289,7 +292,7 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
index = postIndex index = postIndex
case 2: case 2:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -324,6 +327,9 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io5.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -422,7 +428,6 @@ func sovSnapshotRecoveryRequest(x uint64) (n int) {
} }
func sozSnapshotRecoveryRequest(x uint64) (n int) { func sozSnapshotRecoveryRequest(x uint64) (n int) {
return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedSnapshotRecoveryRequest(r randySnapshotRecoveryRequest, easy bool) *SnapshotRecoveryRequest { func NewPopulatedSnapshotRecoveryRequest(r randySnapshotRecoveryRequest, easy bool) *SnapshotRecoveryRequest {
this := &SnapshotRecoveryRequest{} this := &SnapshotRecoveryRequest{}

View File

@ -86,7 +86,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -103,7 +103,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
m.Term = &v m.Term = &v
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -121,7 +121,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
m.Success = &b m.Success = &b
case 3: case 3:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -150,6 +150,9 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io6.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -207,7 +210,6 @@ func sovSnapshotRecoveryResponse(x uint64) (n int) {
} }
func sozSnapshotRecoveryResponse(x uint64) (n int) { func sozSnapshotRecoveryResponse(x uint64) (n int) {
return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedSnapshotRecoveryResponse(r randySnapshotRecoveryResponse, easy bool) *SnapshotRecoveryResponse { func NewPopulatedSnapshotRecoveryResponse(r randySnapshotRecoveryResponse, easy bool) *SnapshotRecoveryResponse {
this := &SnapshotRecoveryResponse{} this := &SnapshotRecoveryResponse{}

View File

@ -86,7 +86,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 2 { if wireType != 2 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
} }
var stringLen uint64 var stringLen uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -109,7 +109,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
index = postIndex index = postIndex
case 2: case 2:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -126,7 +126,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
m.LastIndex = &v m.LastIndex = &v
case 3: case 3:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
} }
var v uint64 var v uint64
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -155,6 +155,9 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io7.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -213,7 +216,6 @@ func sovSnapshotRequest(x uint64) (n int) {
} }
func sozSnapshotRequest(x uint64) (n int) { func sozSnapshotRequest(x uint64) (n int) {
return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedSnapshotRequest(r randySnapshotRequest, easy bool) *SnapshotRequest { func NewPopulatedSnapshotRequest(r randySnapshotRequest, easy bool) *SnapshotRequest {
this := &SnapshotRequest{} this := &SnapshotRequest{}

View File

@ -70,7 +70,7 @@ func (m *SnapshotResponse) Unmarshal(data []byte) error {
switch fieldNum { switch fieldNum {
case 1: case 1:
if wireType != 0 { if wireType != 0 {
- return proto.ErrWrongType
+ return code_google_com_p_gogoprotobuf_proto16.ErrWrongType
} }
var v int var v int
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
@ -100,6 +100,9 @@ func (m *SnapshotResponse) Unmarshal(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
if (index + skippy) > l {
return io8.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...) m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy index += skippy
} }
@ -149,7 +152,6 @@ func sovSnapshotResponse(x uint64) (n int) {
} }
func sozSnapshotResponse(x uint64) (n int) { func sozSnapshotResponse(x uint64) (n int) {
return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
} }
func NewPopulatedSnapshotResponse(r randySnapshotResponse, easy bool) *SnapshotResponse { func NewPopulatedSnapshotResponse(r randySnapshotResponse, easy bool) *SnapshotResponse {
this := &SnapshotResponse{} this := &SnapshotResponse{}