Compare commits


44 Commits

Author SHA1 Message Date
4fb6087f4a CHANGELOG: release 0.4.4 2014-06-24 10:56:53 -07:00
5524131a9e Merge pull request #865 from robstrong/hotfix/contentType
fix(peer_server) set content type to application/json in admin
2014-06-23 16:31:49 -07:00
3efb4d837b Merge pull request #844 from unihorn/102
chore(peer_server): improve log for auto removal
2014-06-23 14:19:48 -07:00
494d2c67aa fix(peer_server) set content type to application/json in admin 2014-06-21 13:13:10 -04:00
fb32a999a6 doc: add note about removal of leader mod 2014-06-19 17:10:34 -07:00
d2f5934aa1 mod: remove defunct leader test 2014-06-19 17:10:34 -07:00
4f3fb5a702 Merge pull request #861 from andybons/patch-2
Update to Go v1.3
2014-06-19 13:54:34 -07:00
9f5ec7732e Update to Go v1.3
Now I’m just being OCD about it.
2014-06-19 10:25:52 -04:00
eb00f200d3 Merge pull request #856 from robn/patch-1
Add p5-etcd (Perl client lib) to clients-matrix
2014-06-18 10:45:11 -07:00
38d16775ab Merge pull request #858 from mikeumus/patch-1
docs(readme.md): spelling fix "oon" to "on"
2014-06-17 23:20:23 -07:00
690fd12b07 docs(readme.md): spelling fix "oon" to "on" 2014-06-18 02:11:32 -04:00
b31483b2be Merge pull request #850 from robszumski/update-config
feat(docs): add cluster config section
2014-06-17 17:45:50 -07:00
e9a21dda4b Merge pull request #851 from unihorn/103
docs(configuration): add cluster configuration
2014-06-17 17:44:51 -07:00
2134036942 Merge pull request #857 from tarnfeld/tools-discodns
Added discodns to the list of tools using etcd
2014-06-17 16:10:57 -07:00
6bd2ee4c49 Added discodns to the list of tools using etcd 2014-06-18 00:08:21 +01:00
fcd429467e Add p5-etcd (Perl client lib) to clients-matrix 2014-06-18 08:15:03 +10:00
e5e759b962 docs(config): refine cluster configuration 2014-06-17 09:31:08 -07:00
d8a08f53e3 feat(docs): add cluster config section 2014-06-16 22:31:13 -07:00
3e95bf0fa3 Merge pull request #854 from brianredbeard/moarthings
docs(libraries-and-tools.md) Add vulcan proxy and kubernetes
2014-06-16 20:05:43 -07:00
0d2512cb99 docs(libraries-and-tools.md) Add vulcan proxy and kubernetes
Both vulcan proxy (vulcand) and Google kubernetes utilize etcd as a
storage engine.
2014-06-16 20:03:47 -07:00
a29f6fb799 docs(configuration): add cluster configuration 2014-06-16 13:58:00 -07:00
fc2afe1ed2 Merge pull request #847 from pwaller/patch-1
docs(production-ready.md): Tiny typo fix
2014-06-13 08:58:33 -07:00
24a442383b docs(production-ready.md): Tiny typo fix 2014-06-13 14:41:23 +01:00
f387bf8464 chore(peer_server): improve log for auto removal 2014-06-12 10:02:56 -07:00
83b06c0715 Merge pull request #841 from andybons/patch-1
Update to Go v1.2.2
2014-06-11 09:43:25 -07:00
75dc10c39d Update to Go v1.2.2 2014-06-09 16:03:01 -04:00
66acf8a4e9 Merge pull request #839 from jonboulle/jonboulle-master
docs(cluster-discovery): fix bad link and confusing port references
2014-06-09 09:47:58 -07:00
1359d29fa4 docs(cluster-discovery): fix bad link and confusing port references 2014-06-08 23:58:14 -07:00
dc1f4adcd0 chore(server): bump to 0.4.3+git 2014-06-07 18:17:54 -07:00
9970141f76 chore(server): bump to 0.4.3 2014-06-07 18:17:05 -07:00
16c2bcf951 chore(server): go fmt
blame me for not running test first.
2014-06-07 18:03:22 -07:00
868b7f7902 Merge pull request #836 from philips/reduce-heartbeat-logs
fix(server): reduce the screaming heartbeat logs
2014-06-07 17:48:22 -07:00
1c958f8fc3 fix(server): reduce the screaming heartbeat logs
Currently the only way we know that a peer isn't getting a heartbeat is
an edge triggered event from go raft on every missed heartbeat. This
means that we need to do some book keeping in order to do exponential
backoff.

The upside is that instead of screaming thousands of log lines before a
machine hits the default removal of 30 minutes it is only ~100.
2014-06-07 17:47:10 -07:00
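
The approach this commit describes can be sketched in isolation: keep a small per-peer record, double the interval between warnings each time one is emitted, and cap it. The following is a minimal, hypothetical illustration of that bookkeeping (names and the once-per-second simulation are assumptions for the example; the actual implementation appears in the peer_server.go diff further down):

```go
package main

import (
	"fmt"
	"time"
)

// peerBackoff is a hypothetical per-peer record: when we may warn next,
// the current wait between warnings, and how many timeouts were suppressed.
type peerBackoff struct {
	next    time.Time
	backoff time.Duration
	count   int
}

// shouldLog doubles the backoff each time a warning is emitted and caps it,
// so a dead peer produces on the order of a hundred log lines over 30
// minutes instead of one line per missed heartbeat.
func (b *peerBackoff) shouldLog(now time.Time, max time.Duration) bool {
	if now.Before(b.next) {
		b.count++ // suppressed; remember how many events we skipped
		return false
	}
	b.backoff *= 2
	if b.backoff > max {
		b.backoff = max
	}
	b.next = now.Add(b.backoff)
	return true
}

func main() {
	b := &peerBackoff{backoff: time.Second}
	warnings := 0
	// Simulate 30 minutes of missed heartbeats, checked once per second.
	for now := time.Unix(0, 0); now.Before(time.Unix(1800, 0)); now = now.Add(time.Second) {
		if b.shouldLog(now, 15*time.Second) {
			warnings++
		}
	}
	fmt.Println("warnings emitted:", warnings) // on the order of 100, not 1800
}
```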
dfeecd2537 Merge pull request #835 from unihorn/101
chore(server): set DefaultRemoveDelay to 30mins
2014-06-06 17:56:01 -07:00
ed58193ebe chore(server): set DefaultRemoveDelay to 30mins
Its value was 5s before, which could remove the node insanely fast.
2014-06-06 16:57:35 -07:00
79c650d900 Merge pull request #834 from unihorn/100
fix(raft/protobuf): avoid panic on unexpected data
2014-06-06 15:08:13 -07:00
a451cf2333 fix(raft/protobuf): avoid panic on unexpected data 2014-06-06 14:34:32 -07:00
3455431da3 Merge pull request #833 from unihorn/99
bump(code.google.com/p/gogoprotobuf): 7fd1620f09
2014-06-06 13:48:47 -07:00
9424a10f49 bump(code.google.com/p/gogoprotobuf): 7fd1620f09 2014-06-06 13:35:59 -07:00
fbcfe8e1c4 Merge pull request #807 from Shopify/raft-server-stats-struct-field-tag-fix
style(server): changed a LeaderInfo struct field from "startTime" to "StartTime"
2014-06-05 12:45:34 -07:00
757bb3af13 Merge pull request #830 from unihorn/98
fix(raft/log): truncate file and reset offset correctly
2014-06-05 12:40:23 -07:00
2cd367e9d9 fix(raft/log): truncate file and reset offset correctly 2014-06-05 12:09:25 -07:00
a974bbfe4f chore(server): bump to 0.4.2+git 2014-06-02 15:26:06 -07:00
673d90728e style(server): changed a LeaderInfo struct field from "startTime" to "StartTime"
Changed the LeaderInfo struct "start time" field from "startTime" to "StartTime" so that it is an exported identifier. This required adding the `json:"startTime"` structure field tag so that the encoding/json package correctly performs JSON encoding (i.e. the correct property name --> startTime).
2014-05-21 11:19:56 -04:00
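
The rename above works because Go's encoding/json package serializes only exported struct fields, and the struct tag then preserves the original lowercase property name on the wire. A minimal sketch with hypothetical types (not etcd's actual stats structs):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Illustrative types, not etcd's. The unexported field is silently skipped
// by encoding/json; the exported, tagged field is emitted under the
// lowercase property name the API expects.
type before struct {
	Name      string `json:"leader"`
	startTime time.Time // unexported: never appears in the JSON output
}

type after struct {
	Name      string    `json:"leader"`
	StartTime time.Time `json:"startTime"` // exported, tagged to keep the wire name
}

func main() {
	t := time.Date(2014, 6, 1, 0, 0, 0, 0, time.UTC)
	b, _ := json.Marshal(before{Name: "etcd-1", startTime: t})
	a, _ := json.Marshal(after{Name: "etcd-1", StartTime: t})
	fmt.Println(string(b)) // {"leader":"etcd-1"}            — startTime is missing
	fmt.Println(string(a)) // {"leader":"etcd-1","startTime":"2014-06-01T00:00:00Z"}
}
```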
47 changed files with 1739 additions and 1398 deletions

View File

@ -1,3 +1,14 @@
v0.4.4
* Fix `--no-sync` flag in etcdctl (#83)
* Improved logging for machine removal (#844)
* Various documentation improvements (#858, #851, #847)
v0.4.3
* Avoid panic() on truncated or unexpected log data (#834, #833)
* Fix missing stats field (#807)
* Lengthen default peer removal delay to 30mins (#835)
* Reduce logging on heartbeat timeouts (#836)
v0.4.2
* Improvements to the clustering documents
* Set content-type properly on errors (#469)

View File

@ -2,7 +2,7 @@ FROM ubuntu:12.04
# Let's install go just like Docker (from source).
RUN apt-get update -q
RUN DEBIAN_FRONTEND=noninteractive apt-get install -qy build-essential curl git
RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz
RUN curl -s https://storage.googleapis.com/golang/go1.3.src.tar.gz | tar -v -C /usr/local -xz
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
ENV PATH /usr/local/go/bin:$PATH
ADD . /opt/etcd

View File

@ -843,13 +843,13 @@ The client is told the write was successful and the keyspace is updated.
Meanwhile F2 has partitioned from the network and will have an out-of-date version of the keyspace until the partition resolves.
Since F2 missed the most recent write, a client reading from F2 will have an out-of-date version of the keyspace.
## Lock Module (*Deprecated*)
## Lock Module (*Deprecated and Removed*)
The lock module is used to serialize access to resources used by clients.
Multiple clients can attempt to acquire a lock but only one can have it at a time.
Once the lock is released, the next client waiting for the lock will receive it.
**Warning:** This module is deprecated at v0.4. See [Modules][modules] for more details.
**Warning:** This module is deprecated and removed at v0.4. See [Modules][modules] for more details.
### Acquiring a Lock

View File

@ -29,16 +29,16 @@ The v2 API has a lot of features, we will categorize them in a few categories:
### Supported features matrix
| Client| [go-etcd](https://github.com/coreos/go-etcd) | [jetcd](https://github.com/diwakergupta/jetcd) | [python-etcd](https://github.com/jplana/python-etcd) | [python-etcd-client](https://github.com/dsoprea/PythonEtcdClient) | [node-etcd](https://github.com/stianeikeland/node-etcd) | [nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) | [etcd-ruby](https://github.com/ranjib/etcd-ruby) | [etcd-api](https://github.com/jdarcy/etcd-api) | [cetcd](https://github.com/dwwoelfel/cetcd) | [clj-etcd](https://github.com/rthomas/clj-etcd) | [etcetera](https://github.com/drusellers/etcetera)| [Etcd.jl](https://github.com/forio/Etcd.jl) |
| --- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| **HTTPS Auth** | Y | Y | Y | Y | Y | Y | - | - | - | - | - | - |
| **Reconnect** | Y | - | Y | Y | - | - | - | Y | - | - | - | - |
| **Mod/Lock** | - | - | Y | Y | - | - | - | - | - | - | - | Y |
| **Mod/Leader** | - | - | - | Y | - | - | - | - | - | - | - | Y |
| **GET Features** | F | B | F | F | F | F | F | B | F | G | F | F |
| **PUT Features** | F | B | F | F | F | F | F | G | F | G | F | F |
| **POST Features** | F | - | F | F | - | F | F | - | - | - | F | F |
| **DEL Features** | F | B | F | F | F | F | F | B | G | B | F | F |
| Client| [go-etcd](https://github.com/coreos/go-etcd) | [jetcd](https://github.com/diwakergupta/jetcd) | [python-etcd](https://github.com/jplana/python-etcd) | [python-etcd-client](https://github.com/dsoprea/PythonEtcdClient) | [node-etcd](https://github.com/stianeikeland/node-etcd) | [nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) | [etcd-ruby](https://github.com/ranjib/etcd-ruby) | [etcd-api](https://github.com/jdarcy/etcd-api) | [cetcd](https://github.com/dwwoelfel/cetcd) | [clj-etcd](https://github.com/rthomas/clj-etcd) | [etcetera](https://github.com/drusellers/etcetera)| [Etcd.jl](https://github.com/forio/Etcd.jl) | [p5-etcd](https://metacpan.org/release/Etcd)
| --- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| **HTTPS Auth** | Y | Y | Y | Y | Y | Y | - | - | - | - | - | - | - |
| **Reconnect** | Y | - | Y | Y | - | - | - | Y | - | - | - | - | - |
| **Mod/Lock** | - | - | Y | Y | - | - | - | - | - | - | - | Y | - |
| **Mod/Leader** | - | - | - | Y | - | - | - | - | - | - | - | Y | - |
| **GET Features** | F | B | F | F | F | F | F | B | F | G | F | F | F |
| **PUT Features** | F | B | F | F | F | F | F | G | F | G | F | F | F |
| **POST Features** | F | - | F | F | - | F | F | - | - | - | F | F | F |
| **DEL Features** | F | B | F | F | F | F | F | B | G | B | F | F | F |
**Legend**

View File

@ -8,6 +8,7 @@ For more information on how etcd can locate the cluster, see the [finding the cl
Please note - at least 3 nodes are required for [cluster availability][optimal-cluster-size].
[cluster-finding]: https://github.com/coreos/etcd/blob/master/Documentation/design/cluster-finding.md
[optimal-cluster-size]: https://github.com/coreos/etcd/blob/master/Documentation/optimal-cluster-size.md
## Using discovery.etcd.io
@ -27,8 +28,8 @@ Here's a full example:
```
TOKEN=$(curl https://discovery.etcd.io/new)
./etcd -name instance1 -peer-addr 10.1.2.3:7001 -addr 10.1.2.3:4001 -discovery $TOKEN
./etcd -name instance2 -peer-addr 10.1.2.4:7002 -addr 10.1.2.4:4002 -discovery $TOKEN
./etcd -name instance3 -peer-addr 10.1.2.5:7002 -addr 10.1.2.5:4002 -discovery $TOKEN
./etcd -name instance2 -peer-addr 10.1.2.4:7001 -addr 10.1.2.4:4001 -discovery $TOKEN
./etcd -name instance3 -peer-addr 10.1.2.5:7001 -addr 10.1.2.5:4001 -discovery $TOKEN
```
## Running Your Own Discovery Endpoint
@ -38,8 +39,8 @@ The discovery API communicates with a separate etcd cluster to store and retriev
```
TOKEN="testcluster"
./etcd -name instance1 -peer-addr 10.1.2.3:7001 -addr 10.1.2.3:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
./etcd -name instance2 -peer-addr 10.1.2.4:7002 -addr 10.1.2.4:4002 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
./etcd -name instance3 -peer-addr 10.1.2.5:7002 -addr 10.1.2.5:4002 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
./etcd -name instance2 -peer-addr 10.1.2.4:7001 -addr 10.1.2.4:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
./etcd -name instance3 -peer-addr 10.1.2.5:7001 -addr 10.1.2.5:4001 -discovery http://10.10.10.10:4001/v2/keys/$TOKEN
```
If you're interested in how the discovery API works behind the scenes, read about the [Discovery Protocol](https://github.com/coreos/etcd/blob/master/Documentation/discovery-protocol.md).
@ -52,8 +53,6 @@ The Discovery API submits the `-peer-addr` of each etcd instance to the configur
The discovery API will automatically clean up the address of a stale peer that is no longer part of the cluster. The TTL for this process is a week, which should be long enough to handle any extremely long outage you may encounter. There is no harm in having stale peers in the list until they are cleaned up, since an etcd instance only needs to connect to one valid peer in the cluster to join.
[discovery-design]: https://github.com/coreos/etcd/blob/master/Documentation/design/cluster-finding.md
## Lifetime of a Discovery URL
A discovery URL identifies a single etcd cluster. Do not re-use discovery URLs for new clusters.

View File

@ -1,6 +1,8 @@
# Etcd Configuration
Configuration options can be set in three places:
## Node Configuration
Individual node configuration options can be set in three places:
1. Command line flags
2. Environment variables
@ -10,6 +12,16 @@ Options set on the command line take precedence over all other sources.
Options set in environment variables take precedence over options set in
configuration files.
## Cluster Configuration
Cluster-wide settings are configured via the `/config` admin endpoint and additionally in the configuration file. Values contained in the configuration file will seed the cluster setting with the provided value. After the cluster is running, only the admin endpoint is used.
The full documentation is contained in the [API docs](https://github.com/coreos/etcd/blob/master/Documentation/api.md#cluster-config).
* `activeSize` - the maximum number of peers that can participate in the consensus protocol. Other peers will join as standbys.
* `removeDelay` - the minimum time in seconds that a machine has been observed to be unresponsive before it is removed from the cluster.
* `syncInterval` - the amount of time in seconds between cluster sync when it runs in standby mode.
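
As a quick illustration of reading these three settings back from a running cluster, here is a minimal Go sketch. It assumes a peer listening on 127.0.0.1:7001 and uses the `/v2/admin/config` path exercised by the tests in this change; the struct fields are an assumption based on the names documented above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// clusterConfig mirrors the three settings documented above; the exact
// field set is an assumption for illustration.
type clusterConfig struct {
	ActiveSize   int     `json:"activeSize"`
	RemoveDelay  float64 `json:"removeDelay"`
	SyncInterval float64 `json:"syncInterval"`
}

func main() {
	// The admin endpoint is served on the peer address (7001 by default).
	resp, err := http.Get("http://127.0.0.1:7001/v2/admin/config")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var cfg clusterConfig
	if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("activeSize=%d removeDelay=%.0fs syncInterval=%.0fs\n",
		cfg.ActiveSize, cfg.RemoveDelay, cfg.SyncInterval)
}
```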
## Command Line Flags
### Required
@ -41,6 +53,9 @@ configuration files.
* `-peer-election-timeout` - The number of milliseconds to wait before the leader is declared unhealthy.
* `-peer-heartbeat-interval` - The number of milliseconds in between heartbeat requests
* `-snapshot=false` - Disable log snapshots. Defaults to `true`.
* `-cluster-active-size` - The expected number of instances participating in the consensus protocol. Only applied if the etcd instance is the first peer in the cluster.
* `-cluster-remove-delay` - The delay before one node is removed from the cluster since it cannot be connected at all. Only applied if the etcd instance is the first peer in the cluster.
* `-cluster-sync-interval` - The interval between synchronization for standby-mode instance with the cluster. Only applied if the etcd instance is the first peer in the cluster.
* `-v` - Enable verbose logging. Defaults to `false`.
* `-vv` - Enable very verbose logging. Defaults to `false`.
* `-version` - Print the version and exit.
@ -76,6 +91,11 @@ bind_addr = "127.0.0.1:7001"
ca_file = ""
cert_file = ""
key_file = ""
[cluster]
active_size = 9
remove_delay = 1800.0
sync_interval = 5.0
```
## Environment Variables
@ -105,3 +125,6 @@ key_file = ""
* `ETCD_PEER_CERT_FILE`
* `ETCD_PEER_KEY_FILE`
* `ETCD_PEER_ELECTION_TIMEOUT`
* `ETCD_CLUSTER_ACTIVE_SIZE`
* `ETCD_CLUSTER_REMOVE_DELAY`
* `ETCD_CLUSTER_SYNC_INTERVAL`

View File

@ -18,8 +18,8 @@ If there are not enough peers to meet the active size, standbys will send join r
If there are more peers than the target active size then peers are removed by the leader and will become standbys.
The remove delay specifies how long the cluster should wait before removing a dead peer.
By default this is 5 seconds.
If a peer is inactive for 5 seconds then the peer is removed.
By default this is 30 minutes.
If a peer is inactive for 30 minutes then the peer is removed.
The standby sync interval specifies the synchronization interval of standbys with the cluster.
By default this is 5 seconds.

View File

@ -90,3 +90,6 @@ A detailed recap of client functionalities can be found in the [clients compatib
- [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
- [fleet](https://github.com/coreos/fleet) - Distributed init system
- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) - Container cluster manager.
- [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
- [duedil-ltd/discodns](https://github.com/duedil-ltd/discodns) - Simple DNS nameserver using etcd as a database for names and records.

View File

@ -1,7 +1,7 @@
## Modules
etcd has a number of modules that are built on top of the core etcd API.
These modules provide things like dashboards, locks and leader election.
These modules provide things like dashboards, locks and leader election (removed).
**Warning**: Modules are deprecated from v0.4 until we have a solid base we can apply them back onto.
For now, we are choosing to focus on raft algorithm and core etcd to make sure that it works correctly and fast.
@ -81,7 +81,7 @@ curl -X DELETE http://127.0.0.1:4001/mod/v2/lock/customer1?value=bar
```
### Leader Election
### Leader Election (Deprecated and Removed in 0.4)
The Leader Election module wraps the Lock module to allow clients to come to consensus on a single value.
This is useful when you want one server to process at a time but allow other servers to fail over.

View File

@ -1,4 +1,4 @@
ectd is being used successfully by many companies in production. It is,
etcd is being used successfully by many companies in production. It is,
however, under active development and systems like etcd are difficult to get
correct. If you are comfortable with bleeding-edge software please use etcd and
provide us with the feedback and testing young software needs.

View File

@ -1,6 +1,6 @@
# etcd
README version 0.4.2
README version 0.4.4
A highly-available key value store for shared configuration and service discovery.
etcd is inspired by [Apache ZooKeeper][zookeeper] and [doozer][doozer], with a focus on being:
@ -95,7 +95,7 @@ You have successfully started an etcd on a single machine and written a key to t
## Contact
- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) oon freenode.org
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) on freenode.org
- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/issues/milestones)
- Bugs: [issues](https://github.com/coreos/etcd/issues)

View File

@ -1,85 +0,0 @@
package leader
import (
"fmt"
"testing"
"time"
"github.com/coreos/etcd/server"
"github.com/coreos/etcd/tests"
"github.com/coreos/etcd/third_party/github.com/stretchr/testify/assert"
)
// Ensure that a leader can be set and read.
func TestModLeaderSet(t *testing.T) {
tests.RunServer(func(s *server.Server) {
// Set leader.
body, status, err := testSetLeader(s, "foo", "xxx", 10)
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "2")
// Check that the leader is set.
body, status, err = testGetLeader(s, "foo")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "xxx")
// Delete leader.
body, status, err = testDeleteLeader(s, "foo", "xxx")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "")
// Check that the leader is removed.
body, status, err = testGetLeader(s, "foo")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "")
})
}
// Ensure that a leader can be renewed.
func TestModLeaderRenew(t *testing.T) {
tests.RunServer(func(s *server.Server) {
// Set leader.
body, status, err := testSetLeader(s, "foo", "xxx", 2)
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "2")
time.Sleep(1 * time.Second)
// Renew leader.
body, status, err = testSetLeader(s, "foo", "xxx", 3)
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "2")
time.Sleep(2 * time.Second)
// Check that the leader is set.
body, status, err = testGetLeader(s, "foo")
assert.NoError(t, err)
assert.Equal(t, status, 200)
assert.Equal(t, body, "xxx")
})
}
func testSetLeader(s *server.Server, key string, name string, ttl int) (string, int, error) {
resp, err := tests.PutForm(fmt.Sprintf("%s/mod/v2/leader/%s?name=%s&ttl=%d", s.URL(), key, name, ttl), nil)
ret := tests.ReadBody(resp)
return string(ret), resp.StatusCode, err
}
func testGetLeader(s *server.Server, key string) (string, int, error) {
resp, err := tests.Get(fmt.Sprintf("%s/mod/v2/leader/%s", s.URL(), key))
ret := tests.ReadBody(resp)
return string(ret), resp.StatusCode, err
}
func testDeleteLeader(s *server.Server, key string, name string) (string, int, error) {
resp, err := tests.DeleteForm(fmt.Sprintf("%s/mod/v2/leader/%s?name=%s", s.URL(), key, name), nil)
ret := tests.ReadBody(resp)
return string(ret), resp.StatusCode, err
}

View File

@ -12,7 +12,7 @@ const (
MinActiveSize = 3
// DefaultRemoveDelay is the default elapsed time before removal.
DefaultRemoveDelay = float64((5 * time.Second) / time.Second)
DefaultRemoveDelay = float64((30 * time.Minute) / time.Second)
// MinRemoveDelay is the minimum remove delay allowed.
MinRemoveDelay = float64((2 * time.Second) / time.Second)
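
For clarity, the Duration arithmetic in the constant above divides one `time.Duration` by another, yielding a unitless count of whole seconds; converting that to float64 gives the 1800.0 that the sample `remove_delay` in the configuration file reflects. A quick illustrative check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Duration / Duration is a plain integer ratio: here, a number of seconds.
	oldDelay := float64((5 * time.Second) / time.Second)
	newDelay := float64((30 * time.Minute) / time.Second)
	fmt.Println(oldDelay, newDelay) // 5 1800
}
```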
@ -25,7 +25,6 @@ const (
)
// ClusterConfig represents cluster-wide configuration settings.
// These settings can only be changed through Raft.
type ClusterConfig struct {
// ActiveSize is the maximum number of node that can join as Raft followers.
// Nodes that join the cluster after the limit is reached are standbys.

View File

@ -23,6 +23,10 @@ import (
)
const (
// MaxHeartbeatTimeoutBackoff is the maximum number of seconds before we warn
// the user again about a peer not accepting heartbeats.
MaxHeartbeatTimeoutBackoff = 15 * time.Second
// ThresholdMonitorTimeout is the time between log notifications that the
// Raft heartbeat is too close to the election timeout.
ThresholdMonitorTimeout = 5 * time.Second
@ -70,10 +74,18 @@ type PeerServer struct {
routineGroup sync.WaitGroup
timeoutThresholdChan chan interface{}
logBackoffs map[string]*logBackoff
metrics *metrics.Bucket
sync.Mutex
}
type logBackoff struct {
next time.Time
backoff time.Duration
count int
}
// TODO: find a good policy to do snapshot
type snapshotConf struct {
// Etcd will check if snapshot is need every checkingInterval
@ -97,6 +109,7 @@ func NewPeerServer(psConfig PeerServerConfig, client *Client, registry *Registry
serverStats: serverStats,
timeoutThresholdChan: make(chan interface{}, 1),
logBackoffs: make(map[string]*logBackoff),
metrics: mb,
}
@ -627,7 +640,7 @@ func (s *PeerServer) joinByPeer(server raft.Server, peer string, scheme string)
}
func (s *PeerServer) Stats() []byte {
s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.startTime).String()
s.serverStats.LeaderInfo.Uptime = time.Now().Sub(s.serverStats.LeaderInfo.StartTime).String()
// TODO: register state listener to raft to change this field
// rather than compare the state each time Stats() is called.
@ -687,11 +700,12 @@ func (s *PeerServer) raftEventLogger(event raft.Event) {
case raft.RemovePeerEventType:
log.Infof("%s: peer removed: '%v'", s.Config.Name, value)
case raft.HeartbeatIntervalEventType:
var name = "<unknown>"
if peer, ok := value.(*raft.Peer); ok {
name = peer.Name
peer, ok := value.(*raft.Peer)
if !ok {
log.Warnf("%s: heatbeat timeout from unknown peer", s.Config.Name)
return
}
log.Infof("%s: warning: heartbeat timed out: '%v'", s.Config.Name, name)
s.logHeartbeatTimeout(peer)
case raft.ElectionTimeoutThresholdEventType:
select {
case s.timeoutThresholdChan <- value:
@ -701,6 +715,35 @@ func (s *PeerServer) raftEventLogger(event raft.Event) {
}
}
// logHeartbeatTimeout logs about the edge triggered heartbeat timeout event
// only if we haven't warned within a reasonable interval.
func (s *PeerServer) logHeartbeatTimeout(peer *raft.Peer) {
b, ok := s.logBackoffs[peer.Name]
if !ok {
b = &logBackoff{time.Time{}, time.Second, 1}
s.logBackoffs[peer.Name] = b
}
if peer.LastActivity().After(b.next) {
b.next = time.Time{}
b.backoff = time.Second
b.count = 1
}
if b.next.After(time.Now()) {
b.count++
return
}
b.backoff = 2 * b.backoff
if b.backoff > MaxHeartbeatTimeoutBackoff {
b.backoff = MaxHeartbeatTimeoutBackoff
}
b.next = time.Now().Add(b.backoff)
log.Infof("%s: warning: heartbeat time out peer=%q missed=%d backoff=%q", s.Config.Name, peer.Name, b.count, b.backoff)
}
func (s *PeerServer) recordMetricEvent(event raft.Event) {
name := fmt.Sprintf("raft.event.%s", event.Type())
value := event.Value().(time.Duration)
@ -810,7 +853,7 @@ func (s *PeerServer) monitorActiveSize() {
// If we have more active nodes than we should then remove.
if peerCount > activeSize {
peer := peers[rand.Intn(len(peers))]
log.Infof("%s: removing: %v", s.Config.Name, peer)
log.Infof("%s: removing node: %v; peer number %d > expected size %d", s.Config.Name, peer, peerCount, activeSize)
if _, err := s.raftServer.Do(&RemoveCommand{Name: peer}); err != nil {
log.Infof("%s: warning: remove error: %v", s.Config.Name, err)
}

View File

@ -188,6 +188,7 @@ func (ps *PeerServer) RemoveHttpHandler(w http.ResponseWriter, req *http.Request
// Returns a JSON-encoded cluster configuration.
func (ps *PeerServer) getClusterConfigHttpHandler(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(ps.ClusterConfig())
}
@ -217,6 +218,7 @@ func (ps *PeerServer) setClusterConfigHttpHandler(w http.ResponseWriter, req *ht
log.Debugf("[recv] Update Cluster Config Request")
ps.server.Dispatch(c, w, req)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(ps.ClusterConfig())
}
@ -230,6 +232,7 @@ func (ps *PeerServer) getMachinesHttpHandler(w http.ResponseWriter, req *http.Re
}
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(&machines)
}
@ -237,6 +240,7 @@ func (ps *PeerServer) getMachinesHttpHandler(w http.ResponseWriter, req *http.Re
func (ps *PeerServer) getMachineHttpHandler(w http.ResponseWriter, req *http.Request) {
vars := mux.Vars(req)
m := ps.getMachineMessage(vars["name"], ps.raftServer.Leader())
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(m)
}
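
The ordering in the handlers above matters because Go's net/http sends headers on the first write to the ResponseWriter: Content-Type must be set before `json.NewEncoder(w).Encode(...)` runs, otherwise the sniffed default (`text/plain; charset=utf-8`) goes out instead. A stripped-down handler showing the same ordering (hypothetical payload, not the etcd code itself):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func configHandler(w http.ResponseWriter, r *http.Request) {
	// Set headers before the first write; after Encode starts writing the
	// body, further Header().Set calls have no effect.
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"activeSize":  9,
		"removeDelay": 1800.0,
	})
}

func main() {
	http.HandleFunc("/v2/admin/config", configHandler)
	log.Fatal(http.ListenAndServe(":7001", nil))
}
```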

View File

@ -13,9 +13,9 @@ type raftServerStats struct {
StartTime time.Time `json:"startTime"`
LeaderInfo struct {
Name string `json:"leader"`
Uptime string `json:"uptime"`
startTime time.Time
Name string `json:"leader"`
Uptime string `json:"uptime"`
StartTime time.Time `json:"startTime"`
} `json:"leaderInfo"`
RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"`
@ -43,7 +43,7 @@ func NewRaftServerStats(name string) *raftServerStats {
back: -1,
},
}
stats.LeaderInfo.startTime = time.Now()
stats.LeaderInfo.StartTime = time.Now()
return stats
}
@ -54,7 +54,7 @@ func (ss *raftServerStats) RecvAppendReq(leaderName string, pkgSize int) {
ss.State = raft.Follower
if leaderName != ss.LeaderInfo.Name {
ss.LeaderInfo.Name = leaderName
ss.LeaderInfo.startTime = time.Now()
ss.LeaderInfo.StartTime = time.Now()
}
ss.recvRateQueue.Insert(NewPackageStats(time.Now(), pkgSize))
@ -70,7 +70,7 @@ func (ss *raftServerStats) SendAppendReq(pkgSize int) {
if ss.State != raft.Leader {
ss.State = raft.Leader
ss.LeaderInfo.Name = ss.Name
ss.LeaderInfo.startTime = now
ss.LeaderInfo.StartTime = now
}
ss.sendRateQueue.Insert(NewPackageStats(now, pkgSize))

View File

@ -1,3 +1,3 @@
package server
const ReleaseVersion = "0.4.2"
const ReleaseVersion = "0.4.4"

View File

@ -56,6 +56,9 @@ Other Options:
-max-cluster-size Maximum number of nodes in the cluster.
-snapshot=false Disable log snapshots
-snapshot-count Number of transactions before issuing a snapshot.
-cluster-active-size Number of active nodes in the cluster.
-cluster-remove-delay Seconds before one node is removed.
-cluster-sync-interval Seconds between synchronizations for standby mode.
`
// Usage returns the usage message for etcd.

View File

@ -25,6 +25,7 @@ func TestClusterConfigSet(t *testing.T) {
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
body := tests.ReadBodyJSON(resp)
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
assert.Equal(t, body["activeSize"], 3)
assert.Equal(t, body["removeDelay"], 60)
}
@ -44,6 +45,7 @@ func TestClusterConfigReload(t *testing.T) {
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
body := tests.ReadBodyJSON(resp)
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
assert.Equal(t, body["activeSize"], 3)
assert.Equal(t, body["removeDelay"], 60)
@ -59,6 +61,7 @@ func TestClusterConfigReload(t *testing.T) {
resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
body = tests.ReadBodyJSON(resp)
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
assert.Equal(t, body["activeSize"], 3)
assert.Equal(t, body["removeDelay"], 60)
}
@ -76,6 +79,7 @@ func TestGetMachines(t *testing.T) {
t.FailNow()
}
assert.Equal(t, resp.StatusCode, 200)
assert.Equal(t, resp.Header.Get("Content-Type"), "application/json")
machines := make([]map[string]interface{}, 0)
b := tests.ReadBody(resp)
json.Unmarshal(b, &machines)

View File

@ -507,61 +507,61 @@ func TestReset(t *testing.T) {
func TestEncodeDecode1(t *testing.T) {
pb := initGoTest(false)
overify(t, pb,
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 0x20
"714000000000000000"+ // field 14, encoding 1, value 0x40
"78a019"+ // field 15, encoding 0, value 0xca0 = 3232
"8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
"b304"+ // field 70, encoding 3, start group
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // field 70, encoding 4, end group
"aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f") // field 103, encoding 0, 0x7f zigzag64
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 0x20
"714000000000000000"+ // field 14, encoding 1, value 0x40
"78a019"+ // field 15, encoding 0, value 0xca0 = 3232
"8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
"b304"+ // field 70, encoding 3, start group
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // field 70, encoding 4, end group
"aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f") // field 103, encoding 0, 0x7f zigzag64
}
// All required fields set, defaults provided.
func TestEncodeDecode2(t *testing.T) {
pb := initGoTest(true)
overify(t, pb,
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
}
@ -583,37 +583,37 @@ func TestEncodeDecode3(t *testing.T) {
pb.F_Sint64Defaulted = Int64(-64)
overify(t, pb,
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
}
@ -639,56 +639,56 @@ func TestEncodeDecode4(t *testing.T) {
pb.Optionalgroup = initGoTest_OptionalGroup()
overify(t, pb,
"0807"+ // field 1, encoding 0, value 7
"1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
"1807"+ // field 3, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"f00101"+ // field 30, encoding 0, value 1
"f80120"+ // field 31, encoding 0, value 32
"800240"+ // field 32, encoding 0, value 64
"8d02a00c0000"+ // field 33, encoding 5, value 3232
"91024019000000000000"+ // field 34, encoding 1, value 6464
"9802a0dd13"+ // field 35, encoding 0, value 323232
"a002c0ba27"+ // field 36, encoding 0, value 646464
"ad0200000042"+ // field 37, encoding 5, value 32.0
"b1020000000000005040"+ // field 38, encoding 1, value 64.0
"ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"d305"+ // start group field 90 level 1
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
"d405"+ // end group field 90 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
"f0123f"+ // field 302, encoding 0, value 63
"f8127f"+ // field 303, encoding 0, value 127
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
"0807"+ // field 1, encoding 0, value 7
"1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
"1807"+ // field 3, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"f00101"+ // field 30, encoding 0, value 1
"f80120"+ // field 31, encoding 0, value 32
"800240"+ // field 32, encoding 0, value 64
"8d02a00c0000"+ // field 33, encoding 5, value 3232
"91024019000000000000"+ // field 34, encoding 1, value 6464
"9802a0dd13"+ // field 35, encoding 0, value 323232
"a002c0ba27"+ // field 36, encoding 0, value 646464
"ad0200000042"+ // field 37, encoding 5, value 32.0
"b1020000000000005040"+ // field 38, encoding 1, value 64.0
"ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"d305"+ // start group field 90 level 1
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
"d405"+ // end group field 90 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
"f0123f"+ // field 302, encoding 0, value 63
"f8127f"+ // field 303, encoding 0, value 127
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
}
@ -712,71 +712,71 @@ func TestEncodeDecode5(t *testing.T) {
pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
overify(t, pb,
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"a00100"+ // field 20, encoding 0, value 0
"a00101"+ // field 20, encoding 0, value 1
"a80120"+ // field 21, encoding 0, value 32
"a80121"+ // field 21, encoding 0, value 33
"b00140"+ // field 22, encoding 0, value 64
"b00141"+ // field 22, encoding 0, value 65
"bd01a00c0000"+ // field 23, encoding 5, value 3232
"bd01050d0000"+ // field 23, encoding 5, value 3333
"c1014019000000000000"+ // field 24, encoding 1, value 6464
"c101a519000000000000"+ // field 24, encoding 1, value 6565
"c801a0dd13"+ // field 25, encoding 0, value 323232
"c80195ac14"+ // field 25, encoding 0, value 333333
"d001c0ba27"+ // field 26, encoding 0, value 646464
"d001b58928"+ // field 26, encoding 0, value 656565
"dd0100000042"+ // field 27, encoding 5, value 32.0
"dd0100000442"+ // field 27, encoding 5, value 33.0
"e1010000000000005040"+ // field 28, encoding 1, value 64.0
"e1010000000000405040"+ // field 28, encoding 1, value 65.0
"ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
"ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"8305"+ // start group field 80 level 1
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
"8405"+ // end group field 80 level 1
"8305"+ // start group field 80 level 1
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
"8405"+ // end group field 80 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"ca0c03"+"626967"+ // field 201, encoding 2, string "big"
"ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
"d00c40"+ // field 202, encoding 0, value 32
"d00c3f"+ // field 202, encoding 0, value -32
"d80c8001"+ // field 203, encoding 0, value 64
"d80c7f"+ // field 203, encoding 0, value -64
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
"2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"a00100"+ // field 20, encoding 0, value 0
"a00101"+ // field 20, encoding 0, value 1
"a80120"+ // field 21, encoding 0, value 32
"a80121"+ // field 21, encoding 0, value 33
"b00140"+ // field 22, encoding 0, value 64
"b00141"+ // field 22, encoding 0, value 65
"bd01a00c0000"+ // field 23, encoding 5, value 3232
"bd01050d0000"+ // field 23, encoding 5, value 3333
"c1014019000000000000"+ // field 24, encoding 1, value 6464
"c101a519000000000000"+ // field 24, encoding 1, value 6565
"c801a0dd13"+ // field 25, encoding 0, value 323232
"c80195ac14"+ // field 25, encoding 0, value 333333
"d001c0ba27"+ // field 26, encoding 0, value 646464
"d001b58928"+ // field 26, encoding 0, value 656565
"dd0100000042"+ // field 27, encoding 5, value 32.0
"dd0100000442"+ // field 27, encoding 5, value 33.0
"e1010000000000005040"+ // field 28, encoding 1, value 64.0
"e1010000000000405040"+ // field 28, encoding 1, value 65.0
"ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
"ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
"c00201"+ // field 40, encoding 0, value 1
"c80220"+ // field 41, encoding 0, value 32
"d00240"+ // field 42, encoding 0, value 64
"dd0240010000"+ // field 43, encoding 5, value 320
"e1028002000000000000"+ // field 44, encoding 1, value 640
"e8028019"+ // field 45, encoding 0, value 3200
"f0028032"+ // field 46, encoding 0, value 6400
"fd02e0659948"+ // field 47, encoding 5, value 314159.0
"81030000000050971041"+ // field 48, encoding 1, value 271828.0
"8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"8305"+ // start group field 80 level 1
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
"8405"+ // end group field 80 level 1
"8305"+ // start group field 80 level 1
"8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
"8405"+ // end group field 80 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"ca0c03"+"626967"+ // field 201, encoding 2, string "big"
"ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
"d00c40"+ // field 202, encoding 0, value 32
"d00c3f"+ // field 202, encoding 0, value -32
"d80c8001"+ // field 203, encoding 0, value 64
"d80c7f"+ // field 203, encoding 0, value -64
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
"90193f"+ // field 402, encoding 0, value 63
"98197f") // field 403, encoding 0, value 127
}
@ -796,43 +796,43 @@ func TestEncodeDecode6(t *testing.T) {
pb.F_Sint64RepeatedPacked = []int64{64, -64}
overify(t, pb,
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
"9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
"a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
"aa0308"+ // field 53, encoding 2, 8 bytes
"a00c0000050d0000"+ // value 3232, value 3333
"b20310"+ // field 54, encoding 2, 16 bytes
"4019000000000000a519000000000000"+ // value 6464, value 6565
"ba0306"+ // field 55, encoding 2, 6 bytes
"a0dd1395ac14"+ // value 323232, value 333333
"c20306"+ // field 56, encoding 2, 6 bytes
"c0ba27b58928"+ // value 646464, value 656565
"ca0308"+ // field 57, encoding 2, 8 bytes
"0000004200000442"+ // value 32.0, value 33.0
"d20310"+ // field 58, encoding 2, 16 bytes
"00000000000050400000000000405040"+ // value 64.0, value 65.0
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"b21f02"+ // field 502, encoding 2, 2 bytes
"403f"+ // value 32, value -32
"ba1f03"+ // field 503, encoding 2, 3 bytes
"80017f") // value 64, value -64
"0807"+ // field 1, encoding 0, value 7
"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
"5001"+ // field 10, encoding 0, value 1
"5803"+ // field 11, encoding 0, value 3
"6006"+ // field 12, encoding 0, value 6
"6d20000000"+ // field 13, encoding 5, value 32
"714000000000000000"+ // field 14, encoding 1, value 64
"78a019"+ // field 15, encoding 0, value 3232
"8001c032"+ // field 16, encoding 0, value 6464
"8d0100004a45"+ // field 17, encoding 5, value 3232.0
"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
"9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
"9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
"9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
"a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
"aa0308"+ // field 53, encoding 2, 8 bytes
"a00c0000050d0000"+ // value 3232, value 3333
"b20310"+ // field 54, encoding 2, 16 bytes
"4019000000000000a519000000000000"+ // value 6464, value 6565
"ba0306"+ // field 55, encoding 2, 6 bytes
"a0dd1395ac14"+ // value 323232, value 333333
"c20306"+ // field 56, encoding 2, 6 bytes
"c0ba27b58928"+ // value 646464, value 656565
"ca0308"+ // field 57, encoding 2, 8 bytes
"0000004200000442"+ // value 32.0, value 33.0
"d20310"+ // field 58, encoding 2, 16 bytes
"00000000000050400000000000405040"+ // value 64.0, value 65.0
"b304"+ // start group field 70 level 1
"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
"b404"+ // end group field 70 level 1
"aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
"b8067f"+ // field 103, encoding 0, 0x7f zigzag64
"b21f02"+ // field 502, encoding 2, 2 bytes
"403f"+ // value 32, value -32
"ba1f03"+ // field 503, encoding 2, 3 bytes
"80017f") // value 64, value -64
}
// Test that we can encode empty bytes fields.
@ -898,13 +898,13 @@ func TestSkippingUnrecognizedFields(t *testing.T) {
// Now new a GoSkipTest record.
skip := &GoSkipTest{
SkipInt32: Int32(32),
SkipFixed32: Uint32(3232),
SkipFixed64: Uint64(6464),
SkipString: String("skipper"),
SkipInt32: Int32(32),
SkipFixed32: Uint32(3232),
SkipFixed64: Uint64(6464),
SkipString: String("skipper"),
Skipgroup: &GoSkipTest_SkipGroup{
GroupInt32: Int32(75),
GroupString: String("wxyz"),
GroupInt32: Int32(75),
GroupString: String("wxyz"),
},
}
@ -944,8 +944,8 @@ func TestSkippingUnrecognizedFields(t *testing.T) {
func TestSubmessageUnrecognizedFields(t *testing.T) {
nm := &NewMessage{
Nested: &NewMessage_Nested{
Name: String("Nigel"),
FoodGroup: String("carbs"),
Name: String("Nigel"),
FoodGroup: String("carbs"),
},
}
b, err := Marshal(nm)
@ -960,9 +960,9 @@ func TestSubmessageUnrecognizedFields(t *testing.T) {
}
exp := &OldMessage{
Nested: &OldMessage_Nested{
Name: String("Nigel"),
Name: String("Nigel"),
// normal protocol buffer users should not do this
XXX_unrecognized: []byte("\x12\x05carbs"),
XXX_unrecognized: []byte("\x12\x05carbs"),
},
}
if !Equal(om, exp) {
@ -999,7 +999,7 @@ func TestBigRepeated(t *testing.T) {
pb := initGoTest(true)
// Create the arrays
const N = 50 // Internally the library starts much smaller.
const N = 50 // Internally the library starts much smaller.
pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
pb.F_Sint64Repeated = make([]int64, N)
pb.F_Sint32Repeated = make([]int32, N)
@ -1047,7 +1047,7 @@ func TestBigRepeated(t *testing.T) {
// Check the checkable values
for i := uint64(0); i < N; i++ {
if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
t.Error("pbd.Repeatedgroup bad")
}
var x uint64
@ -1099,7 +1099,7 @@ func TestBigRepeated(t *testing.T) {
if pbd.F_BoolRepeated[i] != (i%2 == 0) {
t.Error("pbd.F_BoolRepeated bad", x, i)
}
if pbd.RepeatedField[i] == nil { // TODO: more checking?
if pbd.RepeatedField[i] == nil { // TODO: more checking?
t.Error("pbd.RepeatedField bad")
}
}
@ -1159,8 +1159,8 @@ func TestProto1RepeatedGroup(t *testing.T) {
pb := &MessageList{
Message: []*MessageList_Message{
{
Name: String("blah"),
Count: Int32(7),
Name: String("blah"),
Count: Int32(7),
},
// NOTE: pb.Message[1] is a nil
nil,
@ -1240,9 +1240,9 @@ type NNIMessage struct {
nni nonNillableInt
}
func (*NNIMessage) Reset() {}
func (*NNIMessage) String() string { return "" }
func (*NNIMessage) ProtoMessage() {}
func (*NNIMessage) Reset() {}
func (*NNIMessage) String() string { return "" }
func (*NNIMessage) ProtoMessage() {}
// A type that implements the Marshaler interface and is nillable.
type nillableMessage struct {
@ -1257,9 +1257,9 @@ type NMMessage struct {
nm *nillableMessage
}
func (*NMMessage) Reset() {}
func (*NMMessage) String() string { return "" }
func (*NMMessage) ProtoMessage() {}
func (*NMMessage) Reset() {}
func (*NMMessage) String() string { return "" }
func (*NMMessage) ProtoMessage() {}
// Verify a type that uses the Marshaler interface, but has a nil pointer.
func TestNilMarshaler(t *testing.T) {
@ -1273,7 +1273,7 @@ func TestNilMarshaler(t *testing.T) {
// Try a struct with a Marshaler field that is not nillable.
nnim := new(NNIMessage)
nnim.nni = 7
var _ Marshaler = nnim.nni // verify it is truly a Marshaler
var _ Marshaler = nnim.nni // verify it is truly a Marshaler
if _, err := Marshal(nnim); err != nil {
t.Error("unexpected error marshaling nnim: ", err)
}
@ -1286,23 +1286,23 @@ func TestAllSetDefaults(t *testing.T) {
F_Nan: Float32(1.7),
}
expected := &Defaults{
F_Bool: Bool(true),
F_Int32: Int32(32),
F_Int64: Int64(64),
F_Fixed32: Uint32(320),
F_Fixed64: Uint64(640),
F_Uint32: Uint32(3200),
F_Uint64: Uint64(6400),
F_Float: Float32(314159),
F_Double: Float64(271828),
F_String: String(`hello, "world!"` + "\n"),
F_Bytes: []byte("Bignose"),
F_Sint32: Int32(-32),
F_Sint64: Int64(-64),
F_Enum: Defaults_GREEN.Enum(),
F_Pinf: Float32(float32(math.Inf(1))),
F_Ninf: Float32(float32(math.Inf(-1))),
F_Nan: Float32(1.7),
F_Bool: Bool(true),
F_Int32: Int32(32),
F_Int64: Int64(64),
F_Fixed32: Uint32(320),
F_Fixed64: Uint64(640),
F_Uint32: Uint32(3200),
F_Uint64: Uint64(6400),
F_Float: Float32(314159),
F_Double: Float64(271828),
F_String: String(`hello, "world!"` + "\n"),
F_Bytes: []byte("Bignose"),
F_Sint32: Int32(-32),
F_Sint64: Int64(-64),
F_Enum: Defaults_GREEN.Enum(),
F_Pinf: Float32(float32(math.Inf(1))),
F_Ninf: Float32(float32(math.Inf(-1))),
F_Nan: Float32(1.7),
}
SetDefaults(m)
if !Equal(m, expected) {
@ -1323,16 +1323,16 @@ func TestSetDefaultsWithSetField(t *testing.T) {
func TestSetDefaultsWithSubMessage(t *testing.T) {
m := &OtherMessage{
Key: Int64(123),
Key: Int64(123),
Inner: &InnerMessage{
Host: String("gopher"),
},
}
expected := &OtherMessage{
Key: Int64(123),
Key: Int64(123),
Inner: &InnerMessage{
Host: String("gopher"),
Port: Int32(4000),
Host: String("gopher"),
Port: Int32(4000),
},
}
SetDefaults(m)
@ -1375,12 +1375,12 @@ func TestMaximumTagNumber(t *testing.T) {
func TestJSON(t *testing.T) {
m := &MyMessage{
Count: Int32(4),
Pet: []string{"bunny", "kitty"},
Count: Int32(4),
Pet: []string{"bunny", "kitty"},
Inner: &InnerMessage{
Host: String("cauchy"),
},
Bikeshed: MyMessage_GREEN.Enum(),
Bikeshed: MyMessage_GREEN.Enum(),
}
const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
@ -1413,7 +1413,7 @@ func TestJSON(t *testing.T) {
}
func TestBadWireType(t *testing.T) {
b := []byte{7<<3 | 6} // field 7, wire type 6
b := []byte{7<<3 | 6} // field 7, wire type 6
pb := new(OtherMessage)
if err := Unmarshal(b, pb); err == nil {
t.Errorf("Unmarshal did not fail")
@ -1610,10 +1610,10 @@ func TestUnmarshalMergesMessages(t *testing.T) {
// If a nested message occurs twice in the input,
// the fields should be merged when decoding.
a := &OtherMessage{
Key: Int64(123),
Key: Int64(123),
Inner: &InnerMessage{
Host: String("polhode"),
Port: Int32(1234),
Host: String("polhode"),
Port: Int32(1234),
},
}
aData, err := Marshal(a)
@ -1621,10 +1621,10 @@ func TestUnmarshalMergesMessages(t *testing.T) {
t.Fatalf("Marshal(a): %v", err)
}
b := &OtherMessage{
Weight: Float32(1.2),
Weight: Float32(1.2),
Inner: &InnerMessage{
Host: String("herpolhode"),
Connected: Bool(true),
Host: String("herpolhode"),
Connected: Bool(true),
},
}
bData, err := Marshal(b)
@ -1632,12 +1632,12 @@ func TestUnmarshalMergesMessages(t *testing.T) {
t.Fatalf("Marshal(b): %v", err)
}
want := &OtherMessage{
Key: Int64(123),
Weight: Float32(1.2),
Key: Int64(123),
Weight: Float32(1.2),
Inner: &InnerMessage{
Host: String("herpolhode"),
Port: Int32(1234),
Connected: Bool(true),
Host: String("herpolhode"),
Port: Int32(1234),
Connected: Bool(true),
},
}
got := new(OtherMessage)
@ -1651,8 +1651,8 @@ func TestUnmarshalMergesMessages(t *testing.T) {
func TestEncodingSizes(t *testing.T) {
tests := []struct {
m Message
n int
m Message
n int
}{
{&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
{&Defaults{F_Int32: Int32(math.MinInt32)}, 6},
@ -1676,22 +1676,22 @@ func TestRequiredNotSetError(t *testing.T) {
pb.F_Int32Required = nil
pb.F_Int64Required = nil
expected := "0807" + // field 1, encoding 0, value 7
"2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
"5001" + // field 10, encoding 0, value 1
"6d20000000" + // field 13, encoding 5, value 0x20
"714000000000000000" + // field 14, encoding 1, value 0x40
"78a019" + // field 15, encoding 0, value 0xca0 = 3232
"8001c032" + // field 16, encoding 0, value 0x1940 = 6464
"8d0100004a45" + // field 17, encoding 5, value 3232.0
"9101000000000040b940" + // field 18, encoding 1, value 6464.0
"9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
"b304" + // field 70, encoding 3, start group
"ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
"b404" + // field 70, encoding 4, end group
"aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
"b0063f" + // field 102, encoding 0, 0x3f zigzag32
"b8067f" // field 103, encoding 0, 0x7f zigzag64
expected := "0807" + // field 1, encoding 0, value 7
"2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
"5001" + // field 10, encoding 0, value 1
"6d20000000" + // field 13, encoding 5, value 0x20
"714000000000000000" + // field 14, encoding 1, value 0x40
"78a019" + // field 15, encoding 0, value 0xca0 = 3232
"8001c032" + // field 16, encoding 0, value 0x1940 = 6464
"8d0100004a45" + // field 17, encoding 5, value 3232.0
"9101000000000040b940" + // field 18, encoding 1, value 6464.0
"9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
"b304" + // field 70, encoding 3, start group
"ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
"b404" + // field 70, encoding 4, end group
"aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
"b0063f" + // field 102, encoding 0, 0x3f zigzag32
"b8067f" // field 103, encoding 0, 0x7f zigzag64
o := old()
bytes, err := Marshal(pb)
@ -1751,7 +1751,7 @@ func fuzzUnmarshal(t *testing.T, data []byte) {
func testMsg() *GoTest {
pb := initGoTest(true)
const N = 1000 // Internally the library starts much smaller.
const N = 1000 // Internally the library starts much smaller.
pb.F_Int32Repeated = make([]int32, N)
pb.F_DoubleRepeated = make([]float64, N)
for i := 0; i < N; i++ {
@ -1869,13 +1869,13 @@ func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
b.StopTimer()
pb := initGoTestField()
skip := &GoSkipTest{
SkipInt32: Int32(32),
SkipFixed32: Uint32(3232),
SkipFixed64: Uint64(6464),
SkipString: String("skipper"),
SkipInt32: Int32(32),
SkipFixed32: Uint32(3232),
SkipFixed64: Uint64(6464),
SkipString: String("skipper"),
Skipgroup: &GoSkipTest_SkipGroup{
GroupInt32: Int32(75),
GroupString: String("wxyz"),
GroupInt32: Int32(75),
GroupString: String("wxyz"),
},
}

View File

@ -83,9 +83,14 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i))
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
emOut := out.Addr().Interface().(extensionsMap)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
} else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
emOut := out.Addr().Interface().(extensionsBytes)
bIn := emIn.GetExtensions()
bOut := emOut.GetExtensions()
*bOut = append(*bOut, *bIn...)
}
uf := in.FieldByName("XXX_unrecognized")

View File

@ -40,13 +40,13 @@ import (
)
var cloneTestMessage = &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Pet: []string{"bunny", "kitty", "horsey"},
Count: proto.Int32(42),
Name: proto.String("Dave"),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &pb.InnerMessage{
Host: proto.String("niles"),
Port: proto.Int32(9099),
Connected: proto.Bool(true),
Host: proto.String("niles"),
Port: proto.Int32(9099),
Connected: proto.Bool(true),
},
Others: []*pb.OtherMessage{
{
@ -56,7 +56,7 @@ var cloneTestMessage = &pb.MyMessage{
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
}
func init() {
@ -99,17 +99,17 @@ var mergeTests = []struct {
Name: proto.String("Dave"),
},
want: &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Count: proto.Int32(42),
Name: proto.String("Dave"),
},
},
{
src: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("hey"),
Connected: proto.Bool(true),
Host: proto.String("hey"),
Connected: proto.Bool(true),
},
Pet: []string{"horsey"},
Pet: []string{"horsey"},
Others: []*pb.OtherMessage{
{
Value: []byte("some bytes"),
@ -118,10 +118,10 @@ var mergeTests = []struct {
},
dst: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("niles"),
Port: proto.Int32(9099),
Host: proto.String("niles"),
Port: proto.Int32(9099),
},
Pet: []string{"bunny", "kitty"},
Pet: []string{"bunny", "kitty"},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(31415926535),
@ -134,11 +134,11 @@ var mergeTests = []struct {
},
want: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("hey"),
Connected: proto.Bool(true),
Port: proto.Int32(9099),
Host: proto.String("hey"),
Connected: proto.Bool(true),
Port: proto.Int32(9099),
},
Pet: []string{"bunny", "kitty", "horsey"},
Pet: []string{"bunny", "kitty", "horsey"},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(31415926535),
@ -158,13 +158,13 @@ var mergeTests = []struct {
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham")},
RepBytes: [][]byte{[]byte("sham")},
},
want: &pb.MyMessage{
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
},
},
}

View File

@ -235,12 +235,6 @@ func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer,
ptr := structPointer_Bytes(base, unrecField)
if *ptr == nil {
// This is the first skipped element,
// allocate a new buffer.
*ptr = o.bufalloc()
}
// Add the skipped field to struct field
obuf := o.buf
@ -381,9 +375,14 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
if ee, ok := e.(extensionsMap); ok {
ext := ee.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
ee.ExtensionMap()[int32(tag)] = ext
} else if ee, ok := e.(extensionsBytes); ok {
ext := ee.GetExtensions()
*ext = append(*ext, o.buf[oi:o.index]...)
}
}
continue
}

View File

@ -221,6 +221,10 @@ func Marshal(pb Message) ([]byte, error) {
if err != nil && !state.shouldContinue(err, nil) {
return nil, err
}
if p.buf == nil && err == nil {
// Return a non-nil slice on success.
return []byte{}, nil
}
return p.buf, err
}
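The effect of the added nil check, as a sketch (Empty is a hypothetical message type with nothing set; any message that encodes to zero bytes behaves the same):

// Sketch only: Empty stands in for a message whose encoding is zero bytes.
b, err := Marshal(&Empty{})
if err == nil && b != nil && len(b) == 0 {
	// Previously Marshal could hand back a nil slice here; callers can now
	// rely on a non-nil, zero-length result on success.
}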
@ -400,23 +404,8 @@ func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
return nil
}
// need the length before we can write out the message itself,
// so marshal into a separate byte buffer first.
obuf := o.buf
o.buf = o.bufalloc()
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(nbuf)
o.buffree(nbuf)
return state.err
return o.enc_len_struct(p.stype, p.sprop, structp, &state)
}
func size_struct_message(p *Properties, base structPointer) int {
@ -748,24 +737,14 @@ func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) err
continue
}
obuf := o.buf
o.buf = o.bufalloc()
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
o.buf = append(o.buf, p.tagcode...)
err := o.enc_len_struct(p.stype, p.sprop, structp, &state)
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
if err == ErrNil {
return ErrRepeatedHasNil
}
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(nbuf)
o.buffree(nbuf)
}
return state.err
}
@ -923,6 +902,36 @@ func size_struct(t reflect.Type, prop *StructProperties, base structPointer) (n
return
}
var zeroes [20]byte // longer than any conceivable sizeVarint
// Encode a struct, preceded by its encoded length (as a varint).
func (o *Buffer) enc_len_struct(t reflect.Type, prop *StructProperties, base structPointer, state *errorState) error {
iLen := len(o.buf)
o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
iMsg := len(o.buf)
err := o.enc_struct(t, prop, base)
if err != nil && !state.shouldContinue(err, nil) {
return err
}
lMsg := len(o.buf) - iMsg
lLen := sizeVarint(uint64(lMsg))
switch x := lLen - (iMsg - iLen); {
case x > 0: // actual length is x bytes larger than the space we reserved
// Move msg x bytes right.
o.buf = append(o.buf, zeroes[:x]...)
copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
case x < 0: // actual length is x bytes smaller than the space we reserved
// Move msg x bytes left.
copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
o.buf = o.buf[:len(o.buf)+x] // x is negative
}
// Encode the length in the reserved space.
o.buf = o.buf[:iLen]
o.EncodeVarint(uint64(lMsg))
o.buf = o.buf[:len(o.buf)+lMsg]
return state.err
}
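For reference, the same reserve-then-shift idea as a standalone sketch, independent of Buffer (prependLen and encodeVarintSketch are stand-in helpers, not the package's API):

// prependLen returns payload preceded by its varint-encoded length, reserving
// four bytes up front (as enc_len_struct does) and shifting the payload if the
// real varint turns out shorter or longer than the reservation.
func prependLen(payload []byte) []byte {
	buf := make([]byte, 0, len(payload)+4)
	buf = append(buf, 0, 0, 0, 0) // reserved for the length varint
	iMsg := len(buf)
	buf = append(buf, payload...)
	lMsg := len(buf) - iMsg
	lenBytes := encodeVarintSketch(uint64(lMsg))
	switch x := len(lenBytes) - iMsg; {
	case x > 0: // need more room: shift the payload right
		buf = append(buf, make([]byte, x)...)
		copy(buf[iMsg+x:], buf[iMsg:iMsg+lMsg])
	case x < 0: // reserved too much: shift the payload left and shrink
		copy(buf[iMsg+x:], buf[iMsg:iMsg+lMsg])
		buf = buf[:len(buf)+x]
	}
	copy(buf, lenBytes) // the reservation now fits the varint exactly
	return buf
}

func encodeVarintSketch(v uint64) []byte {
	var b []byte
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}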
// errorState maintains the first error that occurs and updates that error
// with additional context.
type errorState struct {

View File

@ -44,6 +44,24 @@ type Sizer interface {
Size() int
}
func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error {
s := *structPointer_Bytes(base, p.field)
if s == nil {
return ErrNil
}
o.buf = append(o.buf, s...)
return nil
}
func size_ext_slice_byte(p *Properties, base structPointer) (n int) {
s := *structPointer_Bytes(base, p.field)
if s == nil {
return 0
}
n += len(s)
return
}
// Encode a reference to bool pointer.
func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error {
v := structPointer_RefBool(base, p.field)
@ -156,23 +174,8 @@ func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error
return nil
}
// need the length before we can write out the message itself,
// so marshal into a separate byte buffer first.
obuf := o.buf
o.buf = o.bufalloc()
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(nbuf)
o.buffree(nbuf)
return nil
return o.enc_len_struct(p.stype, p.sprop, structp, &state)
}
// TODO: this code was copied; please fix it

@ -222,26 +225,17 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer)
continue
}
obuf := o.buf
o.buf = o.bufalloc()
err := o.enc_struct(p.stype, p.sprop, structp)
nbuf := o.buf
o.buf = obuf
o.buf = append(o.buf, p.tagcode...)
err := o.enc_len_struct(p.stype, p.sprop, structp, &state)
if err != nil && !state.shouldContinue(err, nil) {
o.buffree(nbuf)
if err == ErrNil {
return ErrRepeatedHasNil
}
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(nbuf)
o.buffree(nbuf)
}
return nil
return state.err
}
// TODO: this code was copied; please fix it

View File

@ -85,9 +85,9 @@ func init() {
}
var EqualTests = []struct {
desc string
a, b Message
exp bool
desc string
a, b Message
exp bool
}{
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
@ -142,13 +142,13 @@ var EqualTests = []struct {
{
"message with group",
&pb.MyMessage{
Count: Int32(1),
Count: Int32(1),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: Int32(5),
},
},
&pb.MyMessage{
Count: Int32(1),
Count: Int32(1),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: Int32(5),
},

View File

@ -55,9 +55,18 @@ type ExtensionRange struct {
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
}
type extensionsMap interface {
extendableProto
ExtensionMap() map[int32]Extension
}
type extensionsBytes interface {
extendableProto
GetExtensions() *[]byte
}
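A minimal sketch (written as if inside this package, since these interfaces are unexported) of a gogo-style bytes-backed message; the type and its extension range are hypothetical, not from this repo's testdata:

// BytesExtMessage keeps its extensions wire-encoded in a []byte field.
type BytesExtMessage struct {
	XXX_extensions []byte
}

func (m *BytesExtMessage) Reset()         { *m = BytesExtMessage{} }
func (m *BytesExtMessage) String() string { return "BytesExtMessage" }
func (m *BytesExtMessage) ProtoMessage()  {}
func (m *BytesExtMessage) ExtensionRangeArray() []ExtensionRange {
	return []ExtensionRange{{Start: 100, End: 200}}
}
func (m *BytesExtMessage) GetExtensions() *[]byte { return &m.XXX_extensions }

// Compile-time check: this satisfies extensionsBytes (a map-backed message
// would instead provide ExtensionMap() map[int32]Extension).
var _ extensionsBytes = (*BytesExtMessage)(nil)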
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
@ -92,7 +101,15 @@ type Extension struct {
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
if ebase, ok := base.(extensionsMap); ok {
ebase.ExtensionMap()[id] = Extension{enc: b}
} else if ebase, ok := base.(extensionsBytes); ok {
clearExtension(base, id)
ext := ebase.GetExtensions()
*ext = append(*ext, b...)
} else {
panic("unreachable")
}
}
// isExtensionField returns true iff the given field number is in an extension range.
@ -210,51 +227,127 @@ func sizeExtensionMap(m map[int32]Extension) (n int) {
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field]
return ok
if epb, doki := pb.(extensionsMap); doki {
_, ok := epb.ExtensionMap()[extension.Field]
return ok
} else if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
buf := *ext
o := 0
for o < len(buf) {
tag, n := DecodeVarint(buf[o:])
fieldNum := int32(tag >> 3)
if int32(fieldNum) == extension.Field {
return true
}
wireType := int(tag & 0x7)
o += n
l, err := size(buf[o:], wireType)
if err != nil {
return false
}
o += l
}
return false
}
panic("unreachable")
}
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
ext := pb.GetExtensions()
for offset < len(*ext) {
tag, n1 := DecodeVarint((*ext)[offset:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
n2, err := size((*ext)[offset+n1:], wireType)
if err != nil {
panic(err)
}
newOffset := offset + n1 + n2
if fieldNum == theFieldNum {
*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
return offset
}
offset = newOffset
}
return -1
}
func clearExtension(pb extendableProto, fieldNum int32) {
if epb, doki := pb.(extensionsMap); doki {
delete(epb.ExtensionMap(), fieldNum)
} else if epb, doki := pb.(extensionsBytes); doki {
offset := 0
for offset != -1 {
offset = deleteExtension(epb, fieldNum, offset)
}
} else {
panic("unreachable")
}
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field)
clearExtension(pb, extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension.
// If the returned extension is modified, SetExtension must be called
// for the modifications to be reflected in pb.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
e, ok := pb.ExtensionMap()[extension.Field]
if !ok {
return nil, ErrMissingExtension
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
if epb, doki := pb.(extensionsMap); doki {
e, ok := epb.ExtensionMap()[extension.Field]
if !ok {
return nil, ErrMissingExtension
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
return e.value, nil
} else if epb, doki := pb.(extensionsBytes); doki {
ext := epb.GetExtensions()
o := 0
for o < len(*ext) {
tag, n := DecodeVarint((*ext)[o:])
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
l, err := size((*ext)[o+n:], wireType)
if err != nil {
return nil, err
}
if int32(fieldNum) == extension.Field {
v, err := decodeExtension((*ext)[o:o+n+l], extension)
if err != nil {
return nil, err
}
return v, nil
}
o += n + l
}
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
return e.value, nil
panic("unreachable")
}
// decodeExtension decodes an extension encoded in b.
@ -319,7 +412,21 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
return errors.New("proto: bad extension value type")
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
if epb, doki := pb.(extensionsMap); doki {
epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
} else if epb, doki := pb.(extensionsBytes); doki {
ClearExtension(pb, extension)
ext := epb.GetExtensions()
et := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
p := NewBuffer(nil)
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
*ext = append(*ext, p.buf...)
}
return nil
}
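Taken together with HasExtension and GetExtension above, the two storage flavors round-trip through the same calls; a sketch with hypothetical names (E_Greeting is an *ExtensionDesc whose ExtensionType is *string, and msg is any extendable message):

// Sketch only — E_Greeting and msg would come from generated code in real use.
if err := SetExtension(msg, E_Greeting, String("hi")); err != nil {
	// handle the error
}
if HasExtension(msg, E_Greeting) {
	if v, err := GetExtension(msg, E_Greeting); err == nil {
		greeting := *(v.(*string)) // "hi"
		_ = greeting
	}
}
ClearExtension(msg, E_Greeting)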

View File

@ -31,6 +31,7 @@ import (
"fmt"
"reflect"
"sort"
"strings"
)
func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
@ -58,6 +59,48 @@ func SizeOfExtensionMap(m map[int32]Extension) (n int) {
return sizeExtensionMap(m)
}
type sortableMapElem struct {
field int32
ext Extension
}
func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
s := make(sortableExtensions, 0, len(m))
for k, v := range m {
s = append(s, &sortableMapElem{field: k, ext: v})
}
return s
}
type sortableExtensions []*sortableMapElem
func (this sortableExtensions) Len() int { return len(this) }
func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
func (this sortableExtensions) String() string {
sort.Sort(this)
ss := make([]string, len(this))
for i := range this {
ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
}
return "map[" + strings.Join(ss, ",") + "]"
}
func StringFromExtensionsMap(m map[int32]Extension) string {
return newSortableExtensionsFromMap(m).String()
}
func StringFromExtensionsBytes(ext []byte) string {
m, err := BytesToExtensionsMap(ext)
if err != nil {
panic(err)
}
return StringFromExtensionsMap(m)
}
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
if err := encodeExtensionMap(m); err != nil {
return 0, err
@ -83,6 +126,58 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
return m[id].enc, nil
}
func size(buf []byte, wire int) (int, error) {
switch wire {
case WireVarint:
_, n := DecodeVarint(buf)
return n, nil
case WireFixed64:
return 8, nil
case WireBytes:
v, n := DecodeVarint(buf)
return int(v) + n, nil
case WireFixed32:
return 4, nil
case WireStartGroup:
offset := 0
for {
u, n := DecodeVarint(buf[offset:])
fwire := int(u & 0x7)
offset += n
if fwire == WireEndGroup {
return offset, nil
}
s, err := size(buf[offset:], wire)
if err != nil {
return 0, err
}
offset += s
}
}
return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
}
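A quick sanity check of the WireBytes branch (DecodeVarint and WireBytes are the package's exported names; the trace itself is only illustrative):

buf := []byte{0x02, 'h', 'i'}  // a length-delimited payload "hi", key byte already consumed
v, n := DecodeVarint(buf)      // v == 2 (payload length), n == 1 (prefix length)
total := int(v) + n            // 3 — what size(buf, WireBytes) reports
_ = total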
func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
m := make(map[int32]Extension)
i := 0
for i < len(buf) {
tag, n := DecodeVarint(buf[i:])
if n <= 0 {
return nil, fmt.Errorf("unable to decode varint")
}
fieldNum := int32(tag >> 3)
wireType := int(tag & 0x7)
l, err := size(buf[i+n:], wireType)
if err != nil {
return nil, err
}
end := i + int(l) + n
m[int32(fieldNum)] = Extension{enc: buf[i:end]}
i = end
}
return m, nil
}
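For example, a single varint field decodes like this (a sketch; the Extension's enc payload is only visible inside this package):

raw := []byte{0x08, 0x07} // key 0x08 = field 1, wire type 0; value 7
m, err := BytesToExtensionsMap(raw)
// err == nil, len(m) == 1, and m[1] holds the two raw bytes of field 1
_, _ = m, err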
func NewExtension(e []byte) Extension {
ee := Extension{enc: make([]byte, len(e))}
copy(ee.enc, e)

View File

@ -240,10 +240,8 @@ func GetStats() Stats { return stats }
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // write point
freelist [10][]byte // list of available buffers
nfreelist int // number of free buffers
buf []byte // encode/decode byte stream
index int // write point
// pools of basic types to amortize allocation.
bools []bool
@ -260,20 +258,11 @@ type Buffer struct {
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
p := new(Buffer)
if e == nil {
e = p.bufalloc()
}
p.buf = e
p.index = 0
return p
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
if p.buf == nil {
p.buf = p.bufalloc()
}
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
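With the freelist gone, a Buffer obtained from NewBuffer(nil) simply lets append allocate and grow buf on demand, e.g.:

p := NewBuffer(nil)                 // buf starts nil; nothing is allocated eagerly
_ = p.EncodeVarint(300)             // 0xac 0x02
_ = p.EncodeRawBytes([]byte("hi"))  // 0x02 'h' 'i' (length-prefixed)
b := p.Bytes()                      // five bytes total
_ = b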
@ -288,44 +277,6 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
// Allocate a buffer for the Buffer.
func (p *Buffer) bufalloc() []byte {
if p.nfreelist > 0 {
// reuse an old one
p.nfreelist--
s := p.freelist[p.nfreelist]
return s[0:0]
}
// make a new one
s := make([]byte, 0, 16)
return s
}
// Free (and remember in freelist) a byte buffer for the Buffer.
func (p *Buffer) buffree(s []byte) {
if p.nfreelist < len(p.freelist) {
// Take next slot.
p.freelist[p.nfreelist] = s
p.nfreelist++
return
}
// Find the smallest.
besti := -1
bestl := len(s)
for i, b := range p.freelist {
if len(b) < bestl {
besti = i
bestl = len(b)
}
}
// Overwrite the smallest.
if besti >= 0 {
p.freelist[besti] = s
}
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/

View File

@ -51,10 +51,17 @@ func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interf
}
func copyUintPtr(oldptr, newptr uintptr, size int) {
for j := 0; j < size; j++ {
oldb := (*byte)(unsafe.Pointer(oldptr + uintptr(j)))
*(*byte)(unsafe.Pointer(newptr + uintptr(j))) = *oldb
}
oldbytes := make([]byte, 0)
oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes))
oldslice.Data = oldptr
oldslice.Len = size
oldslice.Cap = size
newbytes := make([]byte, 0)
newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes))
newslice.Data = newptr
newslice.Len = size
newslice.Cap = size
copy(newbytes, oldbytes)
}
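The new body builds slice headers over the two raw pointers so a single copy (a memmove) replaces the old byte-at-a-time loop. The same trick in isolation (a sketch, not the library's helper; it relies on reflect and unsafe exactly as the code above does):

// bytesAt views size bytes starting at ptr as a []byte, without copying.
func bytesAt(ptr uintptr, size int) []byte {
	var b []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	hdr.Data = ptr
	hdr.Len = size
	hdr.Cap = size
	return b
}

// copyUintPtr then reduces to: copy(bytesAt(newptr, size), bytesAt(oldptr, size))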
func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {

View File

@ -575,9 +575,15 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
if len(f.Tag.Get("protobuf")) > 0 {
p.enc = (*Buffer).enc_ext_slice_byte
p.dec = nil // not needed
p.size = size_ext_slice_byte
} else {
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)

View File

@ -58,8 +58,8 @@ func init() {
}
var SizeTests = []struct {
desc string
pb Message
desc string
pb Message
}{
{"empty", &pb.OtherMessage{}},
// Basic types.

File diff suppressed because it is too large.

View File

@ -79,6 +79,13 @@ type textWriter struct {
w writer
}
// textMarshaler is implemented by Messages that can marshal themselves.
// It is identical to encoding.TextMarshaler, introduced in Go 1.2,
// which will eventually replace it.
type textMarshaler interface {
MarshalText() (text []byte, err error)
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
@ -366,7 +373,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
}
}
w.indent()
if err := writeStruct(w, v); err != nil {
if tm, ok := v.Interface().(textMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
@ -555,7 +570,18 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m := ep.ExtensionMap()
var m map[int32]Extension
if em, ok := ep.(extensionsMap); ok {
m = em.ExtensionMap()
} else if em, ok := ep.(extensionsBytes); ok {
eb := em.GetExtensions()
var err error
m, err = BytesToExtensionsMap(*eb)
if err != nil {
return err
}
}
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
@ -653,6 +679,19 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
compact: compact,
}
if tm, ok := pb.(textMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil {
@ -666,7 +705,9 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return marshalText(w, pb, false) }
func MarshalText(w io.Writer, pb Message) error {
return marshalText(w, pb, false)
}
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string {

View File

@ -48,6 +48,13 @@ import (
"unicode/utf8"
)
// textUnmarshaler is implemented by Messages that can unmarshal themselves.
// It is identical to encoding.TextUnmarshaler, introduced in Go 1.2,
// which will eventually replace it.
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}
type ParseError struct {
Message string
Line int // 1-based line number
@ -686,6 +693,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError {
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement textUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
@ -704,6 +712,10 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) *ParseError {
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(textUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {

View File

@ -41,9 +41,9 @@ import (
)
type UnmarshalTextTest struct {
in string
err string // if "", no error expected
out *MyMessage
in string
err string // if "", no error expected
out *MyMessage
}
func buildExtStructTest(text string) UnmarshalTextTest {
@ -78,97 +78,97 @@ func buildExtRepStringTest(text string) UnmarshalTextTest {
var unMarshalTextTests = []UnmarshalTextTest{
// Basic
{
in: " count:42\n name:\"Dave\" ",
in: " count:42\n name:\"Dave\" ",
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
Count: Int32(42),
Name: String("Dave"),
},
},
// Empty quoted string
{
in: `count:42 name:""`,
in: `count:42 name:""`,
out: &MyMessage{
Count: Int32(42),
Name: String(""),
Count: Int32(42),
Name: String(""),
},
},
// Quoted string concatenation
{
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
// Quoted string with escaped apostrophe
{
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
out: &MyMessage{
Count: Int32(42),
Name: String("HOLIDAY - New Year's Day"),
Count: Int32(42),
Name: String("HOLIDAY - New Year's Day"),
},
},
// Quoted string with single quote
{
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
out: &MyMessage{
Count: Int32(42),
Name: String(`Roger "The Ramster" Ramjet`),
Count: Int32(42),
Name: String(`Roger "The Ramster" Ramjet`),
},
},
// Quoted string with all the accepted special characters from the C++ test
{
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
out: &MyMessage{
Count: Int32(42),
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
Count: Int32(42),
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
},
},
// Quoted string with quoted backslash
{
in: `count:42 name: "\\'xyz"`,
in: `count:42 name: "\\'xyz"`,
out: &MyMessage{
Count: Int32(42),
Name: String(`\'xyz`),
Count: Int32(42),
Name: String(`\'xyz`),
},
},
// Quoted string with UTF-8 bytes.
{
in: "count:42 name: '\303\277\302\201\xAB'",
in: "count:42 name: '\303\277\302\201\xAB'",
out: &MyMessage{
Count: Int32(42),
Name: String("\303\277\302\201\xAB"),
Count: Int32(42),
Name: String("\303\277\302\201\xAB"),
},
},
// Bad quoted string
{
in: `inner: < host: "\0" >` + "\n",
err: `line 1.15: invalid quoted string "\0"`,
in: `inner: < host: "\0" >` + "\n",
err: `line 1.15: invalid quoted string "\0"`,
},
// Number too large for int64
{
in: "count: 123456789012345678901",
err: "line 1.7: invalid int32: 123456789012345678901",
in: "count: 123456789012345678901",
err: "line 1.7: invalid int32: 123456789012345678901",
},
// Number too large for int32
{
in: "count: 1234567890123",
err: "line 1.7: invalid int32: 1234567890123",
in: "count: 1234567890123",
err: "line 1.7: invalid int32: 1234567890123",
},
// Number in hexadecimal
{
in: "count: 0x2beef",
in: "count: 0x2beef",
out: &MyMessage{
Count: Int32(0x2beef),
},
@ -176,7 +176,7 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Number in octal
{
in: "count: 024601",
in: "count: 024601",
out: &MyMessage{
Count: Int32(024601),
},
@ -184,9 +184,9 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Floating point number with "f" suffix
{
in: "count: 4 others:< weight: 17.0f >",
in: "count: 4 others:< weight: 17.0f >",
out: &MyMessage{
Count: Int32(4),
Count: Int32(4),
Others: []*OtherMessage{
{
Weight: Float32(17),
@ -197,69 +197,69 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Floating point positive infinity
{
in: "count: 4 bigfloat: inf",
in: "count: 4 bigfloat: inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(1)),
Count: Int32(4),
Bigfloat: Float64(math.Inf(1)),
},
},
// Floating point negative infinity
{
in: "count: 4 bigfloat: -inf",
in: "count: 4 bigfloat: -inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(-1)),
Count: Int32(4),
Bigfloat: Float64(math.Inf(-1)),
},
},
// Number too large for float32
{
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
},
// Number posing as a quoted string
{
in: `inner: < host: 12 >` + "\n",
err: `line 1.15: invalid string: 12`,
in: `inner: < host: 12 >` + "\n",
err: `line 1.15: invalid string: 12`,
},
// Quoted string posing as int32
{
in: `count: "12"`,
err: `line 1.7: invalid int32: "12"`,
in: `count: "12"`,
err: `line 1.7: invalid int32: "12"`,
},
// Quoted string posing a float32
{
in: `others:< weight: "17.4" >`,
err: `line 1.17: invalid float32: "17.4"`,
in: `others:< weight: "17.4" >`,
err: `line 1.17: invalid float32: "17.4"`,
},
// Enum
{
in: `count:42 bikeshed: BLUE`,
in: `count:42 bikeshed: BLUE`,
out: &MyMessage{
Count: Int32(42),
Bikeshed: MyMessage_BLUE.Enum(),
Count: Int32(42),
Bikeshed: MyMessage_BLUE.Enum(),
},
},
// Repeated field
{
in: `count:42 pet: "horsey" pet:"bunny"`,
in: `count:42 pet: "horsey" pet:"bunny"`,
out: &MyMessage{
Count: Int32(42),
Pet: []string{"horsey", "bunny"},
Count: Int32(42),
Pet: []string{"horsey", "bunny"},
},
},
// Repeated message with/without colon and <>/{}
{
in: `count:42 others:{} others{} others:<> others:{}`,
in: `count:42 others:{} others{} others:<> others:{}`,
out: &MyMessage{
Count: Int32(42),
Count: Int32(42),
Others: []*OtherMessage{
{},
{},
@ -271,9 +271,9 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Missing colon for inner message
{
in: `count:42 inner < host: "cauchy.syd" >`,
in: `count:42 inner < host: "cauchy.syd" >`,
out: &MyMessage{
Count: Int32(42),
Count: Int32(42),
Inner: &InnerMessage{
Host: String("cauchy.syd"),
},
@ -282,33 +282,33 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Missing colon for string field
{
in: `name "Dave"`,
err: `line 1.5: expected ':', found "\"Dave\""`,
in: `name "Dave"`,
err: `line 1.5: expected ':', found "\"Dave\""`,
},
// Missing colon for int32 field
{
in: `count 42`,
err: `line 1.6: expected ':', found "42"`,
in: `count 42`,
err: `line 1.6: expected ':', found "42"`,
},
// Missing required field
{
in: ``,
err: `line 1.0: message testdata.MyMessage missing required field "count"`,
in: ``,
err: `line 1.0: message testdata.MyMessage missing required field "count"`,
},
// Repeated non-repeated field
{
in: `name: "Rob" name: "Russ"`,
err: `line 1.12: non-repeated field "name" was repeated`,
in: `name: "Rob" name: "Russ"`,
err: `line 1.12: non-repeated field "name" was repeated`,
},
// Group
{
in: `count: 17 SomeGroup { group_field: 12 }`,
in: `count: 17 SomeGroup { group_field: 12 }`,
out: &MyMessage{
Count: Int32(17),
Count: Int32(17),
Somegroup: &MyMessage_SomeGroup{
GroupField: Int32(12),
},
@ -317,18 +317,18 @@ var unMarshalTextTests = []UnmarshalTextTest{
// Semicolon between fields
{
in: `count:3;name:"Calvin"`,
in: `count:3;name:"Calvin"`,
out: &MyMessage{
Count: Int32(3),
Name: String("Calvin"),
Count: Int32(3),
Name: String("Calvin"),
},
},
// Comma between fields
{
in: `count:4,name:"Ezekiel"`,
in: `count:4,name:"Ezekiel"`,
out: &MyMessage{
Count: Int32(4),
Name: String("Ezekiel"),
Count: Int32(4),
Name: String("Ezekiel"),
},
},
@ -363,25 +363,25 @@ var unMarshalTextTests = []UnmarshalTextTest{
` >` +
`>`,
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
Quote: String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Count: Int32(42),
Name: String("Dave"),
Quote: String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &InnerMessage{
Host: String("footrest.syd"),
Port: Int32(7001),
Connected: Bool(true),
Host: String("footrest.syd"),
Port: Int32(7001),
Connected: Bool(true),
},
Others: []*OtherMessage{
{
Key: Int64(3735928559),
Value: []byte{0x1, 'A', '\a', '\f'},
Key: Int64(3735928559),
Value: []byte{0x1, 'A', '\a', '\f'},
},
{
Weight: Float32(58.9),
Weight: Float32(58.9),
Inner: &InnerMessage{
Host: String("lesha.mtv"),
Port: Int32(8002),
Host: String("lesha.mtv"),
Port: Int32(8002),
},
},
},
@ -413,6 +413,16 @@ func TestUnmarshalText(t *testing.T) {
}
}
func TestUnmarshalTextCustomMessage(t *testing.T) {
msg := &textMessage{}
if err := UnmarshalText("custom", msg); err != nil {
t.Errorf("Unexpected error from custom unmarshal: %v", err)
}
if UnmarshalText("not custom", msg) == nil {
t.Errorf("Didn't get expected error from custom unmarshal")
}
}
// Regression test; this caused a panic.
func TestRepeatedEnum(t *testing.T) {
pb := new(RepeatedEnum)

View File

@ -44,37 +44,57 @@ import (
pb "./testdata"
)
// textMessage implements the methods that allow it to marshal and unmarshal
// itself as text.
type textMessage struct {
}
func (*textMessage) MarshalText() ([]byte, error) {
return []byte("custom"), nil
}
func (*textMessage) UnmarshalText(bytes []byte) error {
if string(bytes) != "custom" {
return errors.New("expected 'custom'")
}
return nil
}
func (*textMessage) Reset() {}
func (*textMessage) String() string { return "" }
func (*textMessage) ProtoMessage() {}
func newTestMessage() *pb.MyMessage {
msg := &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Quote: proto.String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Count: proto.Int32(42),
Name: proto.String("Dave"),
Quote: proto.String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &pb.InnerMessage{
Host: proto.String("footrest.syd"),
Port: proto.Int32(7001),
Connected: proto.Bool(true),
Host: proto.String("footrest.syd"),
Port: proto.Int32(7001),
Connected: proto.Bool(true),
},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(0xdeadbeef),
Value: []byte{1, 65, 7, 12},
Key: proto.Int64(0xdeadbeef),
Value: []byte{1, 65, 7, 12},
},
{
Weight: proto.Float32(6.022),
Weight: proto.Float32(6.022),
Inner: &pb.InnerMessage{
Host: proto.String("lesha.mtv"),
Port: proto.Int32(8002),
Host: proto.String("lesha.mtv"),
Port: proto.Int32(8002),
},
},
},
Bikeshed: pb.MyMessage_BLUE.Enum(),
Bikeshed: pb.MyMessage_BLUE.Enum(),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(8),
},
// One normally wouldn't do this.
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
XXX_unrecognized: []byte{13<<3 | 0, 4},
XXX_unrecognized: []byte{13<<3 | 0, 4},
}
ext := &pb.Ext{
Data: proto.String("Big gobs for big rats"),
@ -153,6 +173,16 @@ func TestMarshalText(t *testing.T) {
}
}
func TestMarshalTextCustomMessage(t *testing.T) {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
t.Fatalf("proto.MarshalText: %v", err)
}
s := buf.String()
if s != "custom" {
t.Errorf("Got %q, expected %q", s, "custom")
}
}
func TestMarshalTextNil(t *testing.T) {
want := "<nil>"
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
@ -250,8 +280,8 @@ func TestCompactText(t *testing.T) {
func TestStringEscaping(t *testing.T) {
testCases := []struct {
in *pb.Strings
out string
in *pb.Strings
out string
}{
{
// Test data from C++ test (TextFormatTest.StringEscape).
@ -299,8 +329,8 @@ func TestStringEscaping(t *testing.T) {
// This is a proxy for something like a nearly-full or imminently-failing disk,
// or a network connection that is about to die.
type limitedWriter struct {
b bytes.Buffer
limit int
b bytes.Buffer
limit int
}
var outOfSpace = errors.New("proto: insufficient space")
@ -337,8 +367,8 @@ func TestMarshalTextFailing(t *testing.T) {
func TestFloats(t *testing.T) {
tests := []struct {
f float64
want string
f float64
want string
}{
{0, "0"},
{4.7, "4.7"},

View File

@ -168,9 +168,10 @@ func (l *Log) open(path string) error {
if err == io.EOF {
debugln("open.log.append: finish ")
} else {
if err = os.Truncate(path, readBytes); err != nil {
if err = l.file.Truncate(readBytes); err != nil {
return fmt.Errorf("raft.Log: Unable to recover: %v", err)
}
l.file.Seek(readBytes, os.SEEK_SET)
}
break
}
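The fix truncates through the already-open *os.File and rewinds its offset, instead of truncating by path while the handle still points past the new end. In isolation (a sketch with a generic handle f and last-good offset n, not the raft.Log fields):

// f is an *os.File open for appending; n (int64) is the last fully-read offset.
if err := f.Truncate(n); err != nil {
	return fmt.Errorf("raft.Log: Unable to recover: %v", err)
}
if _, err := f.Seek(n, os.SEEK_SET); err != nil {
	return err
}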

View File

@ -2,6 +2,23 @@
// source: append_entries_request.proto
// DO NOT EDIT!
/*
Package protobuf is a generated protocol buffer package.
It is generated from these files:
append_entries_request.proto
append_entries_responses.proto
log_entry.proto
request_vote_request.proto
request_vote_responses.proto
snapshot_recovery_request.proto
snapshot_recovery_response.proto
snapshot_request.proto
snapshot_response.proto
It has these top-level messages:
AppendEntriesRequest
*/
package protobuf
import proto "github.com/coreos/etcd/third_party/code.google.com/p/gogoprotobuf/proto"
@ -110,7 +127,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -127,7 +144,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.Term = &v
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -144,7 +161,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.PrevLogIndex = &v
case 3:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -161,7 +178,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.PrevLogTerm = &v
case 4:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -178,7 +195,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
m.CommitIndex = &v
case 5:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -201,7 +218,7 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
index = postIndex
case 6:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto2.ErrWrongType
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -236,6 +253,9 @@ func (m *AppendEntriesRequest) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io1.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -309,7 +329,6 @@ func sovAppendEntriesRequest(x uint64) (n int) {
}
func sozAppendEntriesRequest(x uint64) (n int) {
return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovAppendEntriesRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedAppendEntriesRequest(r randyAppendEntriesRequest, easy bool) *AppendEntriesRequest {
this := &AppendEntriesRequest{}

View File

@ -94,7 +94,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -111,7 +111,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
m.Term = &v
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -128,7 +128,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
m.Index = &v
case 3:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -145,7 +145,7 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
m.CommitIndex = &v
case 4:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto4.ErrWrongType
}
var v int
for shift := uint(0); ; shift += 7 {
@ -175,6 +175,9 @@ func (m *AppendEntriesResponse) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io2.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -236,7 +239,6 @@ func sovAppendEntriesResponses(x uint64) (n int) {
}
func sozAppendEntriesResponses(x uint64) (n int) {
return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovAppendEntriesResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedAppendEntriesResponse(r randyAppendEntriesResponses, easy bool) *AppendEntriesResponse {
this := &AppendEntriesResponse{}

View File

@ -94,7 +94,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -111,7 +111,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
m.Index = &v
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -128,7 +128,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
m.Term = &v
case 3:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -151,7 +151,7 @@ func (m *LogEntry) Unmarshal(data []byte) error {
index = postIndex
case 4:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto.ErrWrongType
}
var byteLen int
for shift := uint(0); ; shift += 7 {
@ -185,6 +185,9 @@ func (m *LogEntry) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -248,7 +251,6 @@ func sovLogEntry(x uint64) (n int) {
}
func sozLogEntry(x uint64) (n int) {
return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovLogEntry(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedLogEntry(r randyLogEntry, easy bool) *LogEntry {
this := &LogEntry{}

View File

@ -94,7 +94,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -111,7 +111,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
m.Term = &v
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -128,7 +128,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
m.LastLogIndex = &v
case 3:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -145,7 +145,7 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
m.LastLogTerm = &v
case 4:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto6.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -180,6 +180,9 @@ func (m *RequestVoteRequest) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io3.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -242,7 +245,6 @@ func sovRequestVoteRequest(x uint64) (n int) {
}
func sozRequestVoteRequest(x uint64) (n int) {
return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovRequestVoteRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedRequestVoteRequest(r randyRequestVoteRequest, easy bool) *RequestVoteRequest {
this := &RequestVoteRequest{}

View File

@ -78,7 +78,7 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto8.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -95,7 +95,7 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
m.Term = &v
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto8.ErrWrongType
}
var v int
for shift := uint(0); ; shift += 7 {
@ -125,6 +125,9 @@ func (m *RequestVoteResponse) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io4.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -178,7 +181,6 @@ func sovRequestVoteResponses(x uint64) (n int) {
}
func sozRequestVoteResponses(x uint64) (n int) {
return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovRequestVoteResponses(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedRequestVoteResponse(r randyRequestVoteResponses, easy bool) *RequestVoteResponse {
this := &RequestVoteResponse{}

View File

@ -125,7 +125,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -148,7 +148,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
index = postIndex
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -165,7 +165,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
m.LastIndex = &v
case 3:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -182,7 +182,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
m.LastTerm = &v
case 4:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -205,7 +205,7 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
index = postIndex
case 5:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var byteLen int
for shift := uint(0); ; shift += 7 {
@ -239,6 +239,9 @@ func (m *SnapshotRecoveryRequest) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io5.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -266,7 +269,7 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -289,7 +292,7 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
index = postIndex
case 2:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto10.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -324,6 +327,9 @@ func (m *SnapshotRecoveryRequest_Peer) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io5.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -422,7 +428,6 @@ func sovSnapshotRecoveryRequest(x uint64) (n int) {
}
func sozSnapshotRecoveryRequest(x uint64) (n int) {
return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotRecoveryRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedSnapshotRecoveryRequest(r randySnapshotRecoveryRequest, easy bool) *SnapshotRecoveryRequest {
this := &SnapshotRecoveryRequest{}

View File

@ -86,7 +86,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -103,7 +103,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
m.Term = &v
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
}
var v int
for shift := uint(0); ; shift += 7 {
@ -121,7 +121,7 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
m.Success = &b
case 3:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto12.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -150,6 +150,9 @@ func (m *SnapshotRecoveryResponse) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io6.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -207,7 +210,6 @@ func sovSnapshotRecoveryResponse(x uint64) (n int) {
}
func sozSnapshotRecoveryResponse(x uint64) (n int) {
return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotRecoveryResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedSnapshotRecoveryResponse(r randySnapshotRecoveryResponse, easy bool) *SnapshotRecoveryResponse {
this := &SnapshotRecoveryResponse{}

View File

@ -86,7 +86,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -109,7 +109,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
index = postIndex
case 2:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -126,7 +126,7 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
m.LastIndex = &v
case 3:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto14.ErrWrongType
}
var v uint64
for shift := uint(0); ; shift += 7 {
@ -155,6 +155,9 @@ func (m *SnapshotRequest) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io7.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -213,7 +216,6 @@ func sovSnapshotRequest(x uint64) (n int) {
}
func sozSnapshotRequest(x uint64) (n int) {
return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotRequest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedSnapshotRequest(r randySnapshotRequest, easy bool) *SnapshotRequest {
this := &SnapshotRequest{}

View File

@ -70,7 +70,7 @@ func (m *SnapshotResponse) Unmarshal(data []byte) error {
switch fieldNum {
case 1:
if wireType != 0 {
return proto.ErrWrongType
return code_google_com_p_gogoprotobuf_proto16.ErrWrongType
}
var v int
for shift := uint(0); ; shift += 7 {
@ -100,6 +100,9 @@ func (m *SnapshotResponse) Unmarshal(data []byte) error {
if err != nil {
return err
}
if (index + skippy) > l {
return io8.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, data[index:index+skippy]...)
index += skippy
}
@ -149,7 +152,6 @@ func sovSnapshotResponse(x uint64) (n int) {
}
func sozSnapshotResponse(x uint64) (n int) {
return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sovSnapshotResponse(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func NewPopulatedSnapshotResponse(r randySnapshotResponse, easy bool) *SnapshotResponse {
this := &SnapshotResponse{}