proxy: retrieve ClientURLs from cluster

This is a simple solution for keeping the proxy up to date with the
state of the cluster. It uses the cluster configuration provided at
startup (i.e. with `-initial-cluster`) to determine where to reach
peer(s) in the cluster, and then periodically hits the `/members`
endpoint of those peer(s) (the same mechanism that
`-initial-cluster-state=existing` uses to initialise) to refresh the
set of valid client URLs to proxy to.

This does not address discovery (#1376). It would probably be better
to update the set of proxyURLs dynamically whenever we fetch the new
state of the cluster, but doing that cleanly through the proxy
interface needs a bit more thought.
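
To make the refresh mechanism concrete, here is a rough, self-contained
sketch of the pattern (not the etcd code itself: `getURLs`, `refreshLoop`,
and the stub URL list are illustrative stand-ins for the `GetProxyURLs`
type and the director's background refresh added in this commit; the
30-second interval mirrors `refreshEndpoints`):

package main

import (
    "log"
    "time"
)

// getURLs mirrors the GetProxyURLs contract introduced by this commit: a
// function returning the current set of client URLs to proxy to. In etcd it
// wraps etcdserver.GetClusterFromPeers; here it is a stub.
type getURLs func() []string

// refreshLoop is the shape of the director's background refresh: resolve the
// URL set immediately, then again on every tick until stopped.
func refreshLoop(uf getURLs, interval time.Duration, apply func([]string), stop <-chan struct{}) {
    apply(uf())
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            apply(uf())
        case <-stop:
            return
        }
    }
}

func main() {
    uf := func() []string {
        // Placeholder for "GET /members on a configured peer"; the real code
        // returns the ClientURLs reported by the cluster.
        return []string{"http://127.0.0.1:4001", "http://127.0.0.1:4002"}
    }
    stop := make(chan struct{})
    go refreshLoop(uf, 30*time.Second, func(urls []string) {
        log.Printf("proxying to: %v", urls)
    }, stop)
    time.Sleep(100 * time.Millisecond) // let the initial refresh log, then exit
    close(stop)
}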

Example in Procfile works again.
Author: Jonathan Boulle, 2014-10-24 15:26:05 -07:00
parent 0fcb59e7d9
commit 719c57a29d
7 changed files with 80 additions and 58 deletions

Procfile

@@ -2,4 +2,4 @@
 etcd1: bin/etcd -name node1 -listen-client-urls http://127.0.0.1:4001 -advertise-client-urls http://127.0.0.1:4001 -listen-peer-urls http://127.0.0.1:7001 -initial-advertise-peer-urls http://127.0.0.1:7001 -initial-cluster 'node1=http://localhost:7001,node2=http://localhost:7002,node3=http://localhost:7003' -initial-cluster-state new
 etcd2: bin/etcd -name node2 -listen-client-urls http://127.0.0.1:4002 -advertise-client-urls http://127.0.0.1:4002 -listen-peer-urls http://127.0.0.1:7002 -initial-advertise-peer-urls http://127.0.0.1:7002 -initial-cluster 'node1=http://localhost:7001,node2=http://localhost:7002,node3=http://localhost:7003' -initial-cluster-state new
 etcd3: bin/etcd -name node3 -listen-client-urls http://127.0.0.1:4003 -advertise-client-urls http://127.0.0.1:4003 -listen-peer-urls http://127.0.0.1:7003 -initial-advertise-peer-urls http://127.0.0.1:7003 -initial-cluster 'node1=http://localhost:7001,node2=http://localhost:7002,node3=http://localhost:7003' -initial-cluster-state new
-#proxy: bin/etcd -proxy=on -bind-addr 127.0.0.1:8080 -peers 'localhost:7001,localhost:7002,localhost:7003'
+proxy: bin/etcd -proxy=on -bind-addr 127.0.0.1:8080 -initial-cluster 'node1=http://localhost:7001,node2=http://localhost:7002,node3=http://localhost:7003'


@@ -19,6 +19,7 @@ package etcdserver
 import (
     "encoding/json"
     "errors"
+    "fmt"
     "io/ioutil"
     "log"
     "math/rand"
@@ -184,7 +185,10 @@ func NewServer(cfg *ServerConfig) *EtcdServer {
     haveWAL := wal.Exist(cfg.WALDir())
     switch {
     case !haveWAL && cfg.ClusterState == ClusterStateValueExisting:
-        cl := getClusterFromPeers(cfg.Cluster.PeerURLs())
+        cl, err := GetClusterFromPeers(cfg.Cluster.PeerURLs())
+        if err != nil {
+            log.Fatal(err)
+        }
         if err := cfg.Cluster.ValidateAndAssignIDs(cl.Members()); err != nil {
             log.Fatalf("etcdserver: %v", err)
         }
@@ -669,7 +673,7 @@ func (s *EtcdServer) snapshot(snapi uint64, snapnodes []uint64) {
     s.storage.Cut()
 }
 
-func getClusterFromPeers(urls []string) *Cluster {
+func GetClusterFromPeers(urls []string) (*Cluster, error) {
     for _, u := range urls {
         resp, err := http.Get(u + "/members")
         if err != nil {
@@ -691,10 +695,9 @@ func getClusterFromPeers(urls []string) *Cluster {
             log.Printf("etcdserver: parse uint error: %v", err)
             continue
         }
-        return NewClusterFromMembers("", id, membs)
+        return NewClusterFromMembers("", id, membs), nil
     }
-    log.Fatalf("etcdserver: could not retrieve cluster information from the given urls")
-    return nil
+    return nil, fmt.Errorf("etcdserver: could not retrieve cluster information from the given urls")
 }
 
 func startNode(cfg *ServerConfig, ids []uint64) (id uint64, n raft.Node, w *wal.WAL) {

main.go

@@ -232,11 +232,18 @@ func startProxy() {
         log.Fatal(err)
     }
 
-    ph, err := proxy.NewHandler(pt, cls.PeerURLs())
-    if err != nil {
-        log.Fatal(err)
-    }
+    // TODO(jonboulle): update peerURLs dynamically (i.e. when updating
+    // clientURLs) instead of just using the initial fixed list here
+    peerURLs := cls.PeerURLs()
+    uf := func() []string {
+        cls, err := etcdserver.GetClusterFromPeers(peerURLs)
+        if err != nil {
+            log.Printf("etcd: %v", err)
+            return []string{}
+        }
+        return cls.ClientURLs()
+    }
+    ph := proxy.NewHandler(pt, uf)
     ph = &pkg.CORSHandler{
         Handler: ph,
         Info:    cors,


@@ -17,7 +17,6 @@
 package proxy
 
 import (
-    "errors"
     "log"
     "net/url"
     "sync"
@@ -28,28 +27,52 @@ const (
     // amount of time an endpoint will be held in a failed
     // state before being reconsidered for proxied requests
     endpointFailureWait = 5 * time.Second
+
+    // how often the proxy will attempt to refresh its set of endpoints
+    refreshEndpoints = 30 * time.Second
 )
 
-func newDirector(scheme string, addrs []string) (*director, error) {
-    if len(addrs) == 0 {
-        return nil, errors.New("one or more upstream addresses required")
-    }
-
-    endpoints := make([]*endpoint, len(addrs))
-    for i, addr := range addrs {
-        u := url.URL{Scheme: scheme, Host: addr}
-        endpoints[i] = newEndpoint(u)
-    }
-
-    d := director{ep: endpoints}
-    return &d, nil
+func newDirector(urlsFunc GetProxyURLs) *director {
+    d := &director{
+        uf: urlsFunc,
+    }
+    d.refresh()
+    go func() {
+        for {
+            select {
+            case <-time.After(refreshEndpoints):
+                d.refresh()
+            }
+        }
+    }()
+    return d
 }
 
 type director struct {
+    sync.Mutex
     ep []*endpoint
+    uf GetProxyURLs
+}
+
+func (d *director) refresh() {
+    urls := d.uf()
+    d.Lock()
+    defer d.Unlock()
+    var endpoints []*endpoint
+    for _, u := range urls {
+        uu, err := url.Parse(u)
+        if err != nil {
+            log.Printf("upstream URL invalid: %v", err)
+            continue
+        }
+        endpoints = append(endpoints, newEndpoint(*uu))
+    }
+    d.ep = endpoints
 }
 
 func (d *director) endpoints() []*endpoint {
+    d.Lock()
+    defer d.Unlock()
     filtered := make([]*endpoint, 0)
     for _, ep := range d.ep {
         if ep.Available {


@@ -24,41 +24,36 @@ import (
 func TestNewDirectorScheme(t *testing.T) {
     tests := []struct {
-        scheme string
-        addrs  []string
+        urls []string
         want []string
     }{
         {
-            scheme: "http",
-            addrs:  []string{"192.0.2.8:4002", "example.com:8080"},
+            urls: []string{"http://192.0.2.8:4002", "http://example.com:8080"},
             want: []string{"http://192.0.2.8:4002", "http://example.com:8080"},
         },
         {
-            scheme: "https",
-            addrs:  []string{"192.0.2.8:4002", "example.com:8080"},
+            urls: []string{"https://192.0.2.8:4002", "https://example.com:8080"},
             want: []string{"https://192.0.2.8:4002", "https://example.com:8080"},
         },
-        // accept addrs without a port
+        // accept urls without a port
         {
-            scheme: "http",
-            addrs:  []string{"192.0.2.8"},
+            urls: []string{"http://192.0.2.8"},
             want: []string{"http://192.0.2.8"},
         },
-        // accept addrs even if they are garbage
+        // accept urls even if they are garbage
         {
-            scheme: "http",
-            addrs:  []string{"."},
+            urls: []string{"http://."},
            want: []string{"http://."},
         },
     }
 
     for i, tt := range tests {
-        got, err := newDirector(tt.scheme, tt.addrs)
-        if err != nil {
-            t.Errorf("#%d: newDirectory returned unexpected error: %v", i, err)
+        uf := func() []string {
+            return tt.urls
         }
+        got := newDirector(uf)
 
         for ii, wep := range tt.want {
             gep := got.ep[ii].URL.String()


@@ -20,23 +20,17 @@ import (
     "net/http"
 )
 
-func NewHandler(t *http.Transport, addrs []string) (http.Handler, error) {
-    scheme := "http"
-    if t.TLSClientConfig != nil {
-        scheme = "https"
-    }
-
-    d, err := newDirector(scheme, addrs)
-    if err != nil {
-        return nil, err
-    }
-
-    rp := reverseProxy{
-        director:  d,
+// GetProxyURLs is a function which should return the current set of URLs to
+// which client requests should be proxied. This function will be queried
+// periodically by the proxy Handler to refresh the set of available
+// backends.
+type GetProxyURLs func() []string
+
+func NewHandler(t *http.Transport, urlsFunc GetProxyURLs) http.Handler {
+    return &reverseProxy{
+        director:  newDirector(urlsFunc),
         transport: t,
     }
-    return &rp, nil
 }
 
 func readonlyHandlerFunc(next http.Handler) func(http.ResponseWriter, *http.Request) {


@@ -78,7 +78,7 @@ func TestReverseProxyServe(t *testing.T) {
     for i, tt := range tests {
         rp := reverseProxy{
-            director:  &director{tt.eps},
+            director:  &director{ep: tt.eps},
             transport: tt.rt,
         }